<reponame>mpopatia/rovewithme<filename>startup/views.py
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.utils.html import escape, strip_tags
from django.views.decorators.csrf import csrf_exempt
from django.core.urlresolvers import reverse
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from startup.models import Airport, Tag, Flight, Hotel, City, CityTag, CityHotel, AirportFlight, CityInPlan, DateInPlan, Plan, CityGraph
from datetime import datetime
import json
import requests
import random
flight_key1 = "####################################"
flight_request = "https://www.googleapis.com/qpxExpress/v1/trips/search?key="+flight_key1
def index(request):
return render(request,'startup/index.html')
def login_page(request):
return render(request,'startup/login.html')
def signup_page(request):
return render(request,'startup/registration.html')
def plan(request, key):
return render(request,'startup/plan.html', {'key': key})
def plan_page(request):
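    # Seven-character pseudo-random key that becomes the shareable plan URL
    # (note that the local name shadows the built-in `hash`).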
hash = str(random.getrandbits(128))[0:7]
return HttpResponseRedirect(reverse('plan', kwargs={'key': hash}))
@csrf_exempt
def login_view(request):
username = request.POST['username']
password = request.POST['password']
user = authenticate(username = username, password = password)
if user is not None:
if user.is_active:
login(request, user)
# Should redirect to previous page as well
return HttpResponseRedirect(reverse('index'))
else:
return HttpResponse("Disabled account")
else:
return HttpResponse("Invalid login")
# logout
def logout_view(request):
logout(request)
return HttpResponseRedirect(reverse('index'))
@csrf_exempt
def signup(request):
username = escape(strip_tags(request.POST['username']))
email = escape(strip_tags(request.POST['email']))
password = escape(strip_tags(request.POST['password']))
first=escape(strip_tags(request.POST['firstname']))
last = escape(strip_tags(request.POST['lastname']))
user = User.objects.create_user(username=username, email=email, password=password, first_name=first, last_name=last)
return HttpResponseRedirect(reverse('index'))
def flight_query(request):
return HttpResponse("test")
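# Posts a trip-search payload to the QPX Express endpoint configured above and
# returns the parsed JSON response; `request_dict` is expected to follow the QPX
# request schema (see the payload built in get_flights() below).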
def query_flights(request_dict):
url = flight_request
payload = request_dict
headers = {"Content-Type": "application/json"}
response = requests.post(url, data=json.dumps(payload), headers=headers)
return response.json()
# @login_required(login_url='/login_page/')
def create_plan(request):
c = request.GET['source']
c = c.split(',')
city = c[0].strip()
state = c[1].strip()
source = City.objects.get(city=city, state = state)
plan_key = request.GET['plan_key']
start = datetime.strptime(request.GET['start_date'], '%m/%d/%Y')
end = datetime.strptime(request.GET['end_date'], '%m/%d/%Y')
cities = json.loads(request.GET['cities'])
elem = Plan.objects.filter(key=plan_key)
budget = int(float(request.GET['budget']))
weight = []
def helper1(request, plan_key, start, end, cities, elem):
p = Plan(owner=request.user, key=plan_key, source=source, budget=budget)
p.save()
d = DateInPlan(start=start, end=end, plan=p)
d.save()
for c in cities:
c = c.split(',')
city = c[0].strip()
state = c[1].strip()
obj = City.objects.get(city=city, state = state)
weight.append(obj)
cp = CityInPlan(city=obj, plan=p, owner=request.user)
cp.save()
return weight
def helper2(request, plan_key, start, end, cities, elem):
elem = elem[0]
dp = DateInPlan.objects.get(plan=elem)
if start.date() > dp.start:
dp.start = start
if end.date() < dp.end:
dp.end = end
dp.save()
for c in cities:
c = c.split(',')
city = c[0].strip()
state = c[1].strip()
obj = City.objects.get(city=city, state = state)
weight.append(obj)
cp = CityInPlan(city=obj, plan=elem, owner=request.user)
cp.save()
return weight
if len(elem) == 0:
weight = helper1(request, plan_key, start, end, cities, elem)
else:
weight = helper2(request, plan_key, start, end, cities, elem)
return HttpResponse("Y:")
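# A CityGraph row models an undirected "travelled together" edge between two
# cities; the helpers below look the edge up in either direction and either
# increment or read its weight.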
def update_city_weight(source, destination):
one = CityGraph.objects.filter(one=source, two=destination)
if one.exists() == False:
two = CityGraph.objects.filter(one=destination, two=source)
if two.exists() == False:
c = CityGraph(one=source, two=destination, weight = 1)
c.save()
else:
two = two[0]
nw = two.weight+1
two.weight = nw
two.save()
else:
one = one[0]
nw = one.weight+1
one.weight = nw
one.save()
def get_city_weight(source, destination):
one = CityGraph.objects.filter(one=source, two=destination)
if one.exists() == False:
two = CityGraph.objects.filter(one=destination, two=source)
if two.exists() == False:
return 0
else:
two = two[0]
return two.weight
else:
one = one[0]
return one.weight
def get_intersection(request):
plan_key = request.GET['plan_key']
res = get_max_destinations(Plan.objects.get(key=plan_key))
ans = map(lambda x: x.city+", "+x.state, res)
final = []
for i in ans:
if i not in final:
final.append(i)
return HttpResponse(json.dumps(final), content_type = "application/json")
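# Returns the cities proposed by the largest number of plan members, falling
# back to the next-highest vote count when no city was picked by everyone.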
def get_max_destinations(plan):
all_cities = CityInPlan.objects.filter(plan = plan)
map1 = {}
map2 = {}
for c in all_cities:
c = c.city
if c in map1:
current_count = map1[c]
new_count = current_count+1
map1[c] = new_count
if new_count in map2:
map2[new_count] = map2[new_count]+[c]
else:
map2[new_count] = [c]
else:
new_count = 1
map1[c] = new_count
if new_count in map2:
map2[new_count] = map2[new_count]+[c]
else:
map2[new_count] = [c]
members = len(get_plan_members(plan))
if members in map2:
return map2[members]
else:
nc = members-1
while nc >= 0:
if nc in map2:
return map2[nc]
nc -= 1
return []
def get_members(request):
key = request.GET['plan_key']
if key == "":
ans = []
else:
plan = Plan.objects.filter(key=key)
if plan.exists() == False:
ans = []
else:
plan = plan[0]
ans = get_plan_members(plan)
return HttpResponse(json.dumps(ans), content_type = "application/json")
def get_plan_members(plan):
all_cities = CityInPlan.objects.filter(plan=plan)
ans = []
    for c in all_cities:
        if c.owner.first_name not in ans:
            ans.append(c.owner.first_name)
    return ans
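# One-off seeding view: txt/new.txt is expected to contain
# "airport name, city, state, code" lines, while txt/one.txt, txt/two.txt and
# txt/fare.txt are parallel files with source city, destination city and fare.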
def populate_cities(request):
with open("txt/new.txt") as f:
content = f.readlines()
for line in content:
try:
s = line.split(',')
s = map(lambda x: x.strip(), s)
airport_name = s[0]
city = s[1]
state = s[2]
airport_code = s[3]
city_object = City.objects.filter(city=city)
if len(city_object) == 0:
city_object = City(city=city, state=state)
city_object.save()
else:
city_object = city_object[0]
a = Airport(name=airport_name, code=airport_code, city=city_object)
a.save()
except:
            pass
with open("txt/one.txt") as f:
one = f.readlines()
with open("txt/two.txt") as f:
two = f.readlines()
with open("txt/fare.txt") as f:
fare = f.readlines()
for i in xrange(0, len(fare)):
source = one[i].strip().split(',')
source_city = source[0].strip()
source_state = source[1][0:4].strip()
destination = two[i].strip().split(',')
destination_city = destination[0].strip()
destination_state = destination[1][0:4].strip()
fare2 = int(float(fare[i].strip()))
if "/" in source_city:
source_city = source_city.split("/")[0].strip()
if "/" in destination_city:
destination_city = destination_city.split("/")[0].strip()
try:
c_source = City.objects.get(city=source_city, state=source_state)
c_destination = City.objects.get(city=destination_city, state=destination_state)
source_airports = Airport.objects.get_city_airports(c_source)
destination_airports = Airport.objects.get_city_airports(c_destination)
for s in source_airports:
for d in destination_airports:
af = AirportFlight(fromFlight=s, toFlight=d,flight=fare2)
af.save()
except:
print "ERROR"
return HttpResponse("Done")
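# Queries a Priceline stay-listing endpoint for up to five hotels under the
# given nightly price cap; start_date/end_date arrive as day-of-year integers
# and are mapped back onto 2015 calendar dates before building the URL.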
def get_hotels(dest, start_date, end_date, per_night):
start_date = datetime.strptime('2015 '+str(start_date), '%Y %j').strftime('%m/%d/%Y')
end_date = datetime.strptime('2015 '+str(end_date), '%Y %j').strftime('%m/%d/%Y')
start = start_date.split('/')
end = end_date.split('/')
start = start[2]+start[0]+start[1]
end = end[2]+end[0]+end[1]
dest = dest.city
dest = dest.replace(" ", "%20")
url = "https://www.priceline.com/pws/v0/stay/retail/listing/"
url+= dest
url+="?rguid=3459hjdfdf&check-in="+start
url+="&check-out="+end
    url+="&currency=USD&responseoptions=DETAILED_HOTEL,NEARBY_ATTR&rooms=1"
url+="&sort=HDR&offset=0&page-size=5&max-price="+str(int(per_night))
response = requests.get(url)
res = response.json()
if 'hotels' in res:
return res['hotels']
else:
return []
def can_add_city(request):
source = request.GET['source']
destination = request.GET['destination']
budget = int(request.GET['budget'])
start = datetime.strptime(request.GET['start_date'], '%m/%d/%Y')
end = datetime.strptime(request.GET['end_date'], '%m/%d/%Y')
ans = {'status':validate_city(source, destination, budget, start, end)}
return HttpResponse(json.dumps(ans), content_type = "application/json")
# Returns True when the cheapest known flight between s and d still leaves part of the budget unspent.
def validate_city(s, d, budget, start_date, end_date):
mn = 10000000
airport_source = Airport.objects.filter(city=s)
airport_destination = Airport.objects.filter(city=d)
for i in airport_source:
for j in airport_destination:
air = AirportFlight.objects.filter(fromFlight=i, toFlight=j)
for a in air:
flycost = a.getAverageFlight()
if flycost < mn:
mn = flycost
break
flycost = mn
remain = budget - flycost
if (remain <= 0):
return False
return True
def get_recommendations(request):
c = request.GET['source']
c = c.split(',')
city = c[0].strip()
state = c[1].strip()
source = City.objects.get(city=city, state = state)
if 'cities' in request.GET:
current_cities = json.loads(request.GET['cities'])
else:
current_cities = []
budget = int(float(request.GET['budget']))
start = datetime.strptime(request.GET['start_date'], '%m/%d/%Y').timetuple().tm_yday
end = datetime.strptime(request.GET['end_date'], '%m/%d/%Y').timetuple().tm_yday
result = get_recommendations_internal(source, current_cities, budget, start, end)
res = map(lambda x: x.city+", "+x.state, result)
ans = []
for i in res:
if i not in ans:
ans.append(i)
return HttpResponse(json.dumps(ans), content_type = "application/json")
def validate_hotel(s, d, budget, start_date, end_date):
per_night = (float(budget-250) / float((end_date - start_date)))
h = get_hotels(d, start_date, end_date, per_night)
if h == []:
return False
return True
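# Recommendation pipeline: take the cities reachable within budget, drop the
# ones that fail the flight-budget check or are already in the plan, rank the
# rest by co-travel weight, keep the top four, then require an affordable hotel.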
def get_recommendations_internal(source, current_cities, budget, start_date, end_date):
pc = AirportFlight.objects.getCitiesWithin(source, budget)
pc2 = filter (lambda d: validate_city(source,d, budget, start_date, end_date) , pc)
tuples = map( lambda d: (d, get_city_weight(source, d)), pc2)
tuples = [ (d,w) for (d,w) in tuples if (d.city+", "+d.state) not in current_cities ]
tuples.sort(key=lambda tup: tup[1], reverse=True)
if len(tuples) > 4:
tuples = tuples[:4]
tuples = map(lambda (d,w): d, tuples)
ans = filter (lambda d: validate_hotel(source,d, budget, start_date, end_date) , tuples)
return ans
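# Builds a round-trip QPX Express query (outbound on `start`, return on `end`)
# for a single adult between two airport codes and returns the raw API response.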
def get_flights(start, end, source, dest):
start = datetime.strftime(start, '%Y-%m-%d')
end = datetime.strftime(end, '%Y-%m-%d')
data = {
"request": {
"slice": [
{
"origin": source,
"destination": dest,
"date": start
},
{
"origin": dest,
"destination": source,
"date": end
}
],
"passengers": {
"adultCount": 1,
"infantInLapCount": 0,
"infantInSeatCount": 0,
"childCount": 0,
"seniorCount": 0
},
"solutions": 10
}
}
response = query_flights(data)
return response
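# Assembles the final itinerary for a plan: picks the most-voted destination,
# takes the first airport on each side, then fetches flight options via QPX and
# hotel options via Priceline within the plan's per-night budget.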
def compute_plan(request):
key = request.GET['plan_key']
plan = Plan.objects.filter(key=key)
if plan.exists() == False:
ans = {}
else:
plan = plan[0]
dp = DateInPlan.objects.get(plan=plan)
dest = get_max_destinations(plan)[0]
ap = Airport.objects.filter(city=plan.source)[0]
ap2 = Airport.objects.filter(city=dest)[0]
f = get_flights(dp.start, dp.end, ap.code, ap2.code)
st = dp.start.timetuple().tm_yday
end = dp.end.timetuple().tm_yday
h = get_hotels(dest, st, end, plan.budget/(end-st))
h2 = []
for i in h:
d = {}
d['name'] = i['name']
d['stars'] = i['starRating']
d['price'] = i['ratesSummary']['minPrice']
d['rating'] = i['overallGuestRating']
d['image'] = i['thumbnailUrl']
h2.append(d)
f2 = []
if 'trips' in f:
if 'tripOption' in f['trips']:
for j in f['trips']['tripOption']:
d = {}
d['price'] = j['saleTotal']
d['source'] = ap.city.city
d['destination'] = ap2.city.city
f2.append(d)
ans = {'hotels':h2, 'flights': f2, 'destination': ap2.city.city}
    return HttpResponse(json.dumps(ans), content_type = "application/json")
<gh_stars>0
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for memory attention layer."""
import collections
import itertools
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as jnp
from language.mentionmemory.modules import memory_attention_layer
from language.mentionmemory.utils import test_utils
import ml_collections
import numpy as np
import scipy.special
_LARGE_NUMBER = 1e10
def _gen_compare_retrievals_with_numpy_list():
top_k_max_text_identifiers_list = [
# First, we experiment without max_text_identifiers
(None, None),
(3, None),
(5, None),
# Second, we experiment with very small max_text_identifiers
# which should significantly increase the num_disallowed
(None, 2),
(3, 2),
(5, 2),
# Finally, we experiment with medium max_text_identifiers
(None, 10),
(3, 10),
(5, 10),
]
same_passage_memory_policy_list = ['disallow', 'allow', 'only']
test_list = itertools.product(top_k_max_text_identifiers_list,
same_passage_memory_policy_list)
test_list = [
tuple([index] + list(x) + [y]) for index, (x, y) in enumerate(test_list)
]
return test_list
class MemoryAttentionLayerTest(parameterized.TestCase):
"""Memory attention layer test."""
dtype = jnp.float32
memory_key_dim = 4
input_dim = 8
memory_update_type = 'additive'
table_size = 128
rows = 4
splits = 2
k_top_device = 2
k_top_post_selection = 3
seq_len = 20
bsz = 2
n_devices = 4
n_mentions = 5
entity_vocab_size = 10
memory_update_config = ml_collections.FrozenConfigDict({})
@parameterized.parameters(
(False),
(True),
)
def test_mention_memory_layer(self, separate_memory_values):
"""Testing memory attention layer."""
test_utils.force_multi_devices(self.n_devices)
devices = jax.local_devices()
model = memory_attention_layer.MemoryAttentionLayer(
memory_key_dim=self.memory_key_dim,
input_dim=self.input_dim,
memory_update_type=self.memory_update_type,
memory_update_config=self.memory_update_config,
k_top_device=self.k_top_device,
k_top_post_selection=self.k_top_post_selection,
splits=self.splits,
dtype=self.dtype)
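    # Positional argument 9 is the `deterministic` flag; when no separate memory
    # values are used, argument 10 (memory_values=None) must also be static,
    # since a None cannot be sharded across devices by pmap.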
static_argnums = (9) if separate_memory_values else (9, 10)
pinit_with_output = jax.pmap(
model.init_with_output,
axis_name='batch',
static_broadcasted_argnums=static_argnums)
rng = jax.random.PRNGKey(0)
split_rng = jax.random.split(rng, self.n_devices)
encoded_input = jnp.ones(
shape=(self.bsz, self.seq_len, self.input_dim), dtype=self.dtype)
encoded_input = jax.device_put_replicated(encoded_input, devices)
mention_batch_positions = jnp.tile(
jnp.arange(self.bsz).reshape(-1, 1), (1, 3)).reshape(-1)
mention_batch_positions = jax.device_put_replicated(mention_batch_positions,
devices)
mention_start_positions = jnp.tile(jnp.asarray([0, 5, 10]), (self.bsz))
mention_start_positions = jax.device_put_replicated(mention_start_positions,
devices)
mention_end_positions = jnp.tile(jnp.asarray([2, 7, 12]), (self.bsz))
mention_end_positions = jax.device_put_replicated(mention_end_positions,
devices)
n_mentions = mention_start_positions.shape[-1]
mention_mask = jnp.tile(jnp.asarray([1, 1, 1]), (self.bsz))
mention_mask = jax.device_put_replicated(mention_mask, devices)
memory_table = np.ones(
(self.n_devices * self.table_size, self.memory_key_dim),
dtype=self.dtype)
# Make sure id 0 or 1 will be highest scoring
memory_table[0] = memory_table[0] * 2.0
memory_table[1] = memory_table[1] * -2.0
memory_table = jnp.asarray(memory_table, dtype=self.dtype)
memory_keys = memory_table.reshape(self.n_devices, self.rows,
self.table_size // self.rows,
self.memory_key_dim)
memory_keys_sharded = jax.device_put_sharded(list(memory_keys), devices)
if separate_memory_values:
memory_values = memory_table.reshape(self.n_devices, self.table_size,
self.memory_key_dim)
memory_values = jax.device_put_sharded(list(memory_values), devices)
else:
memory_values = None
memory_entity_ids = np.arange(self.n_devices * self.table_size).reshape(
self.n_devices, self.table_size)
memory_entity_ids = jax.device_put_sharded(list(memory_entity_ids), devices)
# Use entity id as identifier here
memory_identifiers = memory_entity_ids
(encoded_output, loss_helpers, _), _ = pinit_with_output(
split_rng,
encoded_input,
mention_batch_positions,
mention_start_positions,
mention_end_positions,
mention_mask,
memory_keys_sharded,
memory_identifiers,
memory_entity_ids,
True, # deterministic
memory_values,
text_identifiers=None,
)
attention_weights = loss_helpers['memory_attention_weights']
entity_ids = loss_helpers['top_entity_ids']
normed_input = encoded_input - 1.0
# Check input was changed
self.assertFalse(jnp.allclose(encoded_output, normed_input))
# Check input was not changed where it should not be
all_indices = set(
itertools.product(jnp.arange(self.bsz), jnp.arange(self.seq_len)))
# Note that mention positions is the same across all of the devices
start_indices = set(
zip(mention_batch_positions[0], mention_start_positions[0]))
non_start_indices = all_indices.difference(start_indices)
non_start_indices_1, non_start_indices_2 = zip(*non_start_indices)
non_start_indices_1 = jnp.asarray(non_start_indices_1)
non_start_indices_2 = jnp.asarray(non_start_indices_2)
non_start_outputs = encoded_output[:, non_start_indices_1,
non_start_indices_2]
non_start_inputs = normed_input[:, non_start_indices_1, non_start_indices_2]
self.assertTrue(jnp.allclose(non_start_outputs, non_start_inputs))
# Check shapes as expected
self.assertSequenceEqual(
encoded_output.shape,
(self.n_devices, self.bsz, self.seq_len, self.input_dim))
self.assertSequenceEqual(
attention_weights.shape,
(self.n_devices, n_mentions, self.k_top_post_selection))
self.assertSequenceEqual(
entity_ids.shape,
(self.n_devices, n_mentions, self.k_top_post_selection))
# Check id 0 or 1 retrieved
self.assertTrue(
jnp.all((entity_ids[Ellipsis, 0] == 0) + (entity_ids[Ellipsis, 0] == 1)))
# Set some text identifiers to 0 and others to 1 so that some are binding
text_identifiers = np.zeros((n_mentions), dtype=np.int32)
text_identifiers[:n_mentions // 2] = 1
text_identifiers = jax.device_put_replicated(text_identifiers, devices)
# Initialize and run one forward pass of model
(_, loss_helpers, logging_helpers), _ = pinit_with_output(
split_rng,
encoded_input,
mention_batch_positions,
mention_start_positions,
mention_end_positions,
mention_mask,
memory_keys_sharded,
memory_identifiers,
memory_entity_ids,
True, # deterministic
memory_values, # memory_values
text_identifiers=text_identifiers,
)
attention_weights_wid = loss_helpers['memory_attention_weights']
entity_ids_wid = loss_helpers['top_entity_ids']
n_disallowed = logging_helpers['n_disallowed'][0]
# Check no effect on ids
self.assertTrue(jnp.all(entity_ids == entity_ids_wid))
# Check id 0 or 1 have 0 scores
text_identifiers = jnp.expand_dims(text_identifiers, -1)
score_masked = (text_identifiers == entity_ids_wid) * attention_weights_wid
self.assertAlmostEqual(score_masked.sum(), 0.0)
# Check number disallowed as expected
self.assertEqual(n_disallowed, n_mentions // 2)
def test_memory_attention_backward(self):
test_utils.force_multi_devices(self.n_devices)
devices = jax.local_devices()
model = memory_attention_layer.MemoryAttentionLayer(
memory_key_dim=self.memory_key_dim,
input_dim=self.input_dim,
memory_update_type=self.memory_update_type,
memory_update_config=self.memory_update_config,
k_top_device=self.k_top_device,
k_top_post_selection=self.k_top_post_selection,
splits=self.splits,
dtype=self.dtype)
pinit = jax.pmap(
model.init, axis_name='batch', static_broadcasted_argnums=(9, 10))
rng = jax.random.PRNGKey(0)
split_rng = jax.random.split(rng, self.n_devices)
encoded_input = jnp.ones(
shape=(self.bsz, self.seq_len, self.input_dim), dtype=self.dtype)
encoded_input = jax.device_put_replicated(encoded_input, devices)
mention_batch_positions = jnp.tile(
jnp.asarray([[0], [1], [2]]), (1, self.bsz)).reshape(-1)
mention_batch_positions = jax.device_put_replicated(mention_batch_positions,
devices)
mention_start_positions = jnp.tile(jnp.asarray([0, 5, 10]), (self.bsz))
mention_start_positions = jax.device_put_replicated(mention_start_positions,
devices)
mention_end_positions = jnp.tile(jnp.asarray([2, 7, 12]), (self.bsz))
mention_end_positions = jax.device_put_replicated(mention_end_positions,
devices)
mention_mask = jnp.tile(jnp.asarray([1, 1, 1]), (self.bsz))
mention_mask = jax.device_put_replicated(mention_mask, devices)
memory_table = np.ones(
(self.n_devices * self.table_size, self.memory_key_dim),
dtype=self.dtype)
memory_table = jnp.asarray(memory_table, dtype=self.dtype)
memory_table = memory_table.reshape(self.n_devices, self.rows,
self.table_size // self.rows,
self.memory_key_dim)
memory_table_sharded = jax.device_put_sharded(list(memory_table), devices)
memory_entity_ids = np.arange(self.n_devices * self.table_size).reshape(
self.n_devices, self.table_size)
memory_entity_ids = jax.device_put_sharded(list(memory_entity_ids), devices)
# Use entity id as identifier here
memory_identifiers = memory_entity_ids
initial_parameters = pinit(
split_rng,
encoded_input,
mention_batch_positions,
mention_start_positions,
mention_end_positions,
mention_mask,
memory_table_sharded,
memory_identifiers,
memory_entity_ids,
True, # deterministic
None, # memory_values
text_identifiers=None,
)
def step_fn(
params,
encoded_input,
mention_batch_positions,
mention_start_positions,
mention_end_positions,
mention_mask,
memory_keys,
memory_identifiers,
memory_entity_ids,
):
def loss_fn(params):
encoded_output, _, _ = model.apply(
{'params': params},
rngs=None,
encoded_input=encoded_input,
mention_batch_positions=mention_batch_positions,
mention_start_positions=mention_start_positions,
mention_end_positions=mention_end_positions,
mention_mask=mention_mask,
memory_keys=memory_keys,
memory_identifiers=memory_identifiers,
memory_entity_ids=memory_entity_ids,
deterministic=True,
text_identifiers=None,
)
return encoded_output.sum()
loss, grad = jax.value_and_grad(loss_fn)(params)
return loss, grad
pstep = jax.pmap(step_fn, axis_name='batch')
_ = pstep(
initial_parameters['params'],
encoded_input=encoded_input,
mention_batch_positions=mention_batch_positions,
mention_start_positions=mention_start_positions,
mention_end_positions=mention_end_positions,
mention_mask=mention_mask,
memory_keys=memory_table_sharded,
memory_identifiers=memory_identifiers,
memory_entity_ids=memory_entity_ids,
)
@parameterized.parameters(_gen_compare_retrievals_with_numpy_list())
def test_compare_retrievals_with_numpy(self, seed, k_top_post_selection,
max_text_identifiers,
same_passage_memory_policy):
"""Test whether retrieval results are correct."""
test_utils.force_multi_devices(self.n_devices)
devices = jax.local_devices()
n_text_entities_per_memory = 3
model = memory_attention_layer.MemoryAttentionLayer(
memory_key_dim=self.memory_key_dim,
input_dim=self.input_dim,
memory_update_type=self.memory_update_type,
memory_update_config=self.memory_update_config,
k_top_device=self.k_top_device,
k_top_post_selection=k_top_post_selection,
splits=self.splits,
dtype=self.dtype)
pinit_with_output = jax.pmap(
model.init_with_output,
axis_name='batch',
static_broadcasted_argnums=(9, 10, 13))
rng = jax.random.PRNGKey(seed)
split_rng = jax.random.split(rng, self.n_devices)
encoded_input = jax.random.uniform(
rng,
shape=(self.n_devices, self.bsz, self.seq_len, self.input_dim),
dtype=self.dtype)
mention_batch_positions = jax.random.randint(
rng, minval=0, maxval=self.bsz, shape=(self.n_devices, self.n_mentions))
mention_start_positions = jax.random.randint(
rng,
minval=0,
maxval=self.seq_len,
shape=(self.n_devices, self.n_mentions))
mention_end_positions = mention_start_positions
mention_mask = jnp.ones(shape=(self.n_devices, self.n_mentions))
memory_table = jax.random.uniform(
rng,
shape=(self.n_devices, self.rows, self.table_size // self.rows,
self.memory_key_dim))
memory_entity_ids = jax.random.randint(
rng,
minval=0,
maxval=self.entity_vocab_size,
shape=(self.n_devices, self.table_size))
if max_text_identifiers is not None:
memory_identifiers = jax.random.randint(
rng,
minval=0,
maxval=max_text_identifiers,
shape=(self.n_devices, self.table_size))
text_identifiers = jax.random.randint(
rng,
minval=0,
maxval=max_text_identifiers,
shape=(self.n_devices, self.n_mentions))
else:
text_identifiers = None
if n_text_entities_per_memory is not None:
memory_text_entities = jax.random.randint(
rng,
minval=0,
maxval=self.entity_vocab_size,
shape=(self.n_devices, self.table_size, n_text_entities_per_memory))
else:
memory_text_entities = None
encoded_input_sharded = jax.device_put_sharded(list(encoded_input), devices)
mention_batch_positions_sharded = jax.device_put_sharded(
list(mention_batch_positions), devices)
mention_start_positions_sharded = jax.device_put_sharded(
list(mention_start_positions), devices)
mention_end_positions_sharded = jax.device_put_sharded(
list(mention_end_positions), devices)
mention_mask_sharded = jax.device_put_sharded(list(mention_mask), devices)
memory_table_sharded = jax.device_put_sharded(list(memory_table), devices)
memory_entity_ids_sharded = jax.device_put_sharded(
list(memory_entity_ids), devices)
if max_text_identifiers is not None:
memory_identifiers_sharded = jax.device_put_sharded(
list(memory_identifiers), devices)
text_identifiers_sharded = jax.device_put_sharded(
list(text_identifiers), devices)
else:
memory_identifiers_sharded = None
text_identifiers_sharded = None
if memory_text_entities is not None:
memory_text_entities_sharded = jax.device_put_sharded(
list(memory_text_entities), devices)
else:
memory_text_entities_sharded = None
memory_ids = jnp.arange(self.n_devices * self.table_size)
memory_ids = memory_ids.reshape(self.n_devices, self.table_size)
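    # Global memory index for every (device, within-device) slot, used below to
    # cross-check the manually simulated retrieval against the layer's output.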
(_, loss_helpers, logging_helpers), params = pinit_with_output(
split_rng,
encoded_input_sharded,
mention_batch_positions_sharded,
mention_start_positions_sharded,
mention_end_positions_sharded,
mention_mask_sharded,
memory_table_sharded,
memory_identifiers_sharded,
memory_entity_ids_sharded,
True,
None, # memory_values
text_identifiers_sharded,
memory_text_entities_sharded,
same_passage_memory_policy,
)
params = params.unfreeze()['params']
mention_encodings = []
for device_id in range(self.n_devices):
mention_start_encodings = encoded_input[device_id][
mention_batch_positions[device_id],
mention_start_positions[device_id]]
mention_end_encodings = encoded_input[device_id][
mention_batch_positions[device_id], mention_end_positions[device_id]]
mention_encodings_on_device = jnp.concatenate(
[mention_start_encodings, mention_end_encodings], axis=-1)
mention_encodings_on_device = np.matmul(
mention_encodings_on_device,
params['query_projector']['kernel'][device_id])
mention_encodings_on_device += params['query_projector']['bias'][
device_id]
mention_encodings.append(mention_encodings_on_device)
# [n_devices, n_mentions, memory_key_dim]
mention_encodings_stacked = jnp.stack(mention_encodings)
mention_encodings_stacked = mention_encodings_stacked.reshape(
[self.n_devices * self.n_mentions, self.memory_key_dim])
# Object which represents a single retrieval result with additional info.
RetrievedMemory = collections.namedtuple('RetrievedMemory', [
'device', 'row', 'rowwise_index', 'devicewise_index', 'global_index',
'score', 'memory', 'entity_id', 'memory_hash',
'memory_passage_text_entities'
])
num_disallowed_per_device = [0 for _ in range(self.n_devices)]
    # Manually simulate retrieval for every query
for query_id in range(self.n_devices * self.n_mentions):
query = mention_encodings_stacked[query_id]
top_memories_query = []
      # Collect retrievals for a single query on each device separately
for device_id in range(self.n_devices):
top_memories_per_device = []
for row_id in range(self.rows):
scores = np.einsum('mh,h->m', memory_table[device_id, row_id], query)
top_index = np.argmax(scores)
devicewise_index = row_id * (self.table_size // self.rows) + top_index
global_index = memory_ids[device_id, devicewise_index]
self.assertEqual(global_index,
devicewise_index + device_id * self.table_size)
if max_text_identifiers is not None:
            memory_hash = memory_identifiers[device_id, devicewise_index]
        ratio_3p_spliced_out_all = \
row[FeatureCounts.splice_out_all_colname] / mean_profile_3p
ratio_5p_spliced_in_border = \
row[FeatureCounts.splice_in_borders_colname] / mean_profile_5p
ratio_3p_spliced_out_border = \
row[FeatureCounts.splice_out_borders_colname] / mean_profile_3p
ratio_5p_unspliced = \
row[FeatureCounts.unspliced_5pSS_colname] / mean_profile_5p
ratio_3p_unspliced = \
row[FeatureCounts.unspliced_3pSS_colname] / mean_profile_3p
# _____________________________________________________________________
# ---------------------------------------------------------------------
# PROFILE-INDEPENDENT FEATURES
# ---------------------------------------------------------------------
# Create new feature that adds-up the splice-IN/OUT and crossing-IN/OUT
# borders
# ReadsIN_borders = row['SpliceInBorders'] + row['Unspliced_5pSS']
# ReadsOUT_borders = row['SpliceOutBorders'] + row['Unspliced_3pSS']
# ReadsOUTvsIN_borders = \
# ((ReadsOUT_borders+np.finfo(float).eps) /
# (ReadsIN_borders+np.finfo(float).eps))
# all
ReadsIN_all = \
(row[FeatureCounts.splice_in_all_colname] +
row[FeatureCounts.unspliced_5pSS_colname])
ReadsOUT_all = \
(row[FeatureCounts.splice_out_all_colname] +
row[FeatureCounts.unspliced_3pSS_colname])
ReadsOUTvsIN_all = \
((ReadsOUT_all + np.finfo(float).eps) /
(ReadsIN_all + np.finfo(float).eps))
# Calculate further IN-OUT ratios
# SpliceOUTvsIN_all = \
# ((row['SpliceOutAll'] + np.finfo(float).eps) /
# (row['SpliceInAll'] + np.finfo(float).eps))
# SpliceOUTvsIN_borders = \
# ((row['SpliceOutBorders'] + np.finfo(float).eps) /
# (row['SpliceInBorders'] + np.finfo(float).eps))
SpliceINbordersVSall = \
((row[FeatureCounts.splice_in_borders_colname] +
np.finfo(float).eps) /
(row[FeatureCounts.splice_in_all_colname] +
np.finfo(float).eps))
# get the gene expression
GeneExpressionPerKBApproximated = \
row[FeatureCounts.GeneExpressionPerKBApproximated_colname]
# add up all reads in order to get the region expression
RegionExpression = \
((row[FeatureCounts.splice_in_all_colname] +
row[FeatureCounts.splice_out_all_colname] +
row[FeatureCounts.unspliced_5pSS_colname] +
row[FeatureCounts.unspliced_3pSS_colname] +
row[FeatureCounts.unspliced_feature_colname]) /
float(region_length)) * 1000
# Calculate the ratio of the expression of the region to
# the total gene expression
RegionExpressionRatio = \
RegionExpression / GeneExpressionPerKBApproximated
# Finally, store everything in a dictionary.
results = \
{'SpliceInAll_vs_profile_ratio': ratio_5p_spliced_in_all,
'SpliceOutAll_vs_profile_ratio': ratio_3p_spliced_out_all,
'SpliceInBorders_vs_profile_ratio': ratio_5p_spliced_in_border,
'SpliceOutBorders_vs_profile_ratio': ratio_3p_spliced_out_border,
'CrossingInBorders_vs_profile_ratio': ratio_5p_unspliced,
'CrossingOutBorders_vs_profile_ratio': ratio_3p_unspliced,
'entropy_efficiency': entropy_efficiency,
'region_length': region_length,
'ReadsOUTvsIN_all': ReadsOUTvsIN_all,
'SpliceINbordersVSall': SpliceINbordersVSall,
'RegionExpressionRatio': RegionExpressionRatio}
# add the quantiles
for idx, quantile in enumerate(inverse_cdf_terminal_profile_norm):
results[("absCDF_quant" + str(inverse_cdf_quantiles[idx]))] = \
inverse_cdf_terminal_profile_norm[idx]
# We do not use them because we use the crossings to filter
# the positive training set
        # and because in introns the crossing counts will not be zero
# 'CrossingInBorders_vs_profile_ratio' : ratio_5p_unspliced,
# 'CrossingOutBorders_vs_profile_ratio' : ratio_3p_unspliced,
# Tell the module which features we have
self.features = list(results.keys())
        # return the features
        return pd.Series(results)
def calculate_features_for_training_dataframes(
self,
output_files_dir,
verbose=False
):
"""
Method that calculates features and adds it to the training dataframes.
"""
# _____________________________________________________________________
# ---------------------------------------------------------------------
# Calculate features for TERMINAL EXONS
# ---------------------------------------------------------------------
# get the features
if verbose:
sys.stdout.write("Calculating features for " +
"terminal exon training data...\n")
# calculate the features
TE_feat = \
self.terminal_exon_training_data.merge(
self.terminal_exon_training_data.apply(
self.calculate_features,
axis=1,
results_dir_path=os.path.join(
output_files_dir,
"terminal_exon_training_data")),
left_index=True,
right_index=True)
        # drop NA values (might occur if it was not
# possible to calculate one or more features)
nr_TE_datasets = TE_feat.shape[0]
TE_feat.dropna(axis=0,
how='any',
thresh=None,
subset=None,
inplace=True)
if verbose:
data_fraction_with_features = \
float(TE_feat.shape[0]) / float(nr_TE_datasets)
sys.stdout.write(
" :: terminal exon training data set fraction for which features could be calculated: {} {} ".format(
data_fraction_with_features,
os.linesep
)
)
# overwrite the old version that lacks features
self.terminal_exon_training_data = TE_feat.copy()
# # write out to DEBUG
# self.terminal_exon_training_data.to_csv(
# os.path.join(output_files_dir,
# 'DEBUG_terminal_training.tsv'),
# sep='\t',
# index=True
# )
# clean up...
del(TE_feat)
# _____________________________________________________________________
# ---------------------------------------------------------------------
# Calculate features for INTERMEDIATE EXONS
# ---------------------------------------------------------------------
# get the features
if verbose:
sys.stdout.write(
"Calculating features for intermediate exon training data..." +
os.linesep
)
# calculate the features
IE_feat = self.intermediate_exon_training_data.merge(
self.intermediate_exon_training_data.apply(
self.calculate_features,
axis=1,
results_dir_path=os.path.join(
output_files_dir,
"intermediate_exon_training_data")),
left_index=True,
right_index=True)
        # drop NA values (might occur if it was not possible
# to calculate one or more features)
nr_IE_datasets = IE_feat.shape[0]
IE_feat.dropna(axis=0,
how='any',
thresh=None,
subset=None,
inplace=True)
if verbose:
data_fraction_with_features = \
float(IE_feat.shape[0]) / float(nr_IE_datasets)
sys.stdout.write(
" :: intermediate exon training data set fraction for which features could be calculated: {} {} ".format(
data_fraction_with_features,
os.linesep
)
)
# overwrite the old version that lacks features
self.intermediate_exon_training_data = IE_feat.copy()
# write out to DEBUG
# self.intermediate_exon_training_data.to_csv(
# os.path.join(output_files_dir,
# 'DEBUG_intermediate_training.tsv'),
# sep='\t',
# index=True
# )
# clean up...
del(IE_feat)
# _____________________________________________________________________
# ---------------------------------------------------------------------
# Calculate features for BACKGROUND REGIONS
# ---------------------------------------------------------------------
# get the features
if verbose:
sys.stdout.write(
"Calculating features for background regions..." + os.linesep
)
# calculate the features
BG_feat = self.background_training_data.merge(
self.background_training_data.apply(
self.calculate_features,
axis=1,
results_dir_path=os.path.join(
output_files_dir,
"background_training_data")),
left_index=True,
right_index=True
)
        # drop NA values (might occur if it was not possible to calculate
# one or more features)
nr_BG_datasets = BG_feat.shape[0]
BG_feat.dropna(axis=0,
how='any',
thresh=None,
subset=None,
inplace=True)
if verbose:
data_fraction_with_features = \
float(BG_feat.shape[0]) / float(nr_BG_datasets)
sys.stdout.write(
" :: background region training data set fraction for which features could be calculated: {} {} ".format(
data_fraction_with_features,
os.linesep
)
)
# overwrite the old version that lacks features
self.background_training_data = BG_feat.copy()
# write out to DEBUG
# self.background_training_data.to_csv(
# os.path.join(output_files_dir,
# 'DEBUG_background_training.tsv'),
# sep='\t',
# index=True
# )
# clean up...
del(BG_feat)
# def plot_confusion_matrix(
# self,
# cm,
# file_path,
# normalize=False,
# title='Confusion matrix',
# cmap=plt.cm.Blues
# ):
# """
# Method that plots a confusion matrix.
# Normalization can be applied by setting "normalize=True".
# """
# plt.figure()
# plt.imshow(cm, interpolation='nearest', cmap=cmap)
# plt.title(title)
# plt.colorbar()
# tick_marks = np.arange(len(self.region_classes))
# plt.xticks(tick_marks, self.region_classes, rotation=45)
# plt.yticks(tick_marks, self.region_classes)
# if normalize:
# cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
# thresh = cm.max() / 2.0
# for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
# plt.text(j, i, str("%0.2f" % cm[i, j]),
# horizontalalignment="center",
# color="white" if cm[i, j] > thresh else "black")
# plt.tight_layout()
# plt.ylabel('True label')
# plt.xlabel('Predicted label')
# plt.savefig(file_path)
# plt.close('all')
def train_classifier(self, results_dir_path, nr_of_train_vs_test_runs = 25, verbose = False):
"""
Method to train the classifier. 'nr_of_train_vs_test_runs' runs will be done
and the results will be reported in the results_dir_path directory. However, only the
last classifier will be stored in the MachineLearningUnit object for subsequent use.
"""
if verbose: sys.stdout.write("Training classifier...\n")
# Check whether we have data to train the classifier on
if self.training_df is None:
sys.stderr.write("ERROR: no training data ('training_df') available!")
sys.exit(-1)
# _____________________________________________________________________________
# -----------------------------------------------------------------------------
# Training
# -----------------------------------------------------------------------------
n_neighbors = 3
weights = 'uniform'
##weights = 'distance'
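        # KNN hyperparameters: three neighbours with uniform vote weighting
        # (the distance-weighted alternative is left commented out above).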
# create results directory name
results_dir = os.path.join(results_dir_path, ('KNeighborsClassifier_%s_%sNodes' % (weights, str(n_neighbors))))
if not os.path.exists(results_dir):
os.makedirs(results_dir)
# create lists that we can use for printing multiple results
accuracy_scores_list = list()
precision_scores_list = list()
recall_scores_list = list()
f1score_scores_list = list()
# _____________________________________________________________________
# ---------------------------------------------------------------------
# create multiple runs so that we see how stable our results are
for i in range(nr_of_train_vs_test_runs):
# _________________________________________________________________
# -----------------------------------------------------------------
# Split data
# -----------------------------------------------------------------
X_train, X_test, y_train, y_test = \
train_test_split(self.training_df[self.features],
self.training_df[self.class_col],
test_size = 0.2,
random_state = random.randint(0,1000))
# _________________________________________________________________
# -----------------------------------------------------------------
# Model training
# -----------------------------------------------------------------
# get the classifier
self.classifier = neighbors.KNeighborsClassifier(n_neighbors,
weights=weights)
# fit the classifier
self.classifier.fit(X_train, y_train)
# _________________________________________________________________
# -----------------------------------------------------------------
# Model validation
# -----------------------------------------------------------------
# TODO:
# Suggestion from Andrea
            # Use predict_proba() -> returns a list of the probabilities for each class
# -----------------------------------------------------------------
# perform predictions on the test set
y_pred = self.classifier.predict(X_test)
y_true = y_test.tolist()
# -----------------------------------------------------------------
# calculate the accuracy
# -----------------------------------------------------------------
accuracy = accuracy_score(y_true = y_true, y_pred = y_pred,
normalize = True)
accuracy_scores_list.append(accuracy)
# -----------------------------------------------------------------
            # create confusion matrices
# -----------------------------------------------------------------
cnf_matrix = confusion_matrix(y_true = y_true, y_pred = y_pred,
labels = self.region_classes)
# Plot non-normalized confusion matrix
# -----------------------------------------------------------------
# TODO:
# Suggestion from Andrea
# use .roc_score
# use .roc_auc_score
# -----------------------------------------------------------------
cm_file_name = ("normalized_confusion_matrix_RUN_%s.png" % str(i))
# self.plot_confusion_matrix(cnf_matrix,
# file_path=os.path.join(results_dir, cm_file_name),
# normalize=True,
# title='Confusion matrix')
# -----------------------------------------------------------------
            # create precision, recall and F1-scores
# -----------------------------------------------------------------
precision_scores_list.append(metrics.precision_score(y_true = y_true, y_pred = y_pred, average='macro'))
recall_scores_list.append(metrics.recall_score(y_true = y_true, y_pred = y_pred, average='micro'))
f1score_scores_list.append(metrics.f1_score(y_true = y_true, y_pred = y_pred, average='weighted'))
# _____________________________________________________________________
# ---------------------------------------------------------------------
# print accuracy
plt.hist(accuracy_scores_list, bins = np.arange(0.0, 1.005, 0.005))
plt.savefig(os.path.join(results_dir, "accuracy.png"))
plt.close('all')
        # print precision
plt.hist(precision_scores_list, bins = np.arange(0.0, 1.005, 0.005))
plt.savefig(os.path.join(results_dir, "precission.png"))
plt.close('all')
# print recall
plt.hist(recall_scores_list, bins = np.arange(0.0, 1.005, 0.005))
plt.savefig(os.path.join(results_dir, "recall.png"))
plt.close('all')
# print F1-score
plt.hist(f1score_scores_list, bins = np.arange(0.0, 1.005, 0.005))
plt.savefig(os.path.join(results_dir, "f1.png"))
plt.close('all')
def training(
self,
classifier,
features_for_training,
number_of_randomization=10
):
"""
        Evaluate classifier performance on a specific subset of
        features, repeated number_of_randomization times.
"""
# create lists that we can use for printing multiple results
training_scores_list = list()
# create multiple runs so that we see how stable our results are
for i in range(number_of_randomization):
            X_train,
<gh_stars>1000+
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import redirect_stderr
from io import StringIO
from re import escape
from unittest.mock import Mock
import pytest
from torch.utils.data import BatchSampler, DataLoader, DistributedSampler, Sampler, SequentialSampler
from pytorch_lightning import Trainer
from pytorch_lightning.strategies import DDPSpawnStrategy
from pytorch_lightning.trainer.connectors.data_connector import _DataHookSelector, _DataLoaderSource, warning_cache
from pytorch_lightning.trainer.states import RunningStage, TrainerFn
from pytorch_lightning.trainer.supporters import CombinedLoader
from pytorch_lightning.utilities.data import _update_dataloader
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.warnings import PossibleUserWarning
from tests.helpers.boring_model import BoringDataModule, BoringModel, RandomDataset
from tests.helpers.runif import RunIf
from tests.helpers.utils import no_warning_call
@RunIf(skip_windows=True)
@pytest.mark.parametrize("mode", (1, 2))
def test_replace_distributed_sampler(tmpdir, mode):
class IndexedRandomDataset(RandomDataset):
def __getitem__(self, index):
return self.data[index]
class CustomDataLoader(DataLoader):
def __init__(self, num_features, dataset, *args, **kwargs):
# argument `num_features` unused on purpose
# it gets automatically captured by _replace_dataloader_init_method()
super().__init__(dataset, *args, **kwargs)
class CustomBatchSampler(BatchSampler):
pass
class TestModel(BoringModel):
def __init__(self, numbers_test_dataloaders, mode):
super().__init__()
self._numbers_test_dataloaders = numbers_test_dataloaders
self._mode = mode
def test_step(self, batch, batch_idx, dataloader_idx=0):
return super().test_step(batch, batch_idx)
def on_test_start(self) -> None:
dataloader = self.trainer.test_dataloaders[0]
assert isinstance(dataloader, CustomDataLoader)
batch_sampler = dataloader.batch_sampler
if self._mode == 1:
assert isinstance(batch_sampler, CustomBatchSampler)
# the batch_size is set on the batch sampler
assert dataloader.batch_size is None
elif self._mode == 2:
assert type(batch_sampler) is BatchSampler
assert dataloader.batch_size == self._mode
assert batch_sampler.batch_size == self._mode
assert batch_sampler.drop_last
# the sampler has been replaced
assert isinstance(batch_sampler.sampler, DistributedSampler)
def create_dataset(self):
dataset = IndexedRandomDataset(32, 64)
if self._mode == 1:
# with a custom batch sampler
batch_sampler = CustomBatchSampler(SequentialSampler(dataset), batch_size=1, drop_last=True)
return CustomDataLoader(32, dataset, batch_sampler=batch_sampler)
elif self._mode == 2:
# with no batch sampler provided
return CustomDataLoader(32, dataset, batch_size=2, drop_last=True)
def test_dataloader(self):
return [self.create_dataset()] * self._numbers_test_dataloaders
model = TestModel(2, mode)
model.test_epoch_end = None
trainer = Trainer(
default_root_dir=tmpdir,
limit_test_batches=2,
accelerator="cpu",
devices=1,
strategy="ddp_find_unused_parameters_false",
)
trainer.test(model)
class TestSpawnBoringModel(BoringModel):
def __init__(self, num_workers):
super().__init__()
self.num_workers = num_workers
def train_dataloader(self):
return DataLoader(RandomDataset(32, 64), num_workers=self.num_workers)
def on_fit_start(self):
self._resout = StringIO()
self.ctx = redirect_stderr(self._resout)
self.ctx.__enter__()
def on_train_end(self):
def _get_warning_msg():
dl = self.trainer.train_dataloader.loaders
if hasattr(dl, "persistent_workers"):
if self.num_workers == 0:
warn_str = "Consider setting num_workers>0 and persistent_workers=True"
else:
warn_str = "Consider setting persistent_workers=True"
else:
warn_str = "Consider setting strategy=ddp"
return warn_str
if self.trainer.is_global_zero:
self.ctx.__exit__(None, None, None)
msg = self._resout.getvalue()
warn_str = _get_warning_msg()
assert warn_str in msg
@RunIf(skip_windows=True)
@pytest.mark.parametrize("num_workers", [0, 1])
def test_dataloader_warnings(tmpdir, num_workers):
trainer = Trainer(default_root_dir=tmpdir, accelerator="cpu", devices=2, strategy="ddp_spawn", fast_dev_run=4)
assert isinstance(trainer.strategy, DDPSpawnStrategy)
trainer.fit(TestSpawnBoringModel(num_workers))
def test_update_dataloader_raises():
with pytest.raises(ValueError, match="needs to subclass `torch.utils.data.DataLoader"):
_update_dataloader(object(), object(), mode="fit")
def test_dataloaders_with_missing_keyword_arguments():
ds = RandomDataset(10, 20)
class TestDataLoader(DataLoader):
def __init__(self, dataset):
super().__init__(dataset)
loader = TestDataLoader(ds)
sampler = SequentialSampler(ds)
match = escape("missing arguments are ['batch_sampler', 'sampler', 'shuffle']")
with pytest.raises(MisconfigurationException, match=match):
_update_dataloader(loader, sampler, mode="fit")
match = escape("missing arguments are ['batch_sampler', 'batch_size', 'drop_last', 'sampler', 'shuffle']")
with pytest.raises(MisconfigurationException, match=match):
_update_dataloader(loader, sampler, mode="predict")
class TestDataLoader(DataLoader):
def __init__(self, dataset, *args, **kwargs):
super().__init__(dataset)
loader = TestDataLoader(ds)
sampler = SequentialSampler(ds)
_update_dataloader(loader, sampler, mode="fit")
_update_dataloader(loader, sampler, mode="predict")
class TestDataLoader(DataLoader):
def __init__(self, *foo, **bar):
super().__init__(*foo, **bar)
loader = TestDataLoader(ds)
sampler = SequentialSampler(ds)
_update_dataloader(loader, sampler, mode="fit")
_update_dataloader(loader, sampler, mode="predict")
class TestDataLoader(DataLoader):
def __init__(self, num_feat, dataset, *args, shuffle=False):
self.num_feat = num_feat
super().__init__(dataset)
loader = TestDataLoader(1, ds)
sampler = SequentialSampler(ds)
match = escape("missing arguments are ['batch_sampler', 'sampler']")
with pytest.raises(MisconfigurationException, match=match):
_update_dataloader(loader, sampler, mode="fit")
match = escape("missing arguments are ['batch_sampler', 'batch_size', 'drop_last', 'sampler']")
with pytest.raises(MisconfigurationException, match=match):
_update_dataloader(loader, sampler, mode="predict")
class TestDataLoader(DataLoader):
def __init__(self, num_feat, dataset, **kwargs):
self.feat_num = num_feat
super().__init__(dataset)
loader = TestDataLoader(1, ds)
sampler = SequentialSampler(ds)
match = escape("missing attributes are ['num_feat']")
with pytest.raises(MisconfigurationException, match=match):
_update_dataloader(loader, sampler, mode="fit")
match = escape("missing attributes are ['num_feat']")
with pytest.raises(MisconfigurationException, match=match):
_update_dataloader(loader, sampler, mode="predict")
def test_update_dataloader_with_multiprocessing_context():
"""This test verifies that replace_sampler conserves multiprocessing context."""
train = RandomDataset(32, 64)
context = "spawn"
train = DataLoader(train, batch_size=32, num_workers=2, multiprocessing_context=context, shuffle=True)
new_data_loader = _update_dataloader(train, SequentialSampler(train.dataset))
assert new_data_loader.multiprocessing_context == train.multiprocessing_context
def test_dataloader_reinit_for_subclass():
class CustomDataLoader(DataLoader):
def __init__(
self,
dataset,
batch_size=1,
shuffle=False,
sampler=None,
batch_sampler=None,
num_workers=0,
collate_fn=None,
pin_memory=False,
drop_last=False,
timeout=0,
worker_init_fn=None,
dummy_kwarg=None,
):
super().__init__(
dataset,
batch_size,
shuffle,
sampler,
batch_sampler,
num_workers,
collate_fn,
pin_memory,
drop_last,
timeout,
worker_init_fn,
)
self.dummy_kwarg = dummy_kwarg
self.something_unrelated = 1
trainer = Trainer(accelerator="cpu", devices=2, strategy="ddp_spawn")
class CustomDummyObj:
sampler = None
result = trainer._data_connector._prepare_dataloader(CustomDummyObj(), shuffle=True)
assert isinstance(result, CustomDummyObj), "Wrongly reinstantiated data loader"
dataset = list(range(10))
result = trainer._data_connector._prepare_dataloader(CustomDataLoader(dataset), shuffle=True)
assert isinstance(result, DataLoader)
assert isinstance(result, CustomDataLoader)
assert result.dummy_kwarg is None
# Shuffled DataLoader should also work
result = trainer._data_connector._prepare_dataloader(CustomDataLoader(dataset, shuffle=True), shuffle=True)
assert isinstance(result, DataLoader)
assert isinstance(result, CustomDataLoader)
assert result.dummy_kwarg is None
class CustomSampler(Sampler):
pass
# Should raise an error if existing sampler is being replaced
dataloader = CustomDataLoader(dataset, sampler=CustomSampler(dataset))
with pytest.raises(MisconfigurationException, match="will be replaced by `DistributedSampler`"):
trainer._data_connector._prepare_dataloader(dataloader, shuffle=True)
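# Model whose step hooks assert that the trainer-attached dataloaders (length
# 10) are the ones actually used; test_loader_detaching below additionally
# checks that the model's own *_dataloader hooks (length 64) are restored.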
class LoaderTestModel(BoringModel):
def training_step(self, batch, batch_idx):
assert len(self.trainer.train_dataloader.loaders) == 10
return super().training_step(batch, batch_idx)
def validation_step(self, batch, batch_idx):
assert len(self.trainer.val_dataloaders[0]) == 10
return super().validation_step(batch, batch_idx)
def test_step(self, batch, batch_idx):
assert len(self.trainer.test_dataloaders[0]) == 10
return super().test_step(batch, batch_idx)
def predict_step(self, batch, batch_idx, dataloader_idx=0):
assert len(self.trainer.predict_dataloaders[0]) == 10
return super().predict_step(batch, batch_idx, dataloader_idx=dataloader_idx)
def test_loader_detaching():
"""Checks that the loader has been reset after the entrypoint."""
loader = DataLoader(RandomDataset(32, 10), batch_size=1)
model = LoaderTestModel()
assert len(model.train_dataloader()) == 64
assert len(model.val_dataloader()) == 64
assert len(model.predict_dataloader()) == 64
assert len(model.test_dataloader()) == 64
trainer = Trainer(fast_dev_run=1)
trainer.fit(model, loader, loader)
assert len(model.train_dataloader()) == 64
assert len(model.val_dataloader()) == 64
assert len(model.predict_dataloader()) == 64
assert len(model.test_dataloader()) == 64
trainer.validate(model, loader)
assert len(model.train_dataloader()) == 64
assert len(model.val_dataloader()) == 64
assert len(model.predict_dataloader()) == 64
assert len(model.test_dataloader()) == 64
trainer.predict(model, loader)
assert len(model.train_dataloader()) == 64
assert len(model.val_dataloader()) == 64
assert len(model.predict_dataloader()) == 64
assert len(model.test_dataloader()) == 64
trainer.test(model, loader)
assert len(model.train_dataloader()) == 64
assert len(model.val_dataloader()) == 64
assert len(model.predict_dataloader()) == 64
assert len(model.test_dataloader()) == 64
def test_pre_made_batches():
"""Check that loader works with pre-made batches."""
loader = DataLoader(RandomDataset(32, 10), batch_size=None)
trainer = Trainer(fast_dev_run=1)
trainer.predict(LoaderTestModel(), loader)
def test_error_raised_with_float_limited_eval_batches():
"""Test that an error is raised if there are not enough batches when passed with float value of
limit_eval_batches."""
model = BoringModel()
dl_size = len(model.val_dataloader())
limit_val_batches = 1 / (dl_size + 2)
trainer = Trainer(limit_val_batches=limit_val_batches)
trainer._data_connector.attach_data(model)
with pytest.raises(
MisconfigurationException,
match=rf"{limit_val_batches} \* {dl_size} < 1. Please increase the `limit_val_batches`",
):
trainer._data_connector._reset_eval_dataloader(RunningStage.VALIDATING, model)
@pytest.mark.parametrize(
"val_dl,warns",
[
(DataLoader(dataset=RandomDataset(32, 64), shuffle=True), True),
(DataLoader(dataset=RandomDataset(32, 64), sampler=list(range(64))), False),
(CombinedLoader(DataLoader(dataset=RandomDataset(32, 64), shuffle=True)), True),
(
CombinedLoader(
[DataLoader(dataset=RandomDataset(32, 64)), DataLoader(dataset=RandomDataset(32, 64), shuffle=True)]
),
True,
),
(
CombinedLoader(
{
"dl1": DataLoader(dataset=RandomDataset(32, 64)),
"dl2": DataLoader(dataset=RandomDataset(32, 64), shuffle=True),
}
),
True,
),
],
)
def test_non_sequential_sampler_warning_is_raised_for_eval_dataloader(val_dl, warns):
trainer = Trainer()
model = BoringModel()
trainer._data_connector.attach_data(model, val_dataloaders=val_dl)
context = pytest.warns if warns else no_warning_call
with context(PossibleUserWarning, match="recommended .* turn shuffling off for val/test/predict"):
trainer._data_connector._reset_eval_dataloader(RunningStage.VALIDATING, model)
class NoDataLoaderModel(BoringModel):
def __init__(self):
super().__init__()
self.train_dataloader = None
self.val_dataloader = None
self.test_dataloader = None
self.predict_dataloader = None
@pytest.mark.parametrize(
"instance,available",
[
(None, True),
(BoringModel().train_dataloader(), True),
(BoringModel(), True),
(NoDataLoaderModel(), False),
(BoringDataModule(), True),
],
)
def test_dataloader_source_available(instance, available):
"""Test the availability check for _DataLoaderSource."""
source = _DataLoaderSource(instance=instance, name="train_dataloader")
assert source.is_defined() is available
def test_dataloader_source_direct_access():
"""Test requesting a dataloader when the source is already a dataloader."""
dataloader = BoringModel().train_dataloader()
source = _DataLoaderSource(instance=dataloader, name="any")
assert not source.is_module()
assert source.is_defined()
assert source.dataloader() is dataloader
def test_dataloader_source_request_from_module():
"""Test requesting a dataloader from a module works."""
module = BoringModel()
module.trainer = Trainer()
module.foo = Mock(return_value=module.train_dataloader())
source = _DataLoaderSource(module, "foo")
assert source.is_module()
module.foo.assert_not_called()
assert isinstance(source.dataloader(), DataLoader)
module.foo.assert_called_once()
@pytest.mark.parametrize(
"hook_name", ("on_before_batch_transfer", "transfer_batch_to_device", "on_after_batch_transfer")
)
class TestDataHookSelector:
def overridden_func(self, batch, *args, **kwargs):
return batch
def reset_instances(self):
warning_cache.clear()
return BoringDataModule(), BoringModel(), Trainer()
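# warning_cache deduplicates warnings across calls, so it is cleared here to keep
# each parametrized case's warning assertions independent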
def test_no_datamodule_no_overridden(self, hook_name):
model, _, trainer = self.reset_instances()
trainer._data_connector.attach_datamodule(model, datamodule=None)
with no_warning_call(match=f"have overridden `{hook_name}` in"):
hook = trainer._data_connector._datahook_selector.get_hook(hook_name)
assert hook == getattr(model, hook_name)
def test_with_datamodule_no_overridden(self, hook_name):
model, dm, trainer = self.reset_instances()
trainer._data_connector.attach_datamodule(model, datamodule=dm)
with no_warning_call(match=f"have overridden `{hook_name}` in"):
hook = trainer._data_connector._datahook_selector.get_hook(hook_name)
assert hook == getattr(model, hook_name)
def test_override_model_hook(self, hook_name):
model, dm, trainer = self.reset_instances()
trainer._data_connector.attach_datamodule(model, datamodule=dm)
with no_warning_call(match=f"have overridden `{hook_name}` in"):
hook = trainer._data_connector._datahook_selector.get_hook(hook_name)
assert hook == getattr(model, hook_name)
def test_override_datamodule_hook(self, hook_name):
model, dm, trainer = self.reset_instances()
trainer._data_connector.attach_datamodule(model, datamodule=dm)
setattr(dm, hook_name, self.overridden_func)
with no_warning_call(match=f"have overridden `{hook_name}` in"):
hook = trainer._data_connector._datahook_selector.get_hook(hook_name)
assert hook == getattr(dm, hook_name)
def test_override_both_model_and_datamodule(self, hook_name):
model, dm, trainer = self.reset_instances()
trainer._data_connector.attach_datamodule(model, datamodule=dm)
setattr(model, hook_name, self.overridden_func)
setattr(dm, hook_name, self.overridden_func)
<reponame>Soton-Song/valentine
import operator
import time
from collections import defaultdict
from enum import Enum
import numpy as np
from datasketch import MinHash, MinHashLSH
from nltk.corpus import stopwords
import algorithms.sem_prop.ontomatch.matcher_lib_utils as utils
from algorithms.sem_prop.dataanalysis import dataanalysis as da
from algorithms.sem_prop.dataanalysis import nlp_utils as nlp
from algorithms.sem_prop.knowledgerepr.networkbuilder import LSHRandomProjectionsIndex
from algorithms.sem_prop.ontomatch import glove_api
from algorithms.sem_prop.ontomatch import ss_utils as SS
from algorithms.sem_prop.ontomatch.matching import Matching
from algorithms.sem_prop.ontomatch.simple_trie import SimpleTrie
class MatchingType(Enum):
L1_CLASSNAME_ATTRVALUE = 0
L2_CLASSVALUE_ATTRVALUE = 1
L3_CLASSCTX_RELATIONCTX = 2
L4_CLASSNAME_RELATIONNAME_SYN = 3
L42_CLASSNAME_RELATIONNAME_SEM = 4
L5_CLASSNAME_ATTRNAME_SYN = 5
L52_CLASSNAME_ATTRNAME_SEM = 6
L6_CLASSNAME_RELATION_SEMSIG = 7
L7_CLASSNAME_ATTRNAME_FUZZY = 8
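# The level names appear to encode what is compared on each side: the ontology
# class (name, value or context) versus the schema attribute/relation (name or
# value), plus the technique (SYN = syntactic, SEM = semantic embedding,
# FUZZY = fuzzy name match). This reading is inferred from the member names only.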
# double check for better recall
def double_check_sem_signal_attr_sch_sch(attribute1, attribute2,
penalize_unknown_word=True,
add_exact_matches=True):
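# Sketch of the flow below: each attribute name is normalised (camelCase -> snake_case,
# lower-cased, underscores to spaces) and turned into a list of GloVe vectors for its
# non-stopword tokens; the two vector sets are then compared with
# SS.compute_semantic_similarity. Illustrative call (attribute names hypothetical):
#   sim, signal = double_check_sem_signal_attr_sch_sch('employeeName', 'staff member')
# Callers such as get_sem_similar_matchings_from() treat signal together with a high
# sim (>= 0.85) as semantic confirmation of a match.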
def getSVS(attribute):
svs = []
field_name = attribute
field_name = nlp.camelcase_to_snakecase(field_name)
field_name = field_name.lower()
field_name = field_name.replace('_', ' ')
for token in field_name.split():
if token not in stopwords.words('english'):
sv = glove_api.get_embedding_for_word(token)
if sv is not None:
svs.append(sv)
return svs, field_name
svs1, field_name1 = getSVS(attribute1)
svs2, field_name2 = getSVS(attribute2)
if not add_exact_matches:
ban_index1, ban_index2 = get_ban_indexes(field_name1, field_name2)
svs_rel = remove_banned_vectors(ban_index1, svs1)
svs_cla = remove_banned_vectors(ban_index2, svs2)
else:
svs_rel = svs1
svs_cla = svs2
semantic_sim, neg_signal = SS.compute_semantic_similarity(svs_rel, svs_cla,
penalize_unknown_word=penalize_unknown_word,
add_exact_matches=add_exact_matches)
return semantic_sim, neg_signal
def remove_intuitive_description(attribute1, attribute2):
intuitive_description = ['_id', '_name', '_type', '_class', '_parameters', '_units', '_desc', 'res_']
tokens1 = attribute1.lower().replace('_', ' ').split()
tokens2 = attribute2.lower().replace('_', ' ').split()
if attribute2.lower() in attribute1.lower() and len(tokens1) > 1 and len(tokens2) == 1:
for el in intuitive_description:
if not (attribute2.lower() in el.replace('_', '').lower()):
attribute1 = attribute1.replace(el, '')
return attribute1
def summarize_matchings_to_ancestor(om, matchings, threshold_to_summarize=2, summarize_or_remove=True, summary_ratio=0.8):
def get_sem_similar_matchings_from(matchings):
matchings_to_keep = []
for el in matchings:
# double check using the semantic signal
attribute2 = el[1][1]
if el[0][2] == '_':
attribute1 = el[0][1]
else:
attribute1 = el[0][2]
attribute1 = remove_intuitive_description(attribute1, attribute2)
semantic_sim, signal = double_check_sem_signal_attr_sch_sch(attribute1, attribute2, False)
if signal and semantic_sim >= 0.85:
matchings_to_keep.append(el)
return matchings_to_keep
def summarize(matchings, handler):
sequences = list()
seq_corresponding_matching = defaultdict(list)
for el in matchings:
try:
sch, cla = el
except ValueError:
sch, cla, mtype = el
class_name = cla[1]
onto_name = cla[0]
if handler is None:
handler = om.kr_handlers[onto_name]
root_to_class_name = handler.ancestors_of_class(class_name)
root_to_class_name = handler.name_of_sequence(root_to_class_name)
seq_corresponding_matching[str(root_to_class_name)].append(el)
sequences.append(root_to_class_name)
trie = SimpleTrie()
trie.add_sequences(sequences, seq_corresponding_matching)
matching_to_be_summarized, cutter = trie.summarize(len(sequences))
summ_matchings = []
if (len(matching_to_be_summarized) / len(matchings)) > summary_ratio: # good summarization
# get level of class
root_to_class_name = handler.ancestors_of_class(cutter)
root_to_class_name = handler.name_of_sequence(root_to_class_name)
if len(root_to_class_name) > 2:
try:
sch, cla, mtype = list(matching_to_be_summarized)[0]
except ValueError:
sch, cla = list(matching_to_be_summarized)[0]
new_match = (sch, (cla[0], cutter)) # the match that summarizes the previous
if summarize_or_remove:
summ_matchings.append(new_match)
semantically_similar_matchings = get_sem_similar_matchings_from(matchings)
for el in semantically_similar_matchings:
summ_matchings.append(el)
return summ_matchings
else:
summ_matchings = [m for m in matchings if m not in set(matching_to_be_summarized)]
summ_matchings.append(new_match)
return summ_matchings
if summarize_or_remove:
try:
sch, cla = list(matching_to_be_summarized)[0]
except ValueError:
sch, cla, mtype = list(matching_to_be_summarized)[0]
new_match = (sch, (cla[0], cutter)) # don't add -> breaking precision...
# return [] # could not summarize -> remove
semantically_similar_matchings = get_sem_similar_matchings_from(matchings)
return semantically_similar_matchings # could not summarize -> remove
else:
summ_matchings = [m for m in matchings if m not in set(matching_to_be_summarized)]
try:
sch, cla, mtype = list(matching_to_be_summarized)[0]
except ValueError:
sch, cla = list(matching_to_be_summarized)[0]
new_match = (sch, (cla[0], cutter)) # the match that summarizes the previous
summ_matchings.append(new_match)
semantically_similar_matchings = get_sem_similar_matchings_from(matchings)
for el in semantically_similar_matchings:
summ_matchings.append(el)
return summ_matchings # could not summarize, return original
def compute_fanout(matchings):
fanout = defaultdict(lambda: defaultdict(list))
for m in matchings:
try:
sch, cla = m
except ValueError:
sch, cla, mtype = m
onto_name = cla[0]
fanout[sch][onto_name].append(m)
ordered = sorted(fanout.items(), key=lambda x: len(x[1].values()), reverse=True)
ordered_dict = dict()
for key, value in ordered:
ordered_dict[key] = value
return ordered_dict
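# fanout maps each schema element to its matchings grouped by ontology; elements
# that matched against the most ontologies are ordered first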
handler = None # the handler for the ontology
summarized_matchings = []
fanout = compute_fanout(matchings)
for k, v in fanout.items():
for onto_name, el in v.items():
if len(el) > threshold_to_summarize:
s_matching = summarize(el, handler) # [sch - class] -> returns only 1 !
for m in s_matching:
summarized_matchings.append(m)
else: # just propagate matchings
for matching in el:
summarized_matchings.append(matching)
return summarized_matchings
def combine_matchings(all_matchings):
def process_matching(building_matching_objects, m, matching_type, attr=False):
sch, krn = m
db_name, source_name, field_name = sch
kr_name, class_name = krn
mobj = building_matching_objects.get((db_name, source_name), None)
if mobj is None:
mobj = Matching(db_name, source_name)
if attr:
mobj.add_attribute_correspondence(field_name, kr_name, class_name, matching_type)
else:
mobj.add_relation_correspondence(kr_name, class_name, matching_type)
building_matching_objects[(db_name, source_name)] = mobj
l1_matchings = all_matchings[MatchingType.L1_CLASSNAME_ATTRVALUE]
l2_matchings = all_matchings[MatchingType.L2_CLASSVALUE_ATTRVALUE]
l4_matchings = all_matchings[MatchingType.L4_CLASSNAME_RELATIONNAME_SYN]
l42_matchings = all_matchings[MatchingType.L42_CLASSNAME_RELATIONNAME_SEM]
l5_matchings = all_matchings[MatchingType.L5_CLASSNAME_ATTRNAME_SYN]
l52_matchings = all_matchings[MatchingType.L52_CLASSNAME_ATTRNAME_SEM]
l6_matchings = all_matchings[MatchingType.L6_CLASSNAME_RELATION_SEMSIG]
l7_matchings = all_matchings[MatchingType.L7_CLASSNAME_ATTRNAME_FUZZY]
building_matching_objects = defaultdict(None) # (db_name, source_name) -> stuff
if l1_matchings is not None:
for m in l1_matchings:
process_matching(building_matching_objects, m, MatchingType.L1_CLASSNAME_ATTRVALUE, True)
if l2_matchings is not None:
for m in l2_matchings:
process_matching(building_matching_objects, m, MatchingType.L2_CLASSVALUE_ATTRVALUE, True)
if l4_matchings is not None:
for m in l4_matchings:
process_matching(building_matching_objects, m, MatchingType.L4_CLASSNAME_RELATIONNAME_SYN)
if l42_matchings is not None:
for m in l42_matchings:
process_matching(building_matching_objects, m, MatchingType.L42_CLASSNAME_RELATIONNAME_SEM)
if l5_matchings is not None:
for m in l5_matchings:
process_matching(building_matching_objects, m, MatchingType.L5_CLASSNAME_ATTRNAME_SYN, True)
if l52_matchings is not None:
for m in l52_matchings:
process_matching(building_matching_objects, m, MatchingType.L52_CLASSNAME_ATTRNAME_SEM, True)
if l6_matchings is not None:
for m in l6_matchings:
process_matching(building_matching_objects, m, MatchingType.L6_CLASSNAME_RELATION_SEMSIG)
if l7_matchings is not None:
for m in l7_matchings:
process_matching(building_matching_objects, m, MatchingType.L7_CLASSNAME_ATTRNAME_FUZZY, True)
return building_matching_objects
def combine_matchings2(all_matchings):
# TODO: divide running score, based on whether content was available or not (is it really necessary?)
# L1 creates its own matchings
l1_matchings = all_matchings[MatchingType.L1_CLASSNAME_ATTRVALUE]
# L2, L5, L52 and L6 create another set of matchings
l2_matchings = all_matchings[MatchingType.L2_CLASSVALUE_ATTRVALUE]
l5_matchings = all_matchings[MatchingType.L5_CLASSNAME_ATTRNAME_SYN]
l52_matchings = all_matchings[MatchingType.L52_CLASSNAME_ATTRNAME_SEM]
l6_matchings = all_matchings[MatchingType.L6_CLASSNAME_RELATION_SEMSIG]
l7_matchings = all_matchings[MatchingType.L7_CLASSNAME_ATTRNAME_FUZZY]
l_combined = dict()
for schema, kr in l1_matchings:
db_name, src_name, attr_name = schema
kr_name, cla_name = kr
l_combined[(db_name, src_name, attr_name, kr_name, cla_name)] = (
(schema, kr), [MatchingType.L1_CLASSNAME_ATTRVALUE])
for schema, kr in l7_matchings:
db_name, src_name, attr_name = schema
kr_name, cla_name = kr
if (db_name, src_name, attr_name, kr_name, cla_name) in l_combined:
l_combined[(db_name, src_name, attr_name, kr_name, cla_name)][1].append(
MatchingType.L7_CLASSNAME_ATTRNAME_FUZZY)
for schema, kr in l2_matchings:
db_name, src_name, attr_name = schema
kr_name, cla_name = kr
if (db_name, src_name, attr_name, kr_name, cla_name) in l_combined:
l_combined[(db_name, src_name, attr_name, kr_name, cla_name)][1].append(
MatchingType.L2_CLASSVALUE_ATTRVALUE)
else:
l_combined[(db_name, src_name, attr_name, kr_name, cla_name)] = (
(schema, kr), [MatchingType.L2_CLASSVALUE_ATTRVALUE])
for schema, kr in l5_matchings:
db_name, src_name, attr_name = schema
kr_name, cla_name = kr
if (db_name, src_name, attr_name, kr_name, cla_name) in l_combined:
l_combined[(db_name, src_name, attr_name, kr_name, cla_name)][1].append(
MatchingType.L5_CLASSNAME_ATTRNAME_SYN)
else:
l_combined[(db_name, src_name, attr_name, kr_name, cla_name)] = (
(schema, kr), [MatchingType.L5_CLASSNAME_ATTRNAME_SYN])
for schema, kr in l52_matchings:
db_name, src_name, attr_name = schema
kr_name, cla_name = kr
if (db_name, src_name, attr_name, kr_name, cla_name) in l_combined:
l_combined[(db_name, src_name, attr_name, kr_name, cla_name)][1].append(
MatchingType.L52_CLASSNAME_ATTRNAME_SEM)
else:
l_combined[(db_name, src_name, attr_name, kr_name, cla_name)] = (
(schema, kr), [MatchingType.L52_CLASSNAME_ATTRNAME_SEM])
for schema, kr in l6_matchings:
db_name, src_name, attr_name = schema
kr_name, cla_name = kr
if (db_name, src_name, attr_name, kr_name, cla_name) in l_combined:
# TODO: only append if the matching types contain something other than L1?
l_combined[(db_name, src_name, attr_name, kr_name, cla_name)][1].append(
MatchingType.L6_CLASSNAME_RELATION_SEMSIG)
# L4 and L42 have their own matching too
l4_matchings = all_matchings[MatchingType.L4_CLASSNAME_RELATIONNAME_SYN]
combined_matchings = []
for key, values in l_combined.items():
matching = values[0]
matching_types = values[1]
# for el in values:
# matching = el[0]
# matching_types = el[1]
combined_matchings.append((matching, matching_types))
combined_matchings = sorted(combined_matchings, key=lambda x: len(x[1]), reverse=True)
return combined_matchings, l4_matchings
def remove_banned_vectors(ban_index, svs):
nSVS = []
for iter1 in range(0, len(ban_index)):
if ban_index[iter1] == 0: # the corresponding vector is not banned
if iter1 < len(svs):
nSVS.append(svs[iter1])
iter1 += 1
return nSVS
def get_ban_indexes(relation1, relation2):
relation1 = nlp.camelcase_to_snakecase(relation1)
relation1 = relation1.replace('-', ' ')
relation1 = relation1.replace('_', ' ')
relation1 = relation1.lower()
relation2 = nlp.camelcase_to_snakecase(relation2)
relation2 = relation2.replace('-', ' ')
relation2 = relation2.replace('_', ' ')
relation2 = relation2.lower()
if relation1 is not None and relation2 is not None:
ban_index1 = [0] * len(relation1.split())
ban_index2 = [0] * len(relation2.split())
iter1 = 0
for token1 in relation1.split():
iter2 = 0
for token2 in relation2.split():
if token1 == token2:
ban_index1[iter1] = 1
ban_index2[iter2] = 1
iter2 += 1
iter1 += 1
return ban_index1, ban_index2
def find_relation_class_attr_name_sem_matchings(network, kr_handlers,
semantic_sim_threshold=0.5,
sensitivity_neg_signal=0.5,
negative_signal_threshold=0.4,
penalize_unknown_word=False,
add_exact_matches=True):
# Retrieve relation names
st = time.time()
names = []
seen_fields = set()
for (db_name, source_name, field_name, _) in network.iterate_values():
orig_field_name = field_name
key_seen = source_name + field_name
if key_seen not in seen_fields:
seen_fields.add(key_seen) # seen already
field_name = nlp.camelcase_to_snakecase(field_name)
field_name = field_name.replace('-', ' ')
field_name = field_name.replace('_', ' | |
a tuple of the distance, the similarity and por - a dictionary keyed by neuron_id
"""
# help cython static type
#
similarity: cython.double = 0.0
distance: cython.double = 0.0
max_dist: cython.double = 0.0
por: dict = {}
edge_key: EdgeKeyType
edges_to_process: Set[EdgeKeyType]
# filter edge_keys as required
#
edges_to_process = ({edge_key for edge_key in neuro_column.edges
if (edge_type_filters is None or neuro_column.edges[edge_key]['edge_type'] in edge_type_filters) and
(neuron_id_filters is None or neuro_column.edges[edge_key]['neuron_id'] in neuron_id_filters)} |
{edge_key for edge_key in self.edges
if (edge_type_filters is None or self.edges[edge_key]['edge_type'] in edge_type_filters) and
(neuron_id_filters is None or self.edges[edge_key]['neuron_id'] in neuron_id_filters)
})
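# union of the (filtered) edge keys from both NeuroColumns, so edges present in
# only one of them still contribute to the distance below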
# compare each edge_key
#
for edge_key in edges_to_process:
# assume every edge has 2 values to be compared
#
max_dist += 2.0
# edge_key in both NeuroColumns
#
if edge_key in self.edges and edge_key in neuro_column.edges:
# por keyed by neuron_id
#
if self.edges[edge_key]['neuron_id'] not in por:
por[self.edges[edge_key]['neuron_id']] = {'distance': 0.0, 'edges': {}}
# the distance between probabilities
#
edge_dist = abs(self.edges[edge_key]['prob'] - neuro_column.edges[edge_key]['prob'])
por[self.edges[edge_key]['neuron_id']]['distance'] += edge_dist
por[self.edges[edge_key]['neuron_id']]['edges'][edge_key] = {'prob': {'distance': edge_dist,
'nc': self.edges[edge_key]['prob'],
'compare_nc': neuro_column.edges[edge_key]['prob']},
'numeric': {'distance': 0.0,
'nc': 1.0,
'compare_nc': 1.0},
'distance': edge_dist
}
if 'numeric' in neuro_column.edges[edge_key]:
# the distance between numerics
#
edge_dist = abs(self.edges[edge_key]['numeric'] - neuro_column.edges[edge_key]['numeric'])
por[self.edges[edge_key]['neuron_id']]['distance'] += edge_dist
por[self.edges[edge_key]['neuron_id']]['edges'][edge_key]['distance'] += edge_dist
por[self.edges[edge_key]['neuron_id']]['edges'][edge_key]['numeric']['distance'] = edge_dist
por[self.edges[edge_key]['neuron_id']]['edges'][edge_key]['numeric']['nc'] = self.edges[edge_key]['numeric']
por[self.edges[edge_key]['neuron_id']]['edges'][edge_key]['numeric']['compare_nc'] = neuro_column.edges[edge_key]['numeric']
distance += por[self.edges[edge_key]['neuron_id']]['edges'][edge_key]['distance']
# edge key only in this NeuroColumn
#
elif edge_key in self.edges:
# por keyed by neuron_id
#
if self.edges[edge_key]['neuron_id'] not in por:
por[self.edges[edge_key]['neuron_id']] = {'distance': 0.0, 'edges': {}}
# the distance between probabilities
#
edge_dist = self.edges[edge_key]['prob']
por[self.edges[edge_key]['neuron_id']]['distance'] += edge_dist
por[self.edges[edge_key]['neuron_id']]['edges'][edge_key] = {'prob': {'distance': edge_dist,
'nc': edge_dist,
'compare_nc': 0.0},
'numeric': {'distance': 0.0,
'nc': 1.0,
'compare_nc': 1.0},
'distance': edge_dist}
if 'numeric' in self.edges[edge_key]:
edge_dist = self.edges[edge_key]['numeric']
por[self.edges[edge_key]['neuron_id']]['distance'] += edge_dist
por[self.edges[edge_key]['neuron_id']]['edges'][edge_key]['distance'] += edge_dist
por[self.edges[edge_key]['neuron_id']]['edges'][edge_key]['numeric']['nc'] = edge_dist
por[self.edges[edge_key]['neuron_id']]['edges'][edge_key]['numeric']['compare_nc'] = 0.0
distance += por[self.edges[edge_key]['neuron_id']]['edges'][edge_key]['distance']
# edge_key in the NeuroColumn to compare to
#
else:
# por keyed by neuron_id
#
if neuro_column.edges[edge_key]['neuron_id'] not in por:
por[neuro_column.edges[edge_key]['neuron_id']] = {'distance': 0.0, 'edges': {}}
# the distance between probabilities
#
edge_dist = neuro_column.edges[edge_key]['prob']
por[neuro_column.edges[edge_key]['neuron_id']]['distance'] += edge_dist
por[neuro_column.edges[edge_key]['neuron_id']]['edges'][edge_key] = {'prob': {'distance': edge_dist,
'nc': 0.0,
'compare_nc': edge_dist},
'numeric': {'distance': 0.0,
'nc': 1.0,
                                                                             'compare_nc': 1.0},
'distance': edge_dist
}
if 'numeric' in neuro_column.edges[edge_key]:
# the distance between numeric
#
edge_dist = neuro_column.edges[edge_key]['numeric']
por[neuro_column.edges[edge_key]['neuron_id']]['distance'] += edge_dist
por[neuro_column.edges[edge_key]['neuron_id']]['edges'][edge_key]['distance'] += edge_dist
por[neuro_column.edges[edge_key]['neuron_id']]['edges'][edge_key]['numeric']['nc'] = 0.0
por[neuro_column.edges[edge_key]['neuron_id']]['edges'][edge_key]['numeric']['compare_nc'] = edge_dist
distance += por[neuro_column.edges[edge_key]['neuron_id']]['edges'][edge_key]['distance']
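# every processed edge contributes at most 2.0 to max_dist (prob + numeric),
# so the similarity below is 1 - distance / (2 * number of edges processed)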
if max_dist > 0.0:
similarity = 1.0 - (distance / max_dist)
return distance, similarity, por
def learn(self, neuro_column, learn_rate: cython.double, is_bmu: bool = True, hebbian_edges: Optional[FilterType] = None) -> None:
"""
method to learn from the specified SDR
:param neuro_column: the neuron_column to learn from
:param learn_rate: the hebbian learning rate to apply
:param is_bmu: if true then will also learn non-hebbian edges
:param hebbian_edges: a set of edge types to perform hebbian learning on
:return: None
"""
# help cython static type
#
edge_key: EdgeKeyType
edges_to_process: Set[EdgeKeyType]
edges_to_delete: Set[EdgeKeyType]
# filter edge_keys as required
#
edges_to_process = set(self.edges.keys()) | set(neuro_column.edges.keys())
edges_to_delete = set()
for edge_key in edges_to_process:
# edge_key in both self and neuro_column
#
if edge_key in self.edges and edge_key in neuro_column.edges:
if hebbian_edges is None or self.edges[edge_key]['edge_type'] in hebbian_edges:
# this edge has been updated
#
self.edges[edge_key]['updated'] = True
# learn new prob and numeric
#
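# i.e. an exponential moving average: value <- value + (target - value) * learn_rate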
self.edges[edge_key]['prob'] += (neuro_column.edges[edge_key]['prob'] - self.edges[edge_key]['prob']) * learn_rate
if 'numeric' in self.edges[edge_key] and 'numeric' in neuro_column.edges[edge_key]:
self.edges[edge_key]['numeric'] += (neuro_column.edges[edge_key]['numeric'] - self.edges[edge_key]['numeric']) * learn_rate
# edge_key only in self
#
elif edge_key in self.edges:
if hebbian_edges is None or self.edges[edge_key]['edge_type'] in hebbian_edges:
# this edge has been updated
#
self.edges[edge_key]['updated'] = True
# learn to forget prob
#
self.edges[edge_key]['prob'] *= (1.0 - learn_rate)
# edge_key only in the neuro_column to learn from
#
else:
if hebbian_edges is None or neuro_column.edges[edge_key]['edge_type'] in hebbian_edges:
self.edges[edge_key] = {'edge_type': neuro_column.edges[edge_key]['edge_type'],
'edge_uid': neuro_column.edges[edge_key]['edge_uid'],
'source_type': neuro_column.edges[edge_key]['source_type'],
'source_uid': neuro_column.edges[edge_key]['source_uid'],
'target_type': neuro_column.edges[edge_key]['target_type'],
'target_uid': neuro_column.edges[edge_key]['target_uid'],
'neuron_id': neuro_column.edges[edge_key]['neuron_id'],
'prob': neuro_column.edges[edge_key]['prob'] * learn_rate,
'updated': True}
# if numeric exists then just copy as it is a new edge in self
#
if 'numeric' in neuro_column.edges[edge_key]:
self.edges[edge_key]['numeric'] = neuro_column.edges[edge_key]['numeric']
if 'numeric_min' in neuro_column.edges[edge_key] and 'numeric_max' in neuro_column.edges[edge_key]:
self.edges[edge_key]['numeric_min'] = neuro_column.edges[edge_key]['numeric_min']
self.edges[edge_key]['numeric_max'] = neuro_column.edges[edge_key]['numeric_max']
elif is_bmu:
self.edges[edge_key] = {'edge_type': neuro_column.edges[edge_key]['edge_type'],
'edge_uid': neuro_column.edges[edge_key]['edge_uid'],
'source_type': neuro_column.edges[edge_key]['source_type'],
'source_uid': neuro_column.edges[edge_key]['source_uid'],
'target_type': neuro_column.edges[edge_key]['target_type'],
'target_uid': neuro_column.edges[edge_key]['target_uid'],
'neuron_id': neuro_column.edges[edge_key]['neuron_id'],
'prob': neuro_column.edges[edge_key]['prob'],
'updated': True}
# if numeric exists then just copy as it is a new edge in self
#
if 'numeric' in neuro_column.edges[edge_key]:
self.edges[edge_key]['numeric'] = neuro_column.edges[edge_key]['numeric']
if 'numeric_min' in neuro_column.edges[edge_key] and 'numeric_max' in neuro_column.edges[edge_key]:
self.edges[edge_key]['numeric_min'] = neuro_column.edges[edge_key]['numeric_min']
self.edges[edge_key]['numeric_max'] = neuro_column.edges[edge_key]['numeric_max']
# add edge to delete list if small enough
#
if edge_key in self.edges and self.edges[edge_key]['prob'] < self.prune_threshold:
edges_to_delete.add(edge_key)
# delete any edges with close to zero probability
#
for edge_key in edges_to_delete:
del self.edges[edge_key]
def merge(self, neuro_column, merge_factor: cython.double) -> None:
"""
method to merge with a NeuroColumn using a merge_factor
:param neuro_column: the neuro_column to merge with
:param merge_factor: the merge factor
:return: None
"""
# help cython to static type
#
edge_key: EdgeKeyType
# process edges
#
edges_to_process: set = set(neuro_column.edges.keys()) | set(self.edges.keys())
for edge_key in edges_to_process:
# edge_key in both SDRs
if edge_key in self.edges and edge_key in neuro_column.edges:
self.edges[edge_key]['updated'] = True
self.edges[edge_key]['prob'] += (neuro_column.edges[edge_key]['prob'] * merge_factor)
if 'numeric' in self.edges[edge_key] and 'numeric' in neuro_column.edges[edge_key]:
self.edges[edge_key]['numeric'] += (neuro_column.edges[edge_key]['numeric'] * merge_factor)
# edge_key only in the SDR to merge with
#
elif edge_key in neuro_column.edges:
self.edges[edge_key] = {'edge_type': neuro_column.edges[edge_key]['edge_type'],
'edge_uid': neuro_column.edges[edge_key]['edge_uid'],
'source_type': neuro_column.edges[edge_key]['source_type'],
'source_uid': neuro_column.edges[edge_key]['source_uid'],
'target_type': neuro_column.edges[edge_key]['target_type'],
'target_uid': neuro_column.edges[edge_key]['target_uid'],
'neuron_id': neuro_column.edges[edge_key]['neuron_id'],
'prob': neuro_column.edges[edge_key]['prob'] * merge_factor,
'updated': True}
if 'numeric' in neuro_column.edges[edge_key]:
self.edges[edge_key]['numeric'] = (neuro_column.edges[edge_key]['numeric'] * merge_factor)
if 'numeric_min' in neuro_column.edges[edge_key] and 'numeric_max' in neuro_column.edges[edge_key]:
self.edges[edge_key]['numeric_min'] = neuro_column.edges[edge_key]['numeric_min']
self.edges[edge_key]['numeric_max'] = neuro_column.edges[edge_key]['numeric_max']
def randomize(self, neuro_column, edges_to_randomise: set = None) -> None:
"""
method to randomise this SDR based on the example SDR
:param neuro_column: the example SDR
:param edges_to_randomise: optional set of edges to randomise
:return: None
"""
# help cython to static type
#
edge_key: EdgeKeyType
for edge_key in neuro_column.edges:
if edges_to_randomise is None or neuro_column.edges[edge_key]['edge_type'] in edges_to_randomise:
rnd_numeric = None
numeric_min = None
numeric_max = None
# calculate a random numeric if required
#
if 'numeric' in neuro_column.edges[edge_key]:
# use the normalisation boundaries to calc a random number - which will be normalised when upserted...
#
if 'numeric_min' in neuro_column.edges[edge_key] and 'numeric_max' in neuro_column.edges[edge_key]:
rnd_numeric = (random.random() * (neuro_column.edges[edge_key]['numeric_max'] - neuro_column.edges[edge_key]['numeric_min'])) + neuro_column.edges[edge_key]['numeric_min']
numeric_min = neuro_column.edges[edge_key]['numeric_min']
numeric_max = neuro_column.edges[edge_key]['numeric_max']
else:
rnd_numeric = random.random()
self.upsert(edge_type=neuro_column.edges[edge_key]['edge_type'],
edge_uid=neuro_column.edges[edge_key]['edge_uid'],
source_type=neuro_column.edges[edge_key]['source_type'],
source_uid=neuro_column.edges[edge_key]['source_uid'],
target_type=neuro_column.edges[edge_key]['target_type'],
target_uid=neuro_column.edges[edge_key]['target_uid'],
neuron_id=neuro_column.edges[edge_key]['neuron_id'],
# Randomise probability
#
prob=random.random(),
numeric=rnd_numeric,
numeric_min=numeric_min,
numeric_max=numeric_max
)
def get_edge_by_max_probability(self) -> Optional[Dict[EdgeFeatureKeyType, EdgeFeatureType]]:
"""
method to return the edge with the maximum prob
:return: Dictionary with keys: 'edge_type', 'edge_uid', 'source_type', 'source_uid', 'target_type', 'target_uid', 'neuron_id', 'prob', Optional['numeric', 'numeric_min', 'numeric_max']
"""
# help Cython to static type
#
max_prob: cython.double = 0.0
max_edge_key: Optional[EdgeKeyType] = None
edge_key: EdgeKeyType
for edge_key in self.edges:
if max_edge_key is None or self.edges[edge_key]['prob'] >= max_prob:
max_edge_key = edge_key
max_prob = self.edges[edge_key]['prob']
return self.edges[max_edge_key] if max_edge_key is not None else None
def __str__(self):
"""
method to display the NeuroColumn as a string
:return: a string representation of the SDR attributes
"""
# help cython static type
#
txt: str = ''
edge_key: EdgeKeyType
for edge_key in self.edges:
if 'numeric' in self.edges[edge_key]:
if 'numeric_min' in self.edges[edge_key]:
numeric = (self.edges[edge_key]['numeric'] * (self.edges[edge_key]['numeric_max'] - self.edges[edge_key]['numeric_min'])) + self.edges[edge_key]['numeric_min']
else:
numeric = self.edges[edge_key]['numeric']
else:
numeric = None
txt = '{}Sce: {}:{}\nEdge: {}:{}:{}\nTrg: {}:{}\nProb: {}\n'.format(txt,
self.edges[edge_key]['source_type'], self.edges[edge_key]['source_uid'],
self.edges[edge_key]['edge_type'], self.edges[edge_key]['edge_uid'],
self.edges[edge_key]['neuron_id'],
self.edges[edge_key]['target_type'], self.edges[edge_key]['target_uid'],
self.edges[edge_key]['prob'])
if numeric is not None:
txt = '{}Numeric: {}\n'.format(txt, numeric)
return txt
def __contains__(self, edge_key: EdgeKeyType) -> bool:
"""
method to check if an edge_key exists in the NeuroColumn
:param edge_key: edge key to check
:return: True if it exists else False
"""
return edge_key in self.edges
def __iter__(self) -> iter:
"""
method to return an iterable of the NeuroColumn edge keys
:return: iterable of neuro_column keys
"""
return iter(self.edges)
def __getitem__(self, edge_key: EdgeKeyType) -> Dict[EdgeFeatureKeyType, EdgeFeatureType]:
"""
method to access the NeuroColumn edge attributes
:param edge_key: the
get_policy_version(self, policy_name, version_id):
policy = self.get_policy(policy_name)
if not policy:
raise ResourceNotFoundException()
for version in policy.versions:
if version.version_id == version_id:
return version
raise ResourceNotFoundException()
def list_policy_versions(self, policy_name):
policy = self.get_policy(policy_name)
if not policy:
raise ResourceNotFoundException()
return policy.versions
def delete_policy_version(self, policy_name, version_id):
policy = self.get_policy(policy_name)
if not policy:
raise ResourceNotFoundException()
if version_id == policy.default_version_id:
raise InvalidRequestException(
"Cannot delete the default version of a policy"
)
for i, v in enumerate(policy.versions):
if v.version_id == version_id:
del policy.versions[i]
return
raise ResourceNotFoundException()
def _get_principal(self, principal_arn):
"""
raise ResourceNotFoundException
"""
if ":cert/" in principal_arn:
certs = [_ for _ in self.certificates.values() if _.arn == principal_arn]
if len(certs) == 0:
raise ResourceNotFoundException()
principal = certs[0]
return principal
from moto.cognitoidentity import cognitoidentity_backends
cognito = cognitoidentity_backends[self.region_name]
identities = []
for identity_pool in cognito.identity_pools:
pool_identities = cognito.pools_identities.get(identity_pool, None)
identities.extend(
[pi["IdentityId"] for pi in pool_identities.get("Identities", [])]
)
if principal_arn in identities:
return {"IdentityId": principal_arn}
raise ResourceNotFoundException()
def attach_principal_policy(self, policy_name, principal_arn):
principal = self._get_principal(principal_arn)
policy = self.get_policy(policy_name)
k = (principal_arn, policy_name)
if k in self.principal_policies:
return
self.principal_policies[k] = (principal, policy)
def detach_principal_policy(self, policy_name, principal_arn):
# this may raise ResourceNotFoundException
self._get_principal(principal_arn)
self.get_policy(policy_name)
k = (principal_arn, policy_name)
if k not in self.principal_policies:
raise ResourceNotFoundException()
del self.principal_policies[k]
def list_principal_policies(self, principal_arn):
policies = [
v[1] for k, v in self.principal_policies.items() if k[0] == principal_arn
]
return policies
def list_policy_principals(self, policy_name):
principals = [
k[0] for k, v in self.principal_policies.items() if k[1] == policy_name
]
return principals
def attach_thing_principal(self, thing_name, principal_arn):
principal = self._get_principal(principal_arn)
thing = self.describe_thing(thing_name)
k = (principal_arn, thing_name)
if k in self.principal_things:
return
self.principal_things[k] = (principal, thing)
def detach_thing_principal(self, thing_name, principal_arn):
# this may raise ResourceNotFoundException
self._get_principal(principal_arn)
self.describe_thing(thing_name)
k = (principal_arn, thing_name)
if k not in self.principal_things:
raise ResourceNotFoundException()
del self.principal_things[k]
def list_principal_things(self, principal_arn):
thing_names = [
k[1] for k, v in self.principal_things.items() if k[0] == principal_arn
]
return thing_names
def list_thing_principals(self, thing_name):
things = [_ for _ in self.things.values() if _.thing_name == thing_name]
if len(things) == 0:
raise ResourceNotFoundException(
"Failed to list principals for thing %s because the thing does not exist in your account"
% thing_name
)
principals = [
k[0] for k, v in self.principal_things.items() if k[1] == thing_name
]
return principals
def describe_thing_group(self, thing_group_name):
thing_groups = [
_
for _ in self.thing_groups.values()
if _.thing_group_name == thing_group_name
]
if len(thing_groups) == 0:
raise ResourceNotFoundException()
return thing_groups[0]
def create_thing_group(
self, thing_group_name, parent_group_name, thing_group_properties
):
thing_group = FakeThingGroup(
thing_group_name,
parent_group_name,
thing_group_properties,
self.region_name,
self.thing_groups,
)
self.thing_groups[thing_group.arn] = thing_group
return thing_group.thing_group_name, thing_group.arn, thing_group.thing_group_id
def delete_thing_group(self, thing_group_name):
"""
The ExpectedVersion-parameter is not yet implemented
"""
child_groups = [
thing_group
for _, thing_group in self.thing_groups.items()
if thing_group.parent_group_name == thing_group_name
]
if len(child_groups) > 0:
raise InvalidRequestException(
" Cannot delete thing group : "
+ thing_group_name
+ " when there are still child groups attached to it"
)
try:
thing_group = self.describe_thing_group(thing_group_name)
del self.thing_groups[thing_group.arn]
except ResourceNotFoundException:
# AWS returns success even if the thing group does not exist.
pass
def list_thing_groups(self, parent_group, name_prefix_filter, recursive):
if recursive is None:
recursive = True
if name_prefix_filter is None:
name_prefix_filter = ""
if parent_group and parent_group not in [
_.thing_group_name for _ in self.thing_groups.values()
]:
raise ResourceNotFoundException()
thing_groups = [
_ for _ in self.thing_groups.values() if _.parent_group_name == parent_group
]
if recursive:
for g in thing_groups:
thing_groups.extend(
self.list_thing_groups(
parent_group=g.thing_group_name,
name_prefix_filter=None,
recursive=False,
)
)
# thing_groups = groups_to_process.values()
return [
_ for _ in thing_groups if _.thing_group_name.startswith(name_prefix_filter)
]
def update_thing_group(
self, thing_group_name, thing_group_properties, expected_version
):
thing_group = self.describe_thing_group(thing_group_name)
if expected_version and expected_version != thing_group.version:
raise VersionConflictException(thing_group_name)
attribute_payload = thing_group_properties.get("attributePayload", None)
if attribute_payload is not None and "attributes" in attribute_payload:
do_merge = attribute_payload.get("merge", False)
attributes = attribute_payload["attributes"]
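# merge=False replaces the stored attributes wholesale; merge=True updates the
# existing attribute payload in place with the new values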
if not do_merge:
thing_group.thing_group_properties["attributePayload"][
"attributes"
] = attributes
else:
thing_group.thing_group_properties["attributePayload"][
"attributes"
].update(attributes)
elif attribute_payload is not None and "attributes" not in attribute_payload:
thing_group.attributes = {}
thing_group.version = thing_group.version + 1
return thing_group.version
def _identify_thing_group(self, thing_group_name, thing_group_arn):
# identify thing group
if thing_group_name is None and thing_group_arn is None:
raise InvalidRequestException(
" Both thingGroupArn and thingGroupName are empty. Need to specify at least one of them"
)
if thing_group_name is not None:
thing_group = self.describe_thing_group(thing_group_name)
if thing_group_arn and thing_group.arn != thing_group_arn:
raise InvalidRequestException(
"ThingGroupName thingGroupArn does not match specified thingGroupName in request"
)
elif thing_group_arn is not None:
if thing_group_arn not in self.thing_groups:
raise InvalidRequestException()
thing_group = self.thing_groups[thing_group_arn]
return thing_group
def _identify_thing(self, thing_name, thing_arn):
# identify thing
if thing_name is None and thing_arn is None:
raise InvalidRequestException(
"Both thingArn and thingName are empty. Need to specify at least one of them"
)
if thing_name is not None:
thing = self.describe_thing(thing_name)
if thing_arn and thing.arn != thing_arn:
raise InvalidRequestException(
"ThingName thingArn does not match specified thingName in request"
)
elif thing_arn is not None:
if thing_arn not in self.things:
raise InvalidRequestException()
thing = self.things[thing_arn]
return thing
def add_thing_to_thing_group(
self, thing_group_name, thing_group_arn, thing_name, thing_arn
):
thing_group = self._identify_thing_group(thing_group_name, thing_group_arn)
thing = self._identify_thing(thing_name, thing_arn)
if thing.arn in thing_group.things:
# aws ignores duplicate registration
return
thing_group.things[thing.arn] = thing
def remove_thing_from_thing_group(
self, thing_group_name, thing_group_arn, thing_name, thing_arn
):
thing_group = self._identify_thing_group(thing_group_name, thing_group_arn)
thing = self._identify_thing(thing_name, thing_arn)
if thing.arn not in thing_group.things:
# aws ignores non-registered thing
return
del thing_group.things[thing.arn]
def list_things_in_thing_group(self, thing_group_name):
"""
Pagination and the recursive-parameter is not yet implemented
"""
thing_group = self.describe_thing_group(thing_group_name)
return thing_group.things.values()
def list_thing_groups_for_thing(self, thing_name):
"""
Pagination is not yet implemented
"""
thing = self.describe_thing(thing_name)
all_thing_groups = self.list_thing_groups(None, None, None)
ret = []
for thing_group in all_thing_groups:
if thing.arn in thing_group.things:
ret.append(
{
"groupName": thing_group.thing_group_name,
"groupArn": thing_group.arn,
}
)
return ret
def update_thing_groups_for_thing(
self, thing_name, thing_groups_to_add, thing_groups_to_remove
):
thing = self.describe_thing(thing_name)
for thing_group_name in thing_groups_to_add:
thing_group = self.describe_thing_group(thing_group_name)
self.add_thing_to_thing_group(
thing_group.thing_group_name, None, thing.thing_name, None
)
for thing_group_name in thing_groups_to_remove:
thing_group = self.describe_thing_group(thing_group_name)
self.remove_thing_from_thing_group(
thing_group.thing_group_name, None, thing.thing_name, None
)
def create_job(
self,
job_id,
targets,
document_source,
document,
description,
presigned_url_config,
target_selection,
job_executions_rollout_config,
document_parameters,
):
job = FakeJob(
job_id,
targets,
document_source,
document,
description,
presigned_url_config,
target_selection,
job_executions_rollout_config,
document_parameters,
self.region_name,
)
self.jobs[job_id] = job
for thing_arn in targets:
thing_name = thing_arn.split(":")[-1].split("/")[-1]
job_execution = FakeJobExecution(job_id, thing_arn)
self.job_executions[(job_id, thing_name)] = job_execution
return job.job_arn, job_id, description
def describe_job(self, job_id):
jobs = [_ for _ in self.jobs.values() if _.job_id == job_id]
if len(jobs) == 0:
raise ResourceNotFoundException()
return jobs[0]
def delete_job(self, job_id, force):
job = self.jobs[job_id]
if job.status == "IN_PROGRESS" and force:
del self.jobs[job_id]
elif job.status != "IN_PROGRESS":
del self.jobs[job_id]
else:
raise InvalidStateTransitionException()
def cancel_job(self, job_id, reason_code, comment, force):
job = self.jobs[job_id]
job.reason_code = reason_code if reason_code is not None else job.reason_code
job.comment = comment if comment is not None else job.comment
job.force = force if force is not None and force != job.force else job.force
job.status = "CANCELED"
if job.status == "IN_PROGRESS" and force:
self.jobs[job_id] = job
elif previous_status != "IN_PROGRESS":
self.jobs[job_id] = job
else:
raise InvalidStateTransitionException()
return job
def get_job_document(self, job_id):
return self.jobs[job_id]
def list_jobs(self, max_results, token):
"""
The following parameters are not yet implemented: Status, TargetSelection, ThingGroupName, ThingGroupId
"""
all_jobs = [_.to_dict() for _ in self.jobs.values()]
filtered_jobs = all_jobs
if token is None:
jobs = filtered_jobs[0:max_results]
next_token = str(max_results) if len(filtered_jobs) > max_results else None
else:
token = int(token)
jobs = filtered_jobs[token : token + max_results]
next_token = (
str(token + max_results)
if len(filtered_jobs) > token + max_results
else None
)
return jobs, next_token
def describe_job_execution(self, job_id, thing_name, execution_number):
try:
job_execution = self.job_executions[(job_id, thing_name)]
except KeyError:
raise ResourceNotFoundException()
if job_execution is None or (
execution_number is not None
and job_execution.execution_number != execution_number
):
raise ResourceNotFoundException()
return job_execution
def cancel_job_execution(self, job_id, thing_name, force):
"""
The parameters ExpectedVersion and StatusDetails are not yet implemented
"""
job_execution = self.job_executions[(job_id, thing_name)]
if job_execution is None:
raise ResourceNotFoundException()
job_execution.force_canceled = (
force if force is not None else job_execution.force_canceled
)
# TODO: implement expected_version and status_details (at most 10 can be specified)
if job_execution.status == "IN_PROGRESS" and force:
job_execution.status = "CANCELED"
self.job_executions[(job_id, thing_name)] = job_execution
elif job_execution.status != "IN_PROGRESS":
job_execution.status = "CANCELED"
self.job_executions[(job_id, thing_name)] = job_execution
else:
raise InvalidStateTransitionException()
def delete_job_execution(self, job_id, thing_name, execution_number, force):
job_execution = self.job_executions[(job_id, thing_name)]
if job_execution.execution_number != | |
has_select_all(self, max_len: Optional[int] = None):
if max_len is None or max_len > len(self.children):
max_len = len(self.children)
for i in range(max_len):
if isinstance(self.children[i], SelectItemAllColumns):
return True
return False
def is_valid_group_by_index(self, index: int):
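# a positional GROUP BY reference is usable only if it points at a named select
# item and no `SELECT *` item appears at or before that position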
return (0 < index <= len(self.children) and
not self.has_select_all(index) and
self.children[index - 1].name)
def tree_str(self, indent: str = ''):
if not self.children:
return ''
sub_indent = indent + _INDENT
with StringIO() as s:
s.write(f'{indent}SELECT')
if self.keyword:
s.write(f' {self.keyword}')
s.write('\n')
s.write(''.join(si.tree_str(sub_indent) for si in self.children))
return s.getvalue()
def recompose(self, sep: str = ''):
with StringIO() as s:
s.write('SELECT')
if self.keyword:
s.write(f' {self.keyword}')
s.write(sep)
s.write(f',{sep}'.join(si.recompose(sep) for si in self.children))
return s.getvalue()
def resolve_name(self, name: str, in_scope: bool, find_source: bool):
del find_source
if in_scope:
for child in self.children:
if child.name == name:
return child
else:
for child in self.children:
if (isinstance(child, SelectItemAllColumns) and
(child.name is None or name.startswith(f'{child.name}.'))):
return child
return None
def link_graph(self, graph: Graph, resolvers: List['Source'],
processed: Set['Source']):
if self._is_processed(processed):
return
node = graph.node_by_source(self)
for child in self.children:
node.add_child(graph.node_by_source(child), LinkType.OUTPUT)
child.link_graph(graph, resolvers, processed)
class QueryClause(Source):
"""Side clauses that can appear along select statement (e.g. `ORDER BY`)."""
def __init__(self,
parent: Optional[Source] = None,
name: Optional[str] = None):
super().__init__(parent, name)
def is_sql_construct(self):
return True
def add_child(self, child: Source):
assert isinstance(child, Expression)
child.role = self.name
super().add_child(child)
def recompose(self, sep: str = ''):
expressions = ', '.join(child.recompose(sep) for child in self.children)
return f'{self.name} {expressions}'
def link_graph(self, graph: Graph, resolvers: List['Source'],
processed: Set['Source']):
if self._is_processed(processed):
return
node = graph.node_by_source(self)
for child in self.children:
node.add_child(graph.node_by_source(child), LinkType.EXPRESSION_AUX)
child.link_graph(graph, resolvers, processed)
class QueryLimit:
"""Contains the limits for a query.
Attributes:
limit: numeric LIMIT of the query.
offset: numeric OFFSET of the limit clause.
extra_limits: some dialects support multiple limit values (which complement
the limit)
postfix: some dialects support some postfix keywords after the limit clause.
"""
def __init__(self,
limit: str,
offset: Optional[str] = None,
extra_limits: Optional[List[str]] = None):
self.limit = limit
self.offset = offset
self.extra_limits = extra_limits
self.postfix: Optional[str] = None
def recompose(self):
postfix = ''
if self.postfix:
postfix = f' {self.postfix}'
if self.offset is not None:
return f'LIMIT {self.limit} OFFSET {self.offset}{postfix}'
if self.extra_limits:
extra = ', '.join(str(l) for l in self.extra_limits)
return f'LIMIT {self.limit}, {extra}{postfix}'
return f'LIMIT {self.limit}{postfix}'
@classmethod
def with_offset(cls, limit: str, offset: str):
return cls(limit, offset=offset)
@classmethod
def with_numbers(cls, limits: List[str]):
return cls(limits[0], extra_limits=limits[1:])
class GroupByClause(Source):
"""Contains the group by clauses in a query.
Attributes:
keyword: any initial keyword contained after the `GROUP BY`
(e.g. `CUBE`, `ROLLUP` etc).
post_keyword: posterior keywords in the `GROUP BY` (e.g. `WITH CUBE`).
with_parens: if grouping expressions are to be reconstructed in parentheses.
post_syntax: place the decoration keywords after the group by clause
grouping_sets: list of GROUPING SETS expressions.
"""
def __init__(self, parent: Optional[Source] = None):
super().__init__(parent, 'GROUP BY')
self.group_by: List[Expression] = []
self.keyword: Optional[str] = None
self.post_keyword: Optional[str] = None
self.with_parens = False
self.post_syntax = False
self.grouping_sets: List[Expression] = []
def is_sql_construct(self):
return True
def add_group_by(self, expression: Expression):
expression.role = 'GROUP BY'
assert expression is not None
expression.set_parent(self)
self.group_by.append(expression)
def add_grouping_set(self, expression: Expression):
expression.role = 'GROUPING SET'
assert expression is not None
expression.set_parent(self)
self.grouping_sets.append(expression)
def tree_str(self, indent: str = ''):
with StringIO() as s:
s.write(f'{indent}GROUP BY\n')
sub_indent = indent + _INDENT
for exp in self.group_by:
s.write(exp.tree_str(sub_indent))
for exp in self.grouping_sets:
s.write(exp.tree_str(sub_indent))
return s.getvalue()
def recompose(self, sep: str = ' '):
with StringIO() as s:
kw = f' {self.keyword}' if self.keyword else ''
if self.group_by:
s.write(f'{sep}GROUP BY')
if not self.post_syntax:
s.write(kw)
s.write(sep)
if self.with_parens:
s.write('(')
s.write(', '.join(gb.recompose(sep) for gb in self.group_by))
if self.with_parens:
s.write(')')
if self.post_syntax:
s.write(kw)
if self.post_keyword:
s.write(f'{sep}{self.post_keyword}')
if self.grouping_sets:
s.write(f'{sep}GROUPING SETS (')
s.write(f',{sep}'.join(
gs.recompose(sep) for gs in self.grouping_sets))
s.write(')')
return s.getvalue()
def link_graph(self, graph: Graph, resolvers: List['Source'],
processed: Set['Source']):
if self._is_processed(processed):
return
node = graph.node_by_source(self)
for child in self.children:
node.add_child(graph.node_by_source(child), LinkType.EXPRESSION)
child.link_graph(graph, resolvers, processed)
class Query(Source):
"""A SELECT statement.
Attributes:
withs: queries or expressions introduced with a `WITH` clause. Their names
denote the aliases implied by the associated `AS`.
source: the main data source of the query. Can be composed with `JoinSource`.
destination: if producing a table or a view, this contains the
produced source.
select_items: the expression and names selected in this query.
where: the main `WHERE` filter expression.
group_by: any `GROUP BY` clause contained.
having: the `HAVING` post select filter expression.
clauses: any associated clauses (`ORDER BY` and such).
set_ops: associated queries, in a set-like operation w/ this one
(e.g. `UNION`).
limit: any associated `LIMIT` clause.
"""
def __init__(self,
parent: Optional[Source] = None,
name: Optional[str] = None):
super().__init__(parent, name)
self.withs: List[Source] = []
self.source: Source = None
self.destination: Optional[Source] = None
self.select_items = SelectItems(self)
self.where: Optional[Expression] = None
self.group_by: Optional[GroupByClause] = None
self.having: Optional[Expression] = None
self.clauses: List[QueryClause] = []
self.set_ops: List['SetOpSelect'] = []
self.limit: Optional[QueryLimit] = None
def ensure_group_by(self):
if not self.group_by:
self.group_by = GroupByClause(self)
return self.group_by
def add_group_by(self,
exp: Optional[Expression] = None,
exp_num: Optional[int] = None):
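# a numeric reference such as `GROUP BY 2` is rewritten to the name of the second
# select item when that index is valid (see is_valid_group_by_index); otherwise
# the literal number is kept as the expression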
if exp_num is not None:
assert exp is None
if self.select_items.is_valid_group_by_index(exp_num):
exp = Expression(self, None,
self.select_items.children[exp_num - 1].name)
else:
exp = Expression(self, None, f'{exp_num}')
self.ensure_group_by().add_group_by(exp)
def tree_str(self, indent: str = ''):
name_indent = indent + _INDENT
sub_indent = name_indent + _INDENT
with StringIO() as s:
s.write(self.name_str(indent=indent))
s.write('\n')
for ws in self.withs:
s.write(ws.name_str('WITH', name_indent))
s.write('\n')
s.write(ws.tree_str(sub_indent))
if self.source:
s.write(self.source.name_str('FROM', name_indent))
s.write('\n')
s.write(self.source.tree_str(sub_indent))
s.write(self.select_items.tree_str(name_indent))
if self.where:
s.write(self.where.name_str('WHERE', name_indent))
s.write('\n')
s.write(self.where.tree_str(sub_indent))
if self.group_by:
s.write(self.group_by.tree_str(name_indent))
if self.having:
s.write(self.having.name_str('HAVING', name_indent))
s.write('\n')
s.write(self.having.tree_str(sub_indent))
for clause in self.clauses:
s.write(clause.name_str(indent=name_indent))
s.write('\n')
s.write(clause.tree_str(sub_indent))
for set_op in self.set_ops:
s.write(set_op.tree_str(name_indent))
for lateral in self.laterals:
s.write(lateral.tree_str(name_indent))
return s.getvalue()
def recompose(self, sep: str = ' '):
with StringIO() as s:
if self.withs:
s.write(f'WITH{sep}')
withs = []
for ws in self.withs:
if isinstance(ws, Query):
withs.append(f'{ws.name} AS ({ws.recompose(sep)})')
else:
withs.append(f'{ws.recompose(sep)} AS {ws.name}')
s.write(f',{sep}'.join(withs))
s.write(sep)
s.write(self.select_items.recompose(sep))
s.write(f'{sep}')
if self.source:
s.write(f'FROM{sep}')
if isinstance(self.source, Query):
s.write(f'({self.source.recompose(sep)})')
if self.source.name:
s.write(f'{sep}AS {self.source.name}')
s.write(self.source.laterals_recompose(sep))
else:
s.write(self.source.recompose(sep))
if self.where:
s.write(f'{sep}WHERE{sep}{self.where.recompose(sep)}')
if self.group_by:
s.write(self.group_by.recompose(sep))
if self.having:
s.write(f'{sep}HAVING{sep}{self.having.recompose(sep)}')
if self.limit:
s.write(f'{sep}{self.limit.recompose()}')
for clause in self.clauses:
s.write(f'{sep}{clause.recompose(sep)}')
for set_op in self.set_ops:
s.write(f'{sep}{set_op.recompose(sep)}')
return s.getvalue()
def _recompose_source(self, source: Source, sep: str = ' '):
if isinstance(source, Query):
return f'({source.recompose(sep)}) AS {source.name}'
def _local_resolve(self, name: str, in_scope: bool):
if in_scope:
resolved = self.select_items.resolve_name(name, True, False)
if resolved:
return resolved
if self.source:
return self.source.resolve_name(name, True, False)
return self.select_items.resolve_name(name, False, False)
def _find_source(self, name: str):
for ws in self.withs:
if ws.name == name:
return ws
if self.source:
return self.source.resolve_name(name, False, True)
return None
def resolve_name(self, name: str, in_scope: bool, find_source: bool):
if name == self.name or not name:
return self
if find_source:
return self._find_source(name)
parent_name, field_name = _split_name(name)
local_resolve = (parent_name is not None and parent_name == self.name)
if parent_name is None or local_resolve or in_scope:
return self._local_resolve(field_name if local_resolve else name,
in_scope or local_resolve)
sub_resolver = self._find_source(parent_name)
if not sub_resolver:
return self._local_resolve(name, True)
return sub_resolver.resolve_name(field_name, True, False)
def link_graph(self, graph: Graph, resolvers: List['Source'],
processed: Set['Source']):
if self._is_processed(processed):
return
node = graph.node_by_source(self)
sub_resolvers = [self]
sub_resolvers.extend(resolvers)
for ws in self.withs:
node.add_inlink(graph.node_by_source(ws), LinkType.WITH)
ws.link_graph(graph, sub_resolvers, processed)
if self.source:
node.add_inlink(graph.node_by_source(self.source), LinkType.FROM)
node.add_child(graph.node_by_source(self.source))
self.source.link_graph(graph, sub_resolvers, processed)
self.select_items.link_graph(graph, sub_resolvers, processed)
node.add_child(graph.node_by_source(self.select_items), LinkType.OUTPUT)
if self.where:
node.add_child(graph.node_by_source(self.where),
LinkType.EXPRESSION_AUX)
self.where.link_graph(graph, sub_resolvers, processed)
if self.group_by:
node.add_child(graph.node_by_source(self.group_by),
LinkType.EXPRESSION_AUX)
self.group_by.link_graph(graph, sub_resolvers, processed)
if self.having:
node.add_child(graph.node_by_source(self.having),
LinkType.EXPRESSION_AUX)
self.having.link_graph(graph, sub_resolvers, processed)
for set_op in self.set_ops:
node.add_inlink(graph.node_by_source(set_op), LinkType.SET_OP)
set_op.link_graph(graph, resolvers, processed)
for clause in self.clauses:
node.add_child(graph.node_by_source(clause),
LinkType.EXPRESSION_AUX)
clause.link_graph(graph, resolvers, processed)
for lateral in self.laterals:
node.add_child(graph.node_by_source(lateral), LinkType.SOURCE)
lateral.link_graph(graph, resolvers, processed)
def apply_schemas(self, schema_info: SchemaInfo):
if (schema_info.output and self.destination and
isinstance(self.destination, Table)):
self.destination.set_schema(schema_info.output)
schema_info.output = None
super().apply_schemas(schema_info)
class SetOpQuery(Query):
"""A select statment which is an operand to a set operation w/ the main select.
Attributes:
set_op: the set operation to apply to this query when processing e.g. `UNION`
"""
def __init__(self, set_op: str, parent: Source, name: Optional[str] = None):
super().__init__(parent, name)
self.set_op = set_op.upper()
def tree_str(self, indent: str = ''):
return (f'{indent}SET OPERATION: {self.set_op}\n' +
super().tree_str(indent))
def recompose(self, sep: str = ' '):
return f'{self.set_op}{sep}{super().recompose(sep)}'
class GeneralStatement(Source):
"""A general SQL statement, w/o specific information extracted.
Attributes:
statement_tokens: the list of tokens in this statement.
statement: the statement tokens as a string.
"""
def __init__(self, parent: Optional[Source], name: Optional[str],
| |
import sys
import numpy as np
from .orcadaq import OrcaDecoder, get_ccc
class ORCAStruck3302(OrcaDecoder):
"""
ORCA decoder for Struck 3302 digitizer data
"""
def __init__(self, *args, **kwargs):
self.decoder_name = 'ORSIS3302DecoderForEnergy'
self.orca_class_name = 'ORSIS3302Model'
self.decoded_values_template = {
'packet_id': {
'dtype': 'uint32',
},
'ievt': {
'dtype': 'uint32',
},
'energy': {
'dtype': 'uint32',
'units': 'adc',
},
'energy_first': {
'dtype': 'uint32',
},
'timestamp': {
'dtype': 'uint64',
'units': 'clock_ticks',
},
'crate': {
'dtype': 'uint8',
},
'card': {
'dtype': 'uint8',
},
'channel': {
'dtype': 'uint8',
},
'waveform': {
'dtype': 'uint16',
'datatype': 'waveform',
                'length': 65532, # max value. override this before initializing buffers to save RAM
'sample_period': 10, # override if a different clock rate is used
'sample_period_units': 'ns',
'units': 'adc',
},
}
super().__init__(*args, **kwargs) # also initializes the garbage df
self.decoded_values = {}
self.ievt = 0
self.skipped_channels = {}
# self.enabled_cccs = []
def get_decoded_values(self, channel=None):
if channel is None:
dec_vals_list = self.decoded_values.items()
if len(dec_vals_list) == 0:
print("ORSIS3302Model: Error: decoded_values not built yet!")
return None
return list(dec_vals_list)[0][1] # Get first thing we find
if channel in self.decoded_values: return self.decoded_values[channel]
print("ORSIS3302Model: Error: No decoded values for channel", channel)
return None
def set_object_info(self, object_info):
self.object_info = object_info
# parse object_info for important info
for card_dict in self.object_info:
crate = card_dict['Crate']
card = card_dict['Card']
int_enabled_mask = card_dict['internalTriggerEnabledMask']
ext_enabled_mask = card_dict['externalTriggerEnabledMask']
enabled_mask = int_enabled_mask | ext_enabled_mask
for channel in range(8):
# only care about enabled channels
if not ((enabled_mask >> channel) & 0x1): continue
ccc = get_ccc(crate, card, channel)
# save list of enabled channels
#self.enabled_cccs.append(ccc)
self.decoded_values[ccc] = {}
self.decoded_values[ccc].update(self.decoded_values_template)
sd = self.decoded_values[ccc] # alias
# get trace length(s). Should all be the same until
# multi-buffer mode is implemented AND each channel has its
# own buffer
trace_length = card_dict['sampleLengths'][int(channel/2)]
if trace_length <= 0 or trace_length > 2**16:
                    print('ORSIS3302Model Error: invalid trace_length', trace_length)
sys.exit()
self.decoded_values[ccc]['waveform']['length'] = trace_length
def max_n_rows_per_packet(self):
return 1
def decode_packet(self, packet, lh5_tables, packet_id, header_dict, verbose=False):
"""
see README for the 32-bit data word diagram
"""
# interpret the raw event data into numpy arrays of 16 and 32 bit ints
# does not copy data. p32 and p16 are read-only
p32 = np.frombuffer(packet, dtype=np.uint32)
p16 = np.frombuffer(packet, dtype=np.uint16)
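        # Header word layout as implied by the shifts/masks below (inferred from
        # this code, not from the Struck documentation):
        #   bit 0      : buffer-wrap flag
        #   bits 2-8   : n_lost_lsb
        #   bits 8-15  : channel
        #   bits 16-20 : card
        #   bits 21-24 : crate
        #   bits 25-31 : n_lost_msb
        # e.g. a word built as (2 << 21) | (5 << 16) | (3 << 8) decodes to
        # crate=2, card=5, channel=3.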
# read the crate/card/channel first
crate = (p32[0] >> 21) & 0xF
card = (p32[0] >> 16) & 0x1F
channel = (p32[0] >> 8) & 0xFF
ccc = get_ccc(crate, card, channel)
# aliases for brevity
tb = lh5_tables
# if the first key is an int, then there are different tables for
# each channel.
if isinstance(list(tb.keys())[0], int):
if ccc not in lh5_tables:
if ccc not in self.skipped_channels:
self.skipped_channels[ccc] = 0
self.skipped_channels[ccc] += 1
return
tb = lh5_tables[ccc]
ii = tb.loc
# store packet id
tb['packet_id'].nda[ii] = packet_id
# read the rest of the record
n_lost_msb = (p32[0] >> 25) & 0x7F
n_lost_lsb = (p32[0] >> 2) & 0x7F
n_lost_records = (n_lost_msb << 7) + n_lost_lsb
tb['crate'].nda[ii] = (p32[0] >> 21) & 0xF
tb['card'].nda[ii] = (p32[0] >> 16) & 0x1F
tb['channel'].nda[ii] = (p32[0] >> 8) & 0xFF
buffer_wrap = p32[0] & 0x1
wf_length32 = p32[1]
ene_wf_length32 = p32[2]
evt_header_id = p32[3] & 0xFF
tb['timestamp'].nda[ii] = p32[4] + ((p32[3] & 0xFFFF0000) << 16)
last_word = p32[-1]
# get the footer
tb['energy'].nda[ii] = p32[-4]
tb['energy_first'].nda[ii] = p32[-3]
extra_flags = p32[-2]
# compute expected and actual array dimensions
wf_length16 = 2 * wf_length32
orca_helper_length16 = 2
sis_header_length16 = 12 if buffer_wrap else 8
header_length16 = orca_helper_length16 + sis_header_length16
ene_wf_length16 = 2 * ene_wf_length32
footer_length16 = 8
expected_wf_length = len(p16) - orca_helper_length16 - sis_header_length16 - \
footer_length16 - ene_wf_length16
# error check: waveform size must match expectations
if wf_length16 != expected_wf_length or last_word != 0xdeadbeef:
print(len(p16), orca_helper_length16, sis_header_length16,
footer_length16)
print("ERROR: Waveform size %d doesn't match expected size %d." %
(wf_length16, expected_wf_length))
print(" The Last Word (should be 0xdeadbeef):",
hex(last_word))
exit()
# indexes of stuff (all referring to the 16 bit array)
i_wf_start = header_length16
i_wf_stop = i_wf_start + wf_length16
i_ene_start = i_wf_stop + 1
i_ene_stop = i_ene_start + ene_wf_length16
if buffer_wrap:
# start somewhere in the middle of the record
i_start_1 = p32[6] + header_length16 + 1
i_stop_1 = i_wf_stop # end of the wf record
i_start_2 = i_wf_start # beginning of the wf record
i_stop_2 = i_start_1
# handle the waveform(s)
#energy_wf = np.zeros(ene_wf_length16) # not used rn
tbwf = tb['waveform']['values'].nda[ii]
if wf_length32 > 0:
if not buffer_wrap:
if i_wf_stop - i_wf_start != expected_wf_length:
print("ERROR: event %d, we expected %d WF samples and only got %d" %
                          (self.ievt, expected_wf_length, i_wf_stop - i_wf_start))
tbwf[:expected_wf_length] = p16[i_wf_start:i_wf_stop]
else:
len1 = i_stop_1-i_start_1
len2 = i_stop_2-i_start_2
if len1+len2 != expected_wf_length:
print("ERROR: event %d, we expected %d WF samples and only got %d" %
                          (self.ievt, expected_wf_length, len1+len2))
exit()
tbwf[:len1] = p16[i_start_1:i_stop_1]
tbwf[len1:len1+len2] = p16[i_start_2:i_stop_2]
# set the event number (searchable HDF5 column)
tb['ievt'].nda[ii] = self.ievt
self.ievt += 1
tb.push_row()
class ORCAGretina4M(OrcaDecoder):
"""
Decoder for Majorana Gretina4M digitizer data
NOTE: <NAME> made some nice new summary slides on a 2019 LEGEND call
https://indico.legend-exp.org/event/117/contributions/683/attachments/467/717/mjd_data_format.pdf
"""
def __init__(self, *args, **kwargs):
self.decoder_name = 'ORGretina4MWaveformDecoder'
self.orca_class_name = 'ORGretina4MModel'
self.decoded_values_template = {
'packet_id': {
'dtype': 'uint32',
},
'ievt': {
'dtype': 'uint32',
},
'energy': {
'dtype': 'uint32',
'units': 'adc',
},
'timestamp': {
'dtype': 'uint32',
'units': 'clock_ticks',
},
'crate': {
'dtype': 'uint8',
},
'card': {
'dtype': 'uint8',
},
'channel': {
'dtype': 'uint8',
},
"board_id": {
'dtype': 'uint32',
},
'waveform': {
'dtype': 'int16',
'datatype': 'waveform',
                'length': 2016, # shorter if multisampling is used
'sample_period': 10,
'sample_period_units': 'ns',
'units': 'adc',
},
}
super().__init__(*args, **kwargs)
self.decoded_values = {}
self.ievt = 0
self.skipped_channels = {}
self.use_MS = False
self.wf_skip = 16 # the first ~dozen samples are sometimes junk
# channel pars for multisampling mode
self.ft_len = {}
self.ps = {}
self.div = {}
self.rises = np.zeros(2016)
self.remainders = np.zeros(2016)
def get_decoded_values(self, channel=None):
if channel is None:
dec_vals_list = self.decoded_values.items()
if len(dec_vals_list) == 0:
print("ORGretina4MModel: Error: decoded_values not built yet!")
return None
return list(dec_vals_list)[0][1] # Get first thing we find
if channel in self.decoded_values: return self.decoded_values[channel]
print("ORGretina4MModel: Error: No decoded values for channel", channel)
return None
def max_n_rows_per_packet(self):
return 1
def set_object_info(self, object_info):
self.object_info = object_info
# parse object_info for important info
for card_dict in self.object_info:
crate = card_dict['Crate']
card = card_dict['Card']
is_enabled = card_dict['Enabled']
ftcnt = card_dict['FtCnt']
presum_rates = [ 2, 4, 8, 10 ] # number presummed in MS
mrpsrt = card_dict['Mrpsrt'] # index for channel's presum rate
dividers = [1, 2, 4, 8 ] # dividers for presummed data
mrpsdv = card_dict['Mrpsdv'] # index for channel's divider
for channel in range(8):
# only care about enabled channels
if not is_enabled[channel]: continue
ccc = get_ccc(crate, card, channel)
self.decoded_values[ccc] = {}
self.decoded_values[ccc].update(self.decoded_values_template)
sd = self.decoded_values[ccc] # alias
# find MS parameters
# MS is on if FtCnt > 0
# forget pre-rising-edge MS: it's broken so MJ doesn't use it
# Skip samples at beginning, fully sample, then FtCnt samples of
# pre-sampled, divided by div. Make one long fully-sampled wf.
ft_len = ftcnt[channel]
self.ft_len[ccc] = ft_len
if self.is_multisampled(ccc):
ps = presum_rates[mrpsrt[channel]]
self.ps[ccc] = ps
self.div[ccc] = dividers[mrpsdv[channel]]
# chop off 3 values at the end because 2 are bad and we need
# one for interpolation
min_len = 2018 - ft_len - self.wf_skip + (ps-1)*(ft_len-3)
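                    # e.g. with hypothetical settings ft_len=512, ps=4, wf_skip=16:
                    # min_len = 2018 - 512 - 16 + 3*(512-3) = 3017 samples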
sd['waveform']['length'] = min_len
if min_len > len(self.remainders):
self.remainders.resize(min_len)
else: sd['waveform']['length'] = 2016 - self.wf_skip
def is_multisampled(self, ccc):
if ccc in self.ft_len: return self.ft_len[ccc] > 0
else: print('channel', ccc, 'not in ft_len...')
return False
def decode_packet(self, packet, lh5_tables, packet_id, header_dict, verbose=False):
"""
Parse the header for an individual event
"""
pu16 = np.frombuffer(packet, dtype=np.uint16)
p16 = np.frombuffer(packet, dtype=np.int16)
crate = (pu16[1] >> 5) & 0xF
card = pu16[1] & 0x1F
channel = pu16[4] & 0xf
ccc = get_ccc(crate, card, channel)
# aliases for brevity
tb = lh5_tables
if isinstance(tb, dict):
if ccc not in lh5_tables:
if ccc not in self.skipped_channels:
self.skipped_channels[ccc] = 0
self.skipped_channels[ccc] += 1
return
            tb = lh5_tables[ccc]
# From repo maxtaylordavies/BigGAN-PyTorch, file datasets.py
''' Datasets
This file contains definitions for our CIFAR, ImageFolder, and HDF5 datasets
'''
import os
import os.path
import sys
from PIL import Image
import numpy as np
from tqdm import tqdm, trange
import h5py as h5
import torch
import torchvision.datasets as dset
import torchvision.transforms as transforms
from torchvision.datasets.utils import download_url, check_integrity
import torch.utils.data as data
from torch.utils.data import DataLoader
IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm']
def is_image_file(filename):
"""Checks if a file is an image.
Args:
filename (string): path to a file
Returns:
bool: True if the filename ends with a known image extension
"""
filename_lower = filename.lower()
return any(filename_lower.endswith(ext) for ext in IMG_EXTENSIONS)
def find_classes(dir):
classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]
classes.sort()
class_to_idx = {classes[i]: i for i in range(len(classes))}
return classes, class_to_idx
def make_dataset(dir, class_to_idx):
images = []
dir = os.path.expanduser(dir)
for target in tqdm(sorted(os.listdir(dir))):
d = os.path.join(dir, target)
if not os.path.isdir(d):
continue
for root, _, fnames in sorted(os.walk(d)):
for fname in sorted(fnames):
if is_image_file(fname):
path = os.path.join(root, fname)
item = (path, class_to_idx[target])
images.append(item)
return images
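# Illustration (hypothetical paths): for root/cat/1.png and root/dog/2.png,
# make_dataset returns [('root/cat/1.png', 0), ('root/dog/2.png', 1)], with
# class indices assigned by find_classes in sorted class-name order.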
def pil_loader(path):
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
def accimage_loader(path):
import accimage
try:
return accimage.Image(path)
except IOError:
# Potentially a decoding problem, fall back to PIL.Image
return pil_loader(path)
def default_loader(path):
from torchvision import get_image_backend
if get_image_backend() == 'accimage':
return accimage_loader(path)
else:
return pil_loader(path)
class SWET(data.Dataset):
def __init__(self, root, transform=None, target_transform=None,
loader=default_loader, load_in_mem=False,
index_filename='swet_imgs.npz', **kwargs):
classes, class_to_idx = find_classes(root)
# Load pre-computed image directory walk
if os.path.exists(index_filename):
print('Loading pre-saved Index file %s...' % index_filename)
imgs = np.load(index_filename)['imgs']
# If first time, walk the folder directory and save the
# results to a pre-computed file.
else:
print('Generating Index file %s...' % index_filename)
imgs = make_dataset(root, class_to_idx)
np.savez_compressed(index_filename, **{'imgs': imgs})
if len(imgs) == 0:
raise (RuntimeError("Found 0 images in subfolders of: " + root + "\n"
"Supported image extensions are: " + ",".join(
IMG_EXTENSIONS)))
self.root = root
self.imgs = imgs
self.classes = classes
self.class_to_idx = class_to_idx
self.transform = transform
self.target_transform = target_transform
self.loader = loader
self.load_in_mem = load_in_mem
if self.load_in_mem:
print('Loading all images into memory...')
self.data, self.labels = [], []
for index in tqdm(range(len(self.imgs))):
path, target = imgs[index][0], imgs[index][1]
self.data.append(self.transform(self.loader(path)))
self.labels.append(target)
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
"""
if self.load_in_mem:
img = self.data[index]
target = self.labels[index]
else:
path, target = self.imgs[index]
img = self.loader(str(path))
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
# print(img.size(), target)
return img, int(target)
def __len__(self):
return len(self.imgs)
def __repr__(self):
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
fmt_str += ' Root Location: {}\n'.format(self.root)
tmp = ' Transforms (if any): '
fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
tmp = ' Target Transforms (if any): '
fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
return fmt_str
class SWET_HDF5(data.Dataset):
def __init__(self, root, transform=None, target_transform=None,
load_in_mem=False, train=True, download=False, validate_seed=0,
val_split=0, **kwargs): # last four are dummies
self.root = root
self.num_imgs = len(h5.File(root, 'r')['labels'])
# self.transform = transform
self.target_transform = target_transform
# Set the transform here
self.transform = transform
# load the entire dataset into memory?
self.load_in_mem = load_in_mem
# If loading into memory, do so now
if self.load_in_mem:
print('Loading %s into memory...' % root)
with h5.File(root, 'r') as f:
self.data = f['imgs'][:]
self.labels = f['labels'][:]
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
"""
# If loaded the entire dataset in RAM, get image from memory
if self.load_in_mem:
img = self.data[index]
target = self.labels[index]
# Else load it from disk
else:
with h5.File(self.root, 'r') as f:
img = f['imgs'][index]
target = f['labels'][index]
# if self.transform is not None:
# img = self.transform(img)
# Apply my own transform
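        # maps uint8 pixel values in [0, 255] to floats in [-1, 1]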
img = ((torch.from_numpy(img).float() / 255) - 0.5) * 2
if self.target_transform is not None:
target = self.target_transform(target)
return img, int(target)
def __len__(self):
return self.num_imgs
# return len(self.f['imgs'])
class ImageFolder(data.Dataset):
"""A generic data loader where the images are arranged in this way: ::
root/dogball/xxx.png
root/dogball/xxy.png
root/dogball/xxz.png
root/cat/123.png
root/cat/nsdf3.png
root/cat/asd932_.png
Args:
root (string): Root directory path.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
loader (callable, optional): A function to load an image given its path.
Attributes:
classes (list): List of the class names.
class_to_idx (dict): Dict with items (class_name, class_index).
imgs (list): List of (image path, class_index) tuples
"""
def __init__(self, root, transform=None, target_transform=None,
loader=default_loader, load_in_mem=False,
index_filename='imagenet_imgs.npz', **kwargs):
classes, class_to_idx = find_classes(root)
# Load pre-computed image directory walk
if os.path.exists(index_filename):
print('Loading pre-saved Index file %s...' % index_filename)
imgs = np.load(index_filename)['imgs']
# If first time, walk the folder directory and save the
# results to a pre-computed file.
else:
print('Generating Index file %s...' % index_filename)
imgs = make_dataset(root, class_to_idx)
np.savez_compressed(index_filename, **{'imgs': imgs})
if len(imgs) == 0:
raise (RuntimeError("Found 0 images in subfolders of: " + root + "\n"
"Supported image extensions are: " + ",".join(
IMG_EXTENSIONS)))
self.root = root
self.imgs = imgs
self.classes = classes
self.class_to_idx = class_to_idx
self.transform = transform
self.target_transform = target_transform
self.loader = loader
self.load_in_mem = load_in_mem
if self.load_in_mem:
print('Loading all images into memory...')
self.data, self.labels = [], []
for index in tqdm(range(len(self.imgs))):
path, target = imgs[index][0], imgs[index][1]
self.data.append(self.transform(self.loader(path)))
self.labels.append(target)
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
"""
if self.load_in_mem:
img = self.data[index]
target = self.labels[index]
else:
path, target = self.imgs[index]
img = self.loader(str(path))
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
# print(img.size(), target)
return img, int(target)
def __len__(self):
return len(self.imgs)
def __repr__(self):
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
fmt_str += ' Root Location: {}\n'.format(self.root)
tmp = ' Transforms (if any): '
fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
tmp = ' Target Transforms (if any): '
fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
return fmt_str
''' ILSVRC_HDF5: A dataset to support I/O from an HDF5 to avoid
having to load individual images all the time. '''
class ILSVRC_HDF5(data.Dataset):
def __init__(self, root, transform=None, target_transform=None,
load_in_mem=False, train=True, download=False, validate_seed=0,
val_split=0, **kwargs): # last four are dummies
self.root = root
self.num_imgs = len(h5.File(root, 'r')['labels'])
# self.transform = transform
self.target_transform = target_transform
# Set the transform here
self.transform = transform
# load the entire dataset into memory?
self.load_in_mem = load_in_mem
# If loading into memory, do so now
if self.load_in_mem:
print('Loading %s into memory...' % root)
with h5.File(root, 'r') as f:
self.data = f['imgs'][:]
self.labels = f['labels'][:]
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
"""
# If loaded the entire dataset in RAM, get image from memory
if self.load_in_mem:
img = self.data[index]
target = self.labels[index]
# Else load it from disk
else:
with h5.File(self.root, 'r') as f:
img = f['imgs'][index]
target = f['labels'][index]
# if self.transform is not None:
# img = self.transform(img)
# Apply my own transform
img = ((torch.from_numpy(img).float() / 255) - 0.5) * 2
if self.target_transform is not None:
target = self.target_transform(target)
return img, int(target)
def __len__(self):
return self.num_imgs
# return len(self.f['imgs'])
import pickle
class CIFAR10(dset.CIFAR10):
def __init__(self, root, train=True,
transform=None, target_transform=None,
download=True, validate_seed=0,
val_split=0, load_in_mem=True, **kwargs):
self.root = os.path.expanduser(root)
self.transform = transform
self.target_transform = target_transform
self.train = train # training set or test set
self.val_split = val_split
if download:
self.download()
if not self._check_integrity():
raise RuntimeError('Dataset not found or corrupted.' +
' You can use download=True to download it')
# now load the picked numpy arrays
self.data = []
self.labels = []
for fentry in self.train_list:
f = fentry[0]
file = os.path.join(self.root, self.base_folder, f)
fo = open(file, 'rb')
if sys.version_info[0] == 2:
entry = pickle.load(fo)
modify summary with #cont keyword
summary = ' '.join(tags_list) + ":" + title
# Call new function
# NOTE: Weird syntax in new_command requires passing a list
return new_command([summary])
cont_command.parser = subparsers.add_parser(
'cont',
description="Same as lifelogger new, but copies summary from previous event.")
cont_command.parser.add_argument(
'num_prev_events',
nargs="?",
type=int,
default=10,
help="Number of previous events to display.",
)
cont_command.parser.set_defaults(func=cont_command)
def sync_nomie():
"""Synchronize Nomie backup file with corresponding Calendar
:return:
"""
# Imports that are used only in this function
"""
Keep here to make it cleaner and easier moving this command
to its own file in the future
"""
import os
from ..config import NOMIE_BACKUP_PATH
import json
# Define function locally
# TODO: move this to tools/nomie.py module
def parse_events(backup_data):
"""Parse all events from Nomie backup into a list
:param backup_data: json-like backup data
:return: list of events data
"""
# Store tracker metadata, keyed by Nomie id
trackers = backup_data['trackers']
trackers_dict = {}
# Save human-readable label
for tracker in trackers:
trackers_dict[tracker['_id']] = dict()
trackers_dict[tracker['_id']]['label'] = tracker['label']
# Save groups trackers belong to
groups = backup_data['meta'][1]['groups']
# NOTE: Below is not really necessary
# for group, ids in groups.iteritems():
# for tracker_id in ids:
# if 'groups' not in trackers_dict[tracker_id]:
# # ensure groups list is initialized
# trackers_dict[tracker_id]['groups'] = list()
# # add current group to list for this tracker
# trackers_dict[tracker_id]['groups'].append(group)
# Set special group colors
colors_dict = {
'green': '2',
'cocoa': '7' # check log
}
# Support for changing the name of a tracker for a substitute
substitutes = {}
# Event fields: title, startdate, enddate, description
events = backup_data['events']
calendarEvents = []
corruptedCount = 0
addedCount = 0
for event in events:
# Extract needed data
try:
tracker_id = event['parent']
trackername = trackers_dict[tracker_id]['label']
# Substitute tracker name if substitute is defined
try:
trackername = substitutes[trackername]
except:
                    pass
# As Nomie 3 doesn't support spaces in tracker names, substitute with underscores
trackername = trackername.replace(' ', '_')
print(trackername)
# Value should be time in seconds of the event
# Note there is one single event for timer (at the end of timer)
event_duration = event['value']
# Currently automatically convert lack of value to 0
                if event_duration is None:
event_duration = 0
timestamp_in_millisecs = event['time']
timestamp_in_secs = timestamp_in_millisecs / 1000.0
# Now build event fields
# Time stored is that of end
enddate = datetime.fromtimestamp(timestamp_in_secs)
# Start date is <value> seconds before the end
startdate = enddate - timedelta(seconds=event_duration)
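                # e.g. a record with value=3600 becomes a one-hour event whose
                # end is the stored timestamp and whose start is an hour earlier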
duration_str = str(timedelta(seconds=event_duration)).split(".")[0] # drop microseconds
title = '#nomie: ' + trackername
description = trackername + ' for ' + duration_str
# Set event color according to group
if tracker_id in groups['Exercise']:
color_id = colors_dict['green']
else:
color_id = None
toAdd = {
'title': title,
'startdate': startdate,
'enddate': enddate,
'description': description,
'colorId': color_id,
# Metadata
'time': timestamp_in_millisecs,
'tag': trackername
}
calendarEvents += [toAdd]
addedCount += 1
except:
corruptedCount += 1
print("Shoot! This record seems to be corrupted. Try manually adding it or fixing the file.")
print(event)
print("Corrupted record count: " + str(corruptedCount))
print("Events successfully added: " + str(addedCount))
# Add notes into corresponding event
notes = backup_data["notes"]
# NOTE: By construction, calendarEvents list is ordered by enddate
endtimes = [event['enddate'] for event in calendarEvents]
assert all(a < b for a, b in zip(endtimes, endtimes[1:]))
event_iter = iter(calendarEvents)
        current_event = next(event_iter)
for note in notes:
# Advance event until timestamp is larger or raise exemption
while current_event['time'] < note['time']:
previous_event = current_event
try:
                    current_event = next(event_iter)
except StopIteration as err:
# End of events list reached
break
# At this point, previous_event should match current note
# Parse note value
lines = note['value'].splitlines()
if len(lines) < 2:
print("Bad note value (single line? -> Empty content?): \n %s" % note['value'])
continue
note_header = lines[0]
note_short = lines[1]
note_long = '\n'.join(lines[2:])
# Check tag
            r = re.compile(r'#(?P<tag>\w+) ((?P<h>\d+)h )*((?P<m>\d+)m )*((?P<s>\d+)s )*\s+at (?P<time_str>\d\d:\d\d)')
out = r.match(note_header)
            if out is None:
                print("ERROR: Bad parsing of %s" % note_header)
                continue
parsed_values = out.groupdict()
assert parsed_values['tag'].lower() == previous_event['tag'].lower()
# Add note content to event summary and description
previous_event['title'] += " " + note_short
previous_event['description'] += "\n" + note_long
return calendarEvents
# Ensure Nomie backup file exists
if not os.path.exists(NOMIE_BACKUP_PATH):
print("Failed - No available backup in %s" % NOMIE_BACKUP_PATH)
return False
# Ensure Nomie calendar id is set in config
if 'Nomie' not in config['calendars']:
print("Error: Calendar Nomie not available in config file")
return False
# Load backup file (json format)
backup_data = json.loads(open(NOMIE_BACKUP_PATH).read())
# Parse Nomie events into Calendar-like event list
events = parse_events(backup_data)
# Get Calendar service (entrypoint to API)
service = connect()
# Ensure Nomie calendar exists
all_cals = service.calendarList().list().execute()['items']
calendar_names = [cal['summary'] for cal in all_cals]
if 'Nomie' not in calendar_names:
from termcolor import colored
print(colored("Warning: Nomie calendar missing, creating it!", 'yellow'))
created_calendar = service.calendars().insert(
body={
'summary': 'Nomie'
}
).execute()
# Set color of calendar
new_id = created_calendar['id']
calendar_list_entry = service.calendarList().get(calendarId=new_id).execute()
calendar_list_entry['colorId'] = '1' # cocoa
updated_calendar_list_entry = service.calendarList().update(
calendarId=new_id,
body=calendar_list_entry
).execute()
# Save id of new calendar to local config
config['calendars']['Nomie']['id'] = created_calendar['id']
# Get and save iCal address
new_ical_url = input("Paste new Secret address in iCal format (from settings) --> ")
config['calendars']['Nomie']['ical_url'] = new_ical_url
# Ensure new config is stored
config.save()
# Ensure local database is up to date
from .local import download_all
download_all()
# Keep only new events
new_events = list()
from ..database import Event
for event in events:
# Generate unique Nomie event Id based on data
nomie_id = 'nomie' + event['startdate'].strftime('%Y%m%d%H%M%S')
try:
# If event exists, ignore this in new list
Event.get(Event.uid == nomie_id + "@google.com")
except Event.DoesNotExist as exc:
# Event not found, add it to new list
new_events.append(event)
# Insert new Nomie events into Calendar
new_entries_counter = 0
for event in new_events:
# Generate unique Nomie event Id based on data
nomie_id = 'nomie' + event['startdate'].strftime('%Y%m%d%H%M%S')
# TODO: Check event does not already exist
# TODO: Maybe find last non-synced event, or iterate backwards until reaching already-synced id
body = {
'summary': event['title'],
'description': event['description'],
'start': {
'dateTime': event['startdate'].isoformat(),
'timeZone': config['timezone']
},
'end': {
'dateTime': event['enddate'].isoformat(),
'timeZone': config['timezone']
},
'id': nomie_id
}
# Add color option if custom color
if event['colorId'] is not None:
body['colorId'] = event['colorId']
try:
result = service.events().insert(
calendarId=config['calendars']['Nomie']['id'],
body=body
).execute()
except HttpError as err:
if int(err.resp['status']) == 409:
from termcolor import colored
print(colored("Error: event already exists, delete Nomie calendar to reset!", 'red'))
# # Event already exists in chosen calendar
# body['status'] = "confirmed" # set visible again
# result = service.events().update(
# calendarId=config['calendars']['Nomie']['id'],
# eventId=body['id'],
# body=body
# ).execute()
else:
raise
if result['status'] == 'confirmed':
print("Added new entry! Link: ", result['htmlLink'])
else:
sys.stdout.write("Failed :( - status %s\n" % result['status'])
return False
new_entries_counter += 1
print("Added %d new entries!" % new_entries_counter)
return True
sync_nomie.parser = subparsers.add_parser(
'sync-nomie',
description="Synchronize Nomie backup events to its own Calendar.")
sync_nomie.parser.set_defaults(func=sync_nomie)
def for_command(duration, summary):
summary = ' '.join(summary)
service = connect()
times = [
datetime.now(),
datetime.now() + timedelta(minutes=duration)
]
times.sort()
start, end = times
print("Adding %i-minute event >> %s" % (abs(duration), summary))
result = service.events().insert(
calendarId=config['calendar_id'],
body={
'summary': summary,
'start': {
'dateTime': start.isoformat(),
'timeZone': config['timezone']
},
'end': {
'dateTime': end.isoformat(),
'timeZone': config['timezone']
}
}
).execute()
if result['status'] == 'confirmed':
print("Added! Link: ", result['htmlLink'])
return True
else:
sys.stdout.write("Failed :( - status %s\n" % result['status'])
return False
for_command.parser = subparsers.add_parser(
'for',
description="Adds an event that lasts *for* the specified number of "
"minutes, relative to now."
)
for_command.parser.add_argument(
'duration',
type=int,
help="The duration of the event. Give a negative number, and the event "
"will be set to have started 'duration' minutes ago, and end now; "
"otherwise it starts now and ends in 'duration' minutes time."
)
for_command.parser.add_argument(
'summary',
help="The summary for the event.",
nargs='*'
)
for_command.parser.set_defaults(func=for_command)
def add(summary, start=None, end=None, duration=None):
summary = ' '.join(summary)
if start is None:
start = datetime.now()
else:
start = dateutil.parser.parse(start)
if end is not None:
end = dateutil.parser.parse(end)
if duration is None:
duration = 0
if end is None:
        end = start + timedelta(minutes=duration)
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 8 14:27:24 2018
@author: Meagatron
"""
#Matrix Profile Version 1.4.0
#A Python implementation of the matrix profile algorithm described in <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> (2016): 'Matrix Profile I: All Pairs Similarity Joins for Time Series: A Unifying View that Includes Motifs, Discords and Shapelets', available at http://www.cs.ucr.edu/~eamonn/MatrixProfile.html
#Currently, this implementation supports parallel processing and early termination. A planned update will support the updating of the matrix profile when either time series in the comparison is updated. A GPU implementation is also planned.
#The latest version of this code can be found at https://github.com/javidlakha/matrix-profile
import pandas as pd
import numpy as np
import itertools
import time
import random
import os
import multiprocessing as mp
from scipy.fftpack import fft, ifft
def sliding_dot_product(time_series, query): #Time complexity: O(n log n)
#This function computes the dot products of a 'query' sequence of length M and every contiguous subsequence of
#length M in the time series. It is used in the distance calculations in MASS (below). The heart of the matrix
#profile algorithm is the insight that whilst the complexity of calculating the dot product of every 'instigating'
#subsequence that starts at position 1, 2, ..., N in the time series with every other 'target' subsequence of equal
#length is O(n^2), the dot product of two vectors is the inverse Fourier transform of the dot product of their
#Fourier transforms. The time complexity of the Fast Fourier Transform is O(n log n).
#NB. Computational complexity depends only on the length of the time series - not on the length of the 'query'
#sequence. This is a useful property: short patterns do not take more time to identify than long patterns.
#Based on the pseudocode - Keogh et al (2016): http://www.cs.ucr.edu/~eamonn/PID4481997_extend_Matrix%20Profile_I.pdf
n = time_series.shape[0]
m = query.shape[0]
query = query[::-1] #Reverse query
query = np.append(query,np.zeros(n-m)) #Append reversed query with n-m zeroes
query = fft(query) #Fast Fourier Transform of reversed query
time_series = fft(time_series) #Fast Fourier Transform of time_series
QT = np.multiply(query, time_series) #Element-wise multiplication of time_series and reversed query
dot_product = np.real(ifft(QT)) #Inverse Fast Fourier Transform
return dot_product
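def _sliding_dot_product_example():
    #Usage sketch added for illustration (synthetic data, not part of the
    #original module). For a query of length m, entry m-1+i of the result is
    #the dot product of the query with time_series[i:i+m], up to FFT round-off.
    ts = np.random.randn(128)
    q = ts[10:20]
    dp = sliding_dot_product(ts, q)
    assert np.allclose(dp[19], np.dot(ts[10:20], q))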
def MASS(time_series, query):
#Calculates the normalised distances between every 'query' sequence of length M with every contiguous subsequence
#of M in the time series. Except for the sliding dot product (which is O(n log n)) the time complexity of this
#algorithm is O(n).
#Based on the Matlab code - Mueen at al (2015): http://www.cs.unm.edu/~mueen/FastestSimilaritySearch.html
n = time_series.shape[0]
m = query.shape[0]
query_mean = np.mean(query) #Query mean (scalar)
query_std = np.std(query) #Query standard deviation (scalar)
    time_series_mean = pd.Series(time_series).rolling(m).mean().values #Time series rolling mean; window is the length of the query
    time_series_std = pd.Series(time_series).rolling(m).std(ddof=0).values #Time series rolling standard deviation; window is the length of the query. No degrees of freedom correction.
dot_product = sliding_dot_product(time_series, query)
distances = 2 * (m - (dot_product[m-1:n] - m * query_mean * time_series_mean[m-1:n]) / (query_std * time_series_std[m-1:n]))
distances = np.sqrt(distances + 0j) #Normalised Euclidean distance. See page 4 of http://www.cs.ucr.edu/~eamonn/PID4481997_extend_Matrix%20Profile_I.pdf
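    #distances[i] is the z-normalised distance between the query and
    #time_series[i:i+m]; the first valid rolling mean/std is at index m-1.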
return distances
def STAMP_single(target_series, query_series=None, subsequence_length=10, max_time=600, self_join=False, verbose=False):
#THIS IS THE SINGLE-THREADED VERSION OF THE ALGORITHM. IT IS BETTER TO USE 'STAMP_parallel'.
#Based on the pseudocode - Keogh et al (2016): http://www.cs.ucr.edu/~eamonn/PID4481997_extend_Matrix%20Profile_I.pdf
n_original = query_series.shape[0]
m = target_series.shape[0]
if n_original > m:
raise ValueError('Query series should not be larger than target series.')
if m > n_original:
query_series = np.concatenate([query_series,np.zeros(m - n_original)])
n = query_series.shape[0]
#Initialise the matrix profile distances to be very high
matrix_profile = 999999999 * np.ones(n - subsequence_length + 1)
matrix_profile_index = np.zeros(n - subsequence_length + 1)
#Matrix profile is an anytime algorithm: its accuracy improves (at a diminishing rate) the longer it runs, but its
#output is useful even if it is terminated early. However, if the algorithm is terminated early, it is desirable to
#have compared (to every other 'target' subsequence in the time series) 'instigating' subsequences starting at
#random points which are evenly distributed throughout the time series rather than the first M 'instigating'
#subsequences in the time series. Hence, the indices (the position in the time series from which 'instigating'
#subsequences begin) are shuffled.
indices = [i for i in range(0, n_original - subsequence_length + 1)]
random.shuffle(indices)
#Matrix profile is an anytime algorithm. Consequently, considerations of computational time and expense mean that
#for large time series it may be desirable to terminate the algorithm after it has run for a user-specified time.
start_time = time.time()
update_time = time.time()
max_time = time.time() + max_time
iteration = 0
for index in indices:
#Stop updating the matrix profile once time is up
if time.time() > max_time:
break
#Compute progress update it at most once per second
if verbose == True:
if time.time() - update_time > 1:
os.system('cls')
print('{}% complete'.format(round(iteration/len(indices)*100,3)))
print('Elapsed time: {} seconds'.format(round(time.time() - start_time,1)))
update_time = time.time()
iteration += 1
#Compute the distances between the subsequence starting at a particular point in the time series and every
#other sub-sequence of equal length in the time series.
distances = MASS(target_series, query_series[index : index + subsequence_length])
#Exclude trivial cases where the matrix profile will be very low because the sequence is being matched to
#itself. These occur when the subsequence being compared is within a distance of (subsequence_length / 2)
#of the position in the time series.
if self_join == True:
exclusion_range = (int(max(0, index - subsequence_length/2)), int(min(index + subsequence_length/2 + 1, n)))
distances[exclusion_range[0]:exclusion_range[1]] = 99999
#Update the matrix profile and the matrix profile index if a subsequence which is a closer match is discovered
matrix_profile_index = np.where(matrix_profile <= distances, matrix_profile_index, index)
matrix_profile = np.minimum(matrix_profile,distances)
output = pd.DataFrame([np.real(matrix_profile_index), np.real(matrix_profile)]).T
output.columns = ['Matrix_Profile_Index','Matrix_Profile']
return output
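#Usage sketch (hypothetical parameters, added for illustration): approximate
#self-join of a synthetic random walk, capped at 60 seconds of computation.
#  ts = np.cumsum(np.random.randn(2000))
#  mp = STAMP_single(ts, ts, subsequence_length=50, max_time=60, self_join=True)
#  mp['Matrix_Profile'].idxmax()  #position of the most anomalous subsequence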
def STAMP_parallel(target_series, query_series, subsequence_length=10, max_time=600, self_join=False, verbose=False):
#Based on the pseudocode - Keogh et al (2016): http://www.cs.ucr.edu/~eamonn/PID4481997_extend_Matrix%20Profile_I.pdf
n_original = query_series.shape[0]
m = target_series.shape[0]
if n_original > m:
raise ValueError('Query series should not be larger than target series.')
if m > n_original:
query_series = np.concatenate([query_series,np.zeros(m - n_original)])
n = query_series.shape[0]
processes = mp.cpu_count()
matrix_profile = {}
matrix_profile_index = {}
#Matrix profile is an anytime algorithm: its accuracy improves (at a diminishing rate) the longer it runs, but its
#output is useful even if it is terminated early. However, if the algorithm is terminated early, it is desirable to
#have compared (to every other 'target' subsequence in the time series) 'instigating' subsequences starting at
#random points which are evenly distributed throughout the time series rather than the first M 'instigating'
#subsequences in the time series. Hence, the indices (the position in the time series from which 'instigating'
#subsequences begin) are shuffled.
indices = [i for i in range(0, n_original - subsequence_length + 1)]
random.shuffle(indices)
#The indices are then divided by the number of CPUs. The algorithm is easy to parallelise because each element of
#the matrix profile is minimum distance between the 'instigating' subsequence (of user-specified length) which
#starts at that particular position in the time series and every other 'target' subsequence in the time series.
#Hence, if the 'instigating' time series are divided between CPUs and sub-matrix profiles computed, the overall
#matrix profile will be the element-wise minimum of the sub-profiles.
indices = np.array_split(np.array(indices), processes)
pool = mp.Pool(processes=processes)
results = [pool.apply_async(update_matrix_profile, args=(target_series, query_series, self_join, subsequence_length, indices[s], s, n, max_time, verbose)) for s in range(0,processes)]
output = [p.get() for p in results]
pool.close()
#The overall matrix profile is the element-wise minimum of each sub-profile, and each element of the overall
#matrix profile index is the time series position of the corresponding sub-profile.
s = 0
for subindices in indices:
matrix_profile[s] = output[s][0]
matrix_profile_index[s] = output[s][1]
if s != 0:
matrix_profile_index[s] = np.where(matrix_profile[s-1] <= matrix_profile[s], matrix_profile_index[s-1], matrix_profile_index[s])
matrix_profile[s] = np.minimum(matrix_profile[s-1],matrix_profile[s])
s += 1
output = pd.DataFrame([np.real(matrix_profile_index[s-1]), np.real(matrix_profile[s-1])]).T
output.columns = ['Matrix_Profile_Index','Matrix_Profile']
    return output
mask_val=np.nan,
edge_blend=None, interp_zeros=False):
""" Rotates an cube (3d array or image sequence) providing a vector or
corresponding angles. Serves for rotating an ADI sequence to a common north
given a vector with the corresponding parallactic angles for each frame. By
default bicubic interpolation is used (opencv).
Parameters
----------
array : numpy ndarray
Input 3d array, cube.
angle_list : list
Vector containing the parallactic angles.
imlib : str, optional
See the documentation of the ``vip_hci.preproc.frame_rotate`` function.
interpolation : str, optional
See the documentation of the ``vip_hci.preproc.frame_rotate`` function.
cxy : tuple of int, optional
Coordinates X,Y of the point with respect to which the rotation will be
performed. By default the rotation is done with respect to the center
of the frames, as it is returned by the function
vip_hci.var.frame_center.
nproc : int, optional
Whether to rotate the frames in the sequence in a multi-processing
fashion. Only useful if the cube is significantly large (frame size and
number of frames).
border_mode : str, optional
See the documentation of the ``vip_hci.preproc.frame_rotate`` function.
edge_blend : str, optional
See the documentation of the ``vip_hci.preproc.frame_rotate`` function.
interp_zeros : str, optional
See the documentation of the ``vip_hci.preproc.frame_rotate`` function.
Returns
-------
array_der : numpy ndarray
Resulting cube with de-rotated frames.
"""
if array.ndim != 3:
raise TypeError('Input array is not a cube or 3d array.')
n_frames = array.shape[0]
if nproc is None:
nproc = cpu_count() // 2 # Hyper-threading doubles the # of cores
if nproc == 1:
array_der = np.zeros_like(array)
for i in range(n_frames):
array_der[i] = frame_rotate(array[i], -angle_list[i], imlib=imlib,
interpolation=interpolation, cxy=cxy,
border_mode=border_mode,
mask_val=mask_val,
edge_blend=edge_blend,
interp_zeros=interp_zeros)
elif nproc > 1:
global data_array
data_array = array
res = pool_map(nproc, _frame_rotate_mp, iterable(range(n_frames)),
angle_list, imlib, interpolation, cxy, border_mode,
mask_val, edge_blend, interp_zeros)
array_der = np.array(res)
return array_der
def _frame_rotate_mp(num_fr, angle_list, imlib, interpolation, cxy,
border_mode, mask_val, edge_blend, interp_zeros):
framerot = frame_rotate(data_array[num_fr], -angle_list[num_fr], imlib,
interpolation, cxy, border_mode, mask_val,
edge_blend, interp_zeros)
return framerot
def _find_indices_adi(angle_list, frame, thr, nframes=None, out_closest=False,
truncate=False, max_frames=200):
""" Returns the indices to be left in frames library for annular ADI median
subtraction, LOCI or annular PCA.
Parameters
----------
angle_list : numpy ndarray, 1d
Vector of parallactic angle (PA) for each frame.
frame : int
Index of the current frame for which we are applying the PA threshold.
thr : float
PA threshold.
nframes : int or None, optional
Exact number of indices to be left. For annular median-ADI subtraction,
where we keep the closest frames (after the PA threshold). If None then
all the indices are returned (after the PA threshold).
out_closest : bool, optional
If True then the function returns the indices of the 2 closest frames.
truncate : bool, optional
Useful for annular PCA, when we want to discard too far away frames and
avoid increasing the computational cost.
max_frames : int, optional
Max number of indices to be left. To be provided if ``truncate`` is
True (used e.g. in pca_annular).
Returns
-------
indices : numpy ndarray, 1d
Vector with the indices left.
If ``out_closest`` is True then the function returns instead:
index_prev, index_foll
"""
n = angle_list.shape[0]
index_prev = 0
index_foll = frame
for i in range(0, frame):
if np.abs(angle_list[frame] - angle_list[i]) < thr:
index_prev = i
break
else:
index_prev += 1
for k in range(frame, n):
if np.abs(angle_list[k] - angle_list[frame]) > thr:
index_foll = k
break
else:
index_foll += 1
if out_closest:
return index_prev, index_foll - 1
else:
if nframes is not None:
# For annular ADI median subtraction, returning n_frames closest
# indices (after PA thresholding)
window = nframes // 2
ind1 = index_prev - window
ind1 = max(ind1, 0)
ind2 = index_prev
ind3 = index_foll
ind4 = index_foll + window
ind4 = min(ind4, n)
indices = np.array(list(range(ind1, ind2)) +
list(range(ind3, ind4)), dtype='int32')
else:
# For annular PCA, returning all indices (after PA thresholding)
half1 = range(0, index_prev)
half2 = range(index_foll, n)
indices = np.array(list(half1) + list(half2), dtype='int32')
# The goal is to keep min(num_frames/2, ntrunc) in the library after
# discarding those based on the PA threshold
if truncate:
thr = min(n-1, max_frames)
all_indices = np.array(list(half1)+list(half2))
if len(all_indices) > thr:
# then truncate and update indices
# first sort by dPA
dPA = np.abs(angle_list[all_indices]-angle_list[frame])
sort_indices = all_indices[np.argsort(dPA)]
# keep the ntrunc first ones
good_indices = sort_indices[:thr]
# sort again, this time by increasing indices
indices = np.sort(good_indices)
return indices
def _compute_pa_thresh(ann_center, fwhm, delta_rot=1):
""" Computes the parallactic angle threshold [degrees]
Replacing approximation: delta_rot * (fwhm/ann_center) / np.pi * 180
"""
return np.rad2deg(2 * np.arctan(delta_rot * fwhm / (2 * ann_center)))
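# e.g. with hypothetical values fwhm=4 px, ann_center=20 px and delta_rot=1,
# the threshold is 2*arctan(4/40) ~ 11.4 degrees of parallactic rotation.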
def _define_annuli(angle_list, ann, n_annuli, fwhm, radius_int, annulus_width,
delta_rot, n_segments, verbose, strict=False):
""" Function that defines the annuli geometry using the input parameters.
Returns the parallactic angle threshold, the inner radius and the annulus
center for each annulus.
"""
if ann == n_annuli - 1:
inner_radius = radius_int + (ann * annulus_width - 1)
else:
inner_radius = radius_int + ann * annulus_width
ann_center = inner_radius + (annulus_width / 2)
pa_threshold = _compute_pa_thresh(ann_center, fwhm, delta_rot)
mid_range = np.abs(np.amax(angle_list) - np.amin(angle_list)) / 2
if pa_threshold >= mid_range - mid_range * 0.1:
new_pa_th = float(mid_range - mid_range * 0.1)
msg = 'WARNING: PA threshold {:.2f} is too big, recommended '
msg+=' value for annulus {:.0f}: {:.2f}'
if strict:
print(msg.format(pa_threshold,ann, new_pa_th))
#raise ValueError(msg.format(pa_threshold,ann, new_pa_th))
else:
print('PA threshold {:.2f} is likely too big, will be set to '
'{:.2f}'.format(pa_threshold, new_pa_th))
pa_threshold = new_pa_th
if verbose:
if pa_threshold > 0:
print('Ann {} PA thresh: {:5.2f} Ann center: '
'{:3.0f} N segments: {} '.format(ann + 1, pa_threshold,
ann_center, n_segments))
else:
print('Ann {} Ann center: {:3.0f} N segments: '
'{} '.format(ann + 1, ann_center, n_segments))
return pa_threshold, inner_radius, ann_center
def rotate_fft(array, angle):
""" Rotates a frame or 2D array using Fourier transform phases:
Rotation = 3 consecutive lin. shears = 3 consecutive FFT phase shifts
See details in Larkin et al. (1997) and Hagelberg et al. (2016).
Note: this is significantly slower than interpolation methods
(e.g. opencv/lanczos4 or ndimage), but preserves the flux better
(by construction it preserves the total power). It is more prone to
large-scale Gibbs artefacts, so make sure no sharp edge is present in
the image to be rotated.
! Warning: if input frame has even dimensions, the center of rotation
will NOT be between the 4 central pixels, instead it will be on the top
right of those 4 pixels. Make sure your images are centered with
respect to that pixel before rotation.
Parameters
----------
array : numpy ndarray
Input image, 2d array.
angle : float
Rotation angle.
Returns
-------
array_out : numpy ndarray
Resulting frame.
"""
y_ori, x_ori = array.shape
while angle<0:
angle+=360
while angle>360:
angle-=360
if angle>45:
dangle = angle%90
if dangle>45:
dangle = -(90-dangle)
nangle = np.rint(angle/90)
array_in = np.rot90(array, nangle)
else:
dangle = angle
array_in = array.copy()
if y_ori%2 or x_ori%2:
# NO NEED TO SHIFT BY 0.5px: FFT assumes rot. center on cx+0.5, cy+0.5!
array_in = array_in[:-1,:-1]
a = np.tan(np.deg2rad(dangle)/2)
b = -np.sin(np.deg2rad(dangle))
ori_y, ori_x = array_in.shape
cy, cx = frame_center(array)
arr_xy = np.mgrid[0:ori_y,0:ori_x]
arr_y = arr_xy[0]-cy
arr_x = arr_xy[1]-cx
# TODO: FFT padding not currently working properly. Only option '0' works.
s_x = _fft_shear(array_in, arr_x, a, ax=1, pad=0)
s_xy = _fft_shear(s_x, arr_y, b, ax=0, pad=0)
s_xyx = _fft_shear(s_xy, arr_x, a, ax=1, pad=0)
if y_ori%2 or x_ori%2:
# shift + crop back to odd dimensions , using FFT
array_out = np.zeros([s_xyx.shape[0]+1,s_xyx.shape[1]+1])
# NO NEED TO SHIFT BY 0.5px: FFT assumes rot. center on cx+0.5, cy+0.5!
array_out[:-1,:-1] = np.real(s_xyx)
else:
array_out = np.real(s_xyx)
return array_out
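# Usage sketch (hypothetical input, added for illustration). Per the docstring,
# prefer odd-sized, smooth (no sharp edges), well-centred frames:
#   yy, xx = np.mgrid[0:101, 0:101]
#   frame = np.exp(-((xx - 60.)**2 + (yy - 50.)**2) / (2 * 5.**2))
#   rotated = rotate_fft(frame, 30.)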
def _fft_shear(arr, arr_ori, c, ax, pad=0, shift_ini=True):
ax2=1-ax%2
freqs = fftfreq(arr_ori.shape[ax2])
sh_freqs = fftshift(freqs)
arr_u = np.tile(sh_freqs, (arr_ori.shape[ax],1))
if ax==1:
arr_u = arr_u.T
s_x = fftshift(arr)
s_x = fft(s_x, axis=ax)
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 18 14:34:01 2018
@author: <NAME>
"""
import numpy as np
import itertools
import scipy.stats
from sklearn.utils.validation import check_random_state
class AbstractHyper(object):
""" abstract class representing an hyperparameter (or a set of hyperparametre) """
def __init__(self, random_state=None):
self.random_state = random_state
@property
def random_state(self):
return self._random_state
@random_state.setter
def random_state(self, new_random_state):
self._random_state = check_random_state(new_random_state)
self._set_random_state()
def _set_random_state(self):
# Do nothing in that class
return self
def get_rand(self):
"""
generate one random sample
Returns
-------
one sample
"""
index = self._random_state.choice(len(self.values))
# I'd rather not use directly np.random.choice because it is making conversion to np.int/np.float
return self.values[index]
# Example type(np.random.choice([0,1])) == np.int32
def get_rands(self, n):
""" generates n samples """
return [self.get_rand() for _ in range(n)]
def get_size(self):
""" return the number of choices, or a proxy if parameter is continuous """
if hasattr(self, "values"):
return len(self.values)
else:
            # Remark: in case of non-uniform choice, entropy can be used to derive an equivalent number of choices
raise NotImplementedError("Please implement that in classes")
@property
def size(self):
if not hasattr(self, "_size"):
setattr(self, "_size", self.get_size())
return self._size
def distplot(self):
""" plot the distribution """
import seaborn as sns
sns.distplot([self.get_rand() for _ in range(10000)])
class HyperChoice(AbstractHyper):
""" Random Choice among a set of values
Examples
--------
>>> hp = HyperChoice(('a','b','c'))
>>> hp.get_rand()
Remark: use 'HyperComposition' to make weighting choices
"""
def __init__(self, values, random_state=None):
self.values = values
super().__init__(random_state=random_state)
class HyperMultipleChoice(AbstractHyper):
""" Select one or more item among a choice
Examples
--------
>>> hp = HyperMultipleChoice(("a","b","c","d","e"))
>>> hp.get_rand()
"""
def __init__(self, possible_choices, min_number=1, max_number=None, proba_choice=0.9, random_state=None):
self.possible_choices = possible_choices
self.proba_choice = proba_choice
self.min_number = min_number
self.max_number = max_number
if not isinstance(self.proba_choice, (list, tuple)):
self.proba_choice = [self.proba_choice] * len(self.possible_choices)
if self.min_number is None:
self.min_number = 0
if self.min_number < 0:
raise ValueError("min_number (%d) should be >= 0" % self.min_number)
if self.min_number > len(self.possible_choices):
raise ValueError(
"min_number (%d) should be <= len of choice (%d)" % (self.min_number, len(self.possible_choices))
)
if self.max_number is not None and self.max_number < self.min_number:
raise ValueError("max_number (%d) should be > than min_number (%d)" % (self.max_number, self.min_number))
if self.max_number is not None and self.max_number > len(self.possible_choices):
raise ValueError(
"max_number (%d) should be <= len of choice (%d)" % (self.max_number, len(self.possible_choices))
)
self._precomputed_choices = None
self._precomputed_probas = None
self._use_precomputed = len(self.possible_choices) <= 10
super().__init__(random_state=random_state)
def _precompute(self):
if self._precomputed_choices is not None:
return
all_choices = []
for choice in itertools.product(*[[0, 1] for _ in range(len(self.possible_choices))]):
achoice = np.array(choice)
nb = achoice.sum()
if nb < self.min_number:
continue
if self.max_number is not None and nb > self.max_number:
continue
probas = [(1 - p) + (2 * p - 1) * c for p, c in zip(self.proba_choice, choice)]
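            # per-item Bernoulli weight: evaluates to p when the item is taken
            # (c=1) and to (1-p) when it is not (c=0)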
probas = np.product(probas)
all_choices.append((choice, probas))
choices, probas = zip(*all_choices)
probas = np.array(probas)
probas /= probas.sum()
self._precomputed_choices = choices
self._precomputed_probas = probas
def get_size(self):
if self._use_precomputed:
self._precompute()
return len(self._precomputed_choices)
else:
return 2 ** (len(self.possible_choices)) # upper bound : because there are inpossible cases
def get_rand(self):
if self._use_precomputed:
self._precompute()
return self._get_rand_precomputed()
else:
return self._get_rand()
def _get_rand_precomputed(self):
""" generate using precomputed values """
choice = self._precomputed_choices[
self.random_state.choice(len(self._precomputed_choices), p=self._precomputed_probas)
]
return tuple([self.possible_choices[i] for i, c in enumerate(choice) if c == 1])
def _get_rand(self):
""" generic using reject """
# There is probably a more efficient way to draw from that distributions
MAX_ITER = 1000
iter_ = 0
goon = True
while goon:
if iter_ >= MAX_ITER + 1:
break
iter_ += 1
goon = False
to_take = self.random_state.uniform(0, 1, len(self.possible_choices)) <= self.proba_choice
# to_take = np.random.uniform(0, 1, len(self.possible_choices)) <= self.proba_choice
ii = np.arange(len(self.possible_choices))
            nb = to_take.sum()
if self.min_number is not None and nb < self.min_number:
continue
if self.max_number is not None and nb > self.max_number:
continue
return tuple([self.possible_choices[i] for i in ii[to_take]])
# to_take = np.random.choice(len(self.possible_choices), replace=False, size=self.min_number)
to_take = self.random_state.choice(len(self.possible_choices), replace=False, size=self.min_number)
return tuple([self.possible_choices[i] for i in to_take])
class HyperRangeInt(AbstractHyper):
""" Integers between start and end """
def __init__(self, start, end, step=1, random_state=None):
if end <= start:
raise ValueError("end can't be lower than start")
self.values = [x for x in range(start, end + step, step)] # end included
super().__init__(random_state=random_state)
class HyperRangeFloat(AbstractHyper):
""" Float between start and end """
def __init__(self, start, end, n=100, step=None, random_state=None):
if end <= start:
raise ValueError("end can't be lower than start")
if step is not None:
n = int(np.floor((end - start) / step) + 1)
self.values = [start + i * step for i in range(n)]
else:
self.values = [start + (end - start) * i / n for i in range(n + 1)] # +1 to include start and end
super().__init__(random_state=random_state)
class HyperRangeBetaFloat(AbstractHyper):
""" Float between start and end but with a Beta Law distribution """
def __init__(self, start=0, end=1, alpha=3, beta=1, random_state=None):
if end <= start:
raise ValueError("end can't be lower than start")
if alpha <= 0:
raise ValueError("alpha can't be less than 0")
if beta <= 0:
raise ValueError("beta can't be less than 0")
self.start = start
self.end = end
self.alpha = alpha
self.beta = beta
self._beta_dist = scipy.stats.beta(a=self.alpha, b=self.beta)
self._beta_dist.random_state = random_state
super().__init__(random_state=random_state)
def _set_random_state(self):
self._beta_dist.random_state = self._random_state
return self
def get_rand(self):
return self._beta_dist.rvs() * (self.end - self.start) + self.start
def get_size(self):
beta_std = np.sqrt(self.alpha * self.beta / ((self.alpha + self.beta) ** 2 * (self.alpha + self.beta + 1)))
return (
100 * beta_std * np.sqrt(12)
) # So that for a uniform law I assume '100' different values (and I scale down based on standard deviation)
class HyperRangeBetaInt(HyperRangeBetaFloat):
__doc__ = HyperRangeBetaFloat.__doc__
__doc__ += "\nForce integer result " ""
def get_rand(self):
return int(super(HyperRangeBetaInt, self).get_rand() + 0.5)
class HyperLogRangeFloat(AbstractHyper):
""" Float log Uniform between start and end """
def __init__(self, start, end, n=100, random_state=None):
if start <= 0:
raise ValueError("start can't be negative or null")
if end <= 0:
raise ValueError("end can't be negative or null")
if end <= start:
raise ValueError("end can't be lower than start")
values = [
np.log10(start) + (np.log10(end) - np.log10(start)) * i / n for i in range(n + 1)
] # +1 to include start and end
self.values = [np.exp(np.log(10) * l) for l in values]
super().__init__(random_state=random_state)
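# --- Added usage sketch (not part of the original module) ---
# A log-uniform range is the usual choice for scale-like parameters such as a
# learning rate, while the beta-law range skews draws toward `end` when alpha > beta.
# Assumes AbstractHyper exposes get_rand() built from self.values for the log range.
def _example_scale_hypers(seed=0):
    """Hypothetical helper: sample a learning-rate-like value and a skewed fraction."""
    lr_hyper = HyperLogRangeFloat(1e-5, 1e-1, n=50, random_state=seed)
    frac_hyper = HyperRangeBetaFloat(start=0.0, end=1.0, alpha=3, beta=1, random_state=seed)
    return lr_hyper.get_rand(), frac_hyper.get_rand()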
def _try_set_random_state(dist, random_state):
if hasattr(dist, "random_state"):
dist.random_state = random_state
return dist
def _get_rand(hyper, random_state=None):
    """ Draw a random sample from 'something'.
    'something' can be a hyper-parameter object, a constant, or a list/tuple.
    This allows the implicit use of a list/tuple to model a choice and of any
    other object to model a constant.
    Parameters
    ----------
    hyper : list, tuple, hyper-parameter class or constant
"""
if random_state is not None:
_try_set_random_state(hyper, random_state) # Will modify the object
if hasattr(hyper, "get_rand"):
return hyper.get_rand()
elif isinstance(hyper, (list, tuple)):
        gen = check_random_state(random_state)  # Here the seed won't be set on the drawn object itself...
return hyper[gen.choice(len(hyper))]
        # Careful: never use np.random.choice(hyper), as it puts everything into a numpy array and can cause incorrect type casting
elif hasattr(hyper, "rvs"):
return hyper.rvs(random_state=random_state)
else:
return hyper # constant
def _get_size(hyper):
    """ Return the number of possible choices for 'something'.
    'something' can be a hyper-parameter object, a constant, or a list/tuple.
    This allows the implicit use of a list/tuple to model a choice and of any
    other object to model a constant.
"""
if hasattr(hyper, "size"):
return hyper.size
elif isinstance(hyper, (list, tuple)):
return len(hyper)
elif hasattr(hyper, "rvs"):
return 10 # heuristic
else:
return 1
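# --- Added usage sketch (not part of the original module) ---
# _get_rand/_get_size accept a constant, a list/tuple of choices, a scipy
# distribution (anything with .rvs), or one of the Hyper classes above. The
# helper below is hypothetical and assumes AbstractHyper provides .size and
# a settable .random_state, as the dispatch code above expects.
def _example_hyper_dispatch(seed=42):
    """Hypothetical helper: the three common kinds of 'hyper' inputs."""
    constant = _get_rand("adam", random_state=seed)                 # returned as-is
    choice = _get_rand(["gini", "entropy"], random_state=seed)      # drawn from the list
    ranged = _get_rand(HyperRangeInt(1, 5), random_state=seed)      # drawn from the hyper object
    sizes = [_get_size("adam"), _get_size(["gini", "entropy"]), _get_size(HyperRangeInt(1, 5))]
    return (constant, choice, ranged), sizes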
class HyperListOfDifferentSizes(AbstractHyper):
    """ Draw lists of varying length.
Examples
--------
>>> hp = HyperListOfDifferentSizes(HyperRangeInt(1, 5), HyperRangeInt(50, 150))
>>> hp.get_rand()
"""
def __init__(self, nb_distrib, value_distrib, random_state=None):
self.nb_distrib = nb_distrib
self.value_distrib = value_distrib
super().__init__(random_state=random_state)
def _set_random_state(self):
_try_set_random_state(self.nb_distrib, random_state=self._random_state)
_try_set_random_state(self.value_distrib, random_state=self._random_state)
return self
def get_rand(self):
list_len = _get_rand(self.nb_distrib, self.random_state)
return [_get_rand(self.value_distrib, self.random_state) for _ in range(list_len)]
def get_size(self):
        return _get_size(self.nb_distrib) * _get_size(self.value_distrib)
class HyperComposition(AbstractHyper):
    """ Composition of distributions : randomly choose among several distributions.
    The elements of the list can be :
    * of size 1 : a HyperParameter (all weighted equally)
    * of size 2 : a (weight, HyperParameter) pair
    Parameters
    ----------
    dict_vals : list or tuple of elements of size 1 or 2
Examples
--------
>>> hp = HyperComposition([ HyperRangeInt(0,100) , HyperRangeInt(100,1000)])
>>> hp.get_rand()
>>> hp = HyperComposition([ (0.9,HyperRangeInt(0,100)) ,(0.1,HyperRangeInt(100,1000)) ])
    >>> hp.get_rand()
value for the enrichment property.
:type value: str
:param endpoint_names: Required. The list of endpoints for which the enrichment is applied to
the message.
:type endpoint_names: list[str]
"""
_validation = {
'key': {'required': True},
'value': {'required': True},
'endpoint_names': {'required': True, 'min_items': 1},
}
_attribute_map = {
'key': {'key': 'key', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
'endpoint_names': {'key': 'endpointNames', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(EnrichmentProperties, self).__init__(**kwargs)
self.key = kwargs['key']
self.value = kwargs['value']
self.endpoint_names = kwargs['endpoint_names']
class ErrorDetails(msrest.serialization.Model):
"""Error details.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: The error code.
:vartype code: str
:ivar http_status_code: The HTTP status code.
:vartype http_status_code: str
:ivar message: The error message.
:vartype message: str
:ivar details: The error details.
:vartype details: str
"""
_validation = {
'code': {'readonly': True},
'http_status_code': {'readonly': True},
'message': {'readonly': True},
'details': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'http_status_code': {'key': 'httpStatusCode', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'details': {'key': 'details', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ErrorDetails, self).__init__(**kwargs)
self.code = None
self.http_status_code = None
self.message = None
self.details = None
class EventHubConsumerGroupInfo(msrest.serialization.Model):
"""The properties of the EventHubConsumerGroupInfo object.
Variables are only populated by the server, and will be ignored when sending a request.
:param properties: The tags.
:type properties: dict[str, str]
:ivar id: The Event Hub-compatible consumer group identifier.
:vartype id: str
:ivar name: The Event Hub-compatible consumer group name.
:vartype name: str
:ivar type: the resource type.
:vartype type: str
:ivar etag: The etag.
:vartype etag: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'properties': {'key': 'properties', 'type': '{str}'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EventHubConsumerGroupInfo, self).__init__(**kwargs)
self.properties = kwargs.get('properties', None)
self.id = None
self.name = None
self.type = None
self.etag = None
class EventHubConsumerGroupsListResult(msrest.serialization.Model):
"""The JSON-serialized array of Event Hub-compatible consumer group names with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: List of consumer groups objects.
:type value: list[~azure.mgmt.iothub.v2019_11_04.models.EventHubConsumerGroupInfo]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[EventHubConsumerGroupInfo]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EventHubConsumerGroupsListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = None
class EventHubProperties(msrest.serialization.Model):
"""The properties of the provisioned Event Hub-compatible endpoint used by the IoT hub.
Variables are only populated by the server, and will be ignored when sending a request.
:param retention_time_in_days: The retention time for device-to-cloud messages in days. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages.
:type retention_time_in_days: long
:param partition_count: The number of partitions for receiving device-to-cloud messages in the
Event Hub-compatible endpoint. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-
messaging#device-to-cloud-messages.
:type partition_count: int
:ivar partition_ids: The partition ids in the Event Hub-compatible endpoint.
:vartype partition_ids: list[str]
:ivar path: The Event Hub-compatible name.
:vartype path: str
:ivar endpoint: The Event Hub-compatible endpoint.
:vartype endpoint: str
"""
_validation = {
'partition_ids': {'readonly': True},
'path': {'readonly': True},
'endpoint': {'readonly': True},
}
_attribute_map = {
'retention_time_in_days': {'key': 'retentionTimeInDays', 'type': 'long'},
'partition_count': {'key': 'partitionCount', 'type': 'int'},
'partition_ids': {'key': 'partitionIds', 'type': '[str]'},
'path': {'key': 'path', 'type': 'str'},
'endpoint': {'key': 'endpoint', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EventHubProperties, self).__init__(**kwargs)
self.retention_time_in_days = kwargs.get('retention_time_in_days', None)
self.partition_count = kwargs.get('partition_count', None)
self.partition_ids = None
self.path = None
self.endpoint = None
class ExportDevicesRequest(msrest.serialization.Model):
"""Use to provide parameters when requesting an export of all devices in the IoT hub.
All required parameters must be populated in order to send to Azure.
:param export_blob_container_uri: Required. The export blob container URI.
:type export_blob_container_uri: str
:param exclude_keys: Required. The value indicating whether keys should be excluded during
export.
:type exclude_keys: bool
"""
_validation = {
'export_blob_container_uri': {'required': True},
'exclude_keys': {'required': True},
}
_attribute_map = {
'export_blob_container_uri': {'key': 'exportBlobContainerUri', 'type': 'str'},
'exclude_keys': {'key': 'excludeKeys', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(ExportDevicesRequest, self).__init__(**kwargs)
self.export_blob_container_uri = kwargs['export_blob_container_uri']
self.exclude_keys = kwargs['exclude_keys']
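# --- Added usage sketch (not part of the generated models) ---
# Shows the kwargs-based construction these msrest models expect; the container
# URI below is purely illustrative.
def _example_export_devices_request():
    """Hypothetical helper: build a bulk device export request."""
    return ExportDevicesRequest(
        export_blob_container_uri="https://example.blob.core.windows.net/export-container",
        exclude_keys=True,
    )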
class FailoverInput(msrest.serialization.Model):
"""Use to provide failover region when requesting manual Failover for a hub.
All required parameters must be populated in order to send to Azure.
:param failover_region: Required. Region the hub will be failed over to.
:type failover_region: str
"""
_validation = {
'failover_region': {'required': True},
}
_attribute_map = {
'failover_region': {'key': 'failoverRegion', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(FailoverInput, self).__init__(**kwargs)
self.failover_region = kwargs['failover_region']
class FallbackRouteProperties(msrest.serialization.Model):
"""The properties of the fallback route. IoT Hub uses these properties when it routes messages to the fallback endpoint.
All required parameters must be populated in order to send to Azure.
    :param name: The name of the route. The name can only include alphanumeric characters, periods,
     underscores and hyphens, must be at most 64 characters long, and must be unique.
:type name: str
    :param source: Required. The source to which the routing rule is to be applied. For example,
DeviceMessages. Possible values include: "Invalid", "DeviceMessages", "TwinChangeEvents",
"DeviceLifecycleEvents", "DeviceJobLifecycleEvents".
:type source: str or ~azure.mgmt.iothub.v2019_11_04.models.RoutingSource
:param condition: The condition which is evaluated in order to apply the fallback route. If the
condition is not provided it will evaluate to true by default. For grammar, See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language.
:type condition: str
:param endpoint_names: Required. The list of endpoints to which the messages that satisfy the
condition are routed to. Currently only 1 endpoint is allowed.
:type endpoint_names: list[str]
:param is_enabled: Required. Used to specify whether the fallback route is enabled.
:type is_enabled: bool
"""
_validation = {
'source': {'required': True},
'endpoint_names': {'required': True, 'max_items': 1, 'min_items': 1},
'is_enabled': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'source': {'key': 'source', 'type': 'str'},
'condition': {'key': 'condition', 'type': 'str'},
'endpoint_names': {'key': 'endpointNames', 'type': '[str]'},
'is_enabled': {'key': 'isEnabled', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(FallbackRouteProperties, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.source = kwargs['source']
self.condition = kwargs.get('condition', None)
self.endpoint_names = kwargs['endpoint_names']
self.is_enabled = kwargs['is_enabled']
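# --- Added usage sketch (not part of the generated models) ---
# A minimal fallback route, mirroring the validation above (exactly one endpoint,
# required source/is_enabled); the values are illustrative only.
def _example_fallback_route():
    """Hypothetical helper: build a fallback route for device-to-cloud messages."""
    return FallbackRouteProperties(
        name="$fallback",
        source="DeviceMessages",
        condition="true",
        endpoint_names=["events"],
        is_enabled=True,
    )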
class FeedbackProperties(msrest.serialization.Model):
"""The properties of the feedback queue for cloud-to-device messages.
:param lock_duration_as_iso8601: The lock duration for the feedback queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:type lock_duration_as_iso8601: ~datetime.timedelta
:param ttl_as_iso8601: The period of time for which a message is available to consume before it
is expired by the IoT hub. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-
messaging#cloud-to-device-messages.
:type ttl_as_iso8601: ~datetime.timedelta
:param max_delivery_count: The number of times the IoT hub attempts to deliver a message on the
feedback queue. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-
to-device-messages.
:type max_delivery_count: int
"""
_validation = {
'max_delivery_count': {'maximum': 100, 'minimum': 1},
}
_attribute_map = {
'lock_duration_as_iso8601': {'key': 'lockDurationAsIso8601', 'type': 'duration'},
'ttl_as_iso8601': {'key': 'ttlAsIso8601', 'type': 'duration'},
'max_delivery_count': {'key': 'maxDeliveryCount', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(FeedbackProperties, self).__init__(**kwargs)
self.lock_duration_as_iso8601 = kwargs.get('lock_duration_as_iso8601', None)
self.ttl_as_iso8601 = kwargs.get('ttl_as_iso8601', None)
self.max_delivery_count = kwargs.get('max_delivery_count', None)
class ImportDevicesRequest(msrest.serialization.Model):
"""Use to provide parameters when requesting an import of all devices in the hub.
All required parameters must be populated in order to send to Azure.
:param input_blob_container_uri: Required. The input blob container URI.
:type input_blob_container_uri: str
:param output_blob_container_uri: Required. The output blob container URI.
:type output_blob_container_uri: str
"""
_validation = {
'input_blob_container_uri': {'required': True},
'output_blob_container_uri': {'required': True},
}
_attribute_map = {
'input_blob_container_uri': {'key': 'inputBlobContainerUri', 'type': 'str'},
'output_blob_container_uri': {'key': 'outputBlobContainerUri', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ImportDevicesRequest, self).__init__(**kwargs)
self.input_blob_container_uri = kwargs['input_blob_container_uri']
self.output_blob_container_uri = kwargs['output_blob_container_uri']
class IotHubCapacity(msrest.serialization.Model):
"""IoT Hub capacity information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar minimum: The minimum number of units.
:vartype minimum: long
:ivar maximum: The maximum number of units.
:vartype maximum: long
:ivar default: The default number of units.
:vartype default: long
:ivar scale_type: The type of the scaling enabled. Possible values include: "Automatic",
"Manual", "None".
:vartype scale_type: str or ~azure.mgmt.iothub.v2019_11_04.models.IotHubScaleType
"""
_validation = {
'minimum': {'readonly': True, 'maximum': 1, 'minimum': 1},
'maximum': {'readonly': True},
'default': {'readonly': True},
'scale_type': {'readonly': True},
}
_attribute_map = {
'minimum': {'key': 'minimum', 'type': 'long'},
| |
# Copyright 2016 Cisco Systems.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import mock
from neutron.tests import base
from networking_cisco.apps.saf.common import config
from networking_cisco.apps.saf.common import utils
from networking_cisco.apps.saf.db import dfa_db_models as dbm
from networking_cisco.apps.saf.server.services.firewall.native import (
fabric_setup_base as fsb)
import networking_cisco.apps.saf.server.services.firewall.native.fw_constants \
as fw_const
TENANT_NAME = 'TenantA'
TENANT_ID = '0000-1111-2222-5555'
FW_ID = '0000-aaaa-bbbb-ccce'
NET_ID = '0000-1111-bbbb-ccce'
OUT_NET_ID = '0000-1112-bbbb-ccce'
RTR_NET_ID = '0000-1113-bbbb-ccce'
SUBNET_ID = '0000-2222-bbbb-ccce'
OUT_SUBNET_ID = '0000-2222-bbbc-ccce'
RTR_SUBNET_ID = '0000-2222-bbbd-ccce'
FW_NAME = 'FwA'
POLCY_ID = '0000-aaaa-bbbb-cccc'
FW_TYPE = 'TE'
ROUTER_ID = '0000-aaaa-bbbb-5555'
IN_SUBNET = '172.16.31.10'
IN_SUBNET_AND_MASK = '172.16.31.10/24'
IN_START = '192.168.127.12'
IN_SEC_GW = '172.16.58.3'
IN_GW = '172.16.17.32'
IN_DCNM_GW = '192.168.3.11'
IN_END = '172.16.58.3'
SEGMENTATION_ID = 87999
OUT_SUBNET = '172.16.31.10'
OUT_SUBNET_AND_MASK = '172.16.31.10/24'
RTR_SUBNET_AND_MASK = '9.9.9.0/24'
OUT_START = '172.16.17.32'
OUT_SEC_GW = '192.168.127.12'
OUT_GW = '172.16.31.10'
OUT_DCNM_GW = '172.16.17.32'
OUT_END = '192.168.127.12'
OUT_SEGMENTATION_ID = 88000
EXT_PART = 34500
VLAN_ID = 0
try:
OrderedDict = collections.OrderedDict
except AttributeError:
import ordereddict
OrderedDict = ordereddict.OrderedDict
class FakeClass(object):
"""Fake class. """
@classmethod
def imitate(cls, *others):
for other in others:
for name in other.__dict__:
try:
setattr(cls, name, mock.Mock())
except (TypeError, AttributeError):
pass
return cls
@classmethod
def set_return(cls, class_name, fn_name, return_val):
getattr(cls, fn_name).return_value = return_val
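# --- Added illustration (not part of the original tests) ---
# FakeClass.imitate replaces every attribute of the given classes with a Mock so
# that a class under test can inherit from the fake instead of the real mixins;
# FabricBaseTest.setUp below uses the same pattern. _ExampleMixin is hypothetical.
class _ExampleMixin(object):
    def lookup(self):
        return "real"
def _example_imitate():
    fake_base = FakeClass.imitate(_ExampleMixin)
    FakeClass.set_return(_ExampleMixin, 'lookup', 'mocked')
    return fake_base.lookup()  # returns 'mocked'; _ExampleMixin itself is untouched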
class FabricBaseTest(base.BaseTestCase):
"""A test suite to exercise the Fabric setup Base. """
def setUp(self):
'''Setup for the test scripts '''
super(FabricBaseTest, self).setUp()
self._init_values()
config = self._fill_cfg()
self.cfg = config
self.cfg = config.CiscoDFAConfig().cfg
fsb.FabricBase.__bases__ = (FakeClass.imitate(dbm.DfaDBMixin,
fsb.FabricApi),)
FakeClass.set_return(dbm.DfaDBMixin, 'get_all_fw_db', dict())
mock.patch('networking_cisco.apps.saf.db.dfa_db_models.'
'DfaSegmentTypeDriver').start()
mock.patch('networking_cisco.apps.saf.db.dfa_db_models.'
'DfasubnetDriver').start()
mock.patch('networking_cisco.apps.saf.server.'
'dfa_openstack_helper.DfaNeutronHelper').start()
mock.patch('networking_cisco.apps.saf.db.dfa_db_models.'
'DfaDBMixin.update_fw_db').start()
self.upd_fw_db_res_mock = mock.patch(
'networking_cisco.apps.saf.db.dfa_db_models.DfaDBMixin.'
'update_fw_db_result').start()
self.app_state_final_res_mock = mock.patch(
'networking_cisco.apps.saf.db.dfa_db_models.DfaDBMixin.'
'append_state_final_result').start()
self.fabric_base = fsb.FabricBase()
self.add_nwk_db_mock = mock.patch.object(self.fabric_base,
'add_network_db').start()
self.fabric_base.store_dcnm(mock.MagicMock())
def _init_values(self):
self.tenant_name = TENANT_NAME
self.tenant_id = TENANT_ID
self.net_id = NET_ID
self.out_net_id = OUT_NET_ID
self.rtr_net_id = RTR_NET_ID
self.subnet_id = SUBNET_ID
self.out_subnet_id = OUT_SUBNET_ID
self.rtr_subnet_id = RTR_SUBNET_ID
self.fw_id = FW_ID
self.fw_name = FW_NAME
self.policy_id = POLCY_ID
self.fw_type = FW_TYPE
self.router_id = ROUTER_ID
self.in_subnet = IN_SUBNET
self.in_subnet_and_mask = IN_SUBNET_AND_MASK
self.in_gw = IN_GW
self.in_dcnm_gw = IN_DCNM_GW
self.in_sec_gw = IN_SEC_GW
self.in_start = IN_START
self.in_end = IN_END
self.segmentation_id = SEGMENTATION_ID
self.in_srvc_nwk_name = self.fw_id[0:4] + fw_const.IN_SERVICE_NWK + (
self.fw_id[len(self.fw_id) - 4:])
self.out_subnet = OUT_SUBNET
self.out_subnet_and_mask = OUT_SUBNET_AND_MASK
self.out_gw = OUT_GW
self.out_dcnm_gw = OUT_DCNM_GW
self.out_sec_gw = OUT_SEC_GW
self.out_start = OUT_START
self.out_end = OUT_END
self.out_segmentation_id = OUT_SEGMENTATION_ID
self.out_srvc_nwk_name = self.fw_id[0:4] + fw_const.OUT_SERVICE_NWK + (
self.fw_id[len(self.fw_id) - 4:])
self.mock_fw_dict = self._prepare_fw_dict()
self.net_dict = self._prepare_net_dict("in")
self.out_net_dict = self._prepare_net_dict("out")
self.rtr_net_dict = self._prepare_rtr_net_dict()
self.rtr_subnet_and_mask = RTR_SUBNET_AND_MASK
self.ext_part = EXT_PART
def _fill_cfg(self):
config.default_firewall_opts['firewall'][
'fw_auto_serv_nwk_create'] = True
config.default_firewall_opts['firewall'][
'fw_service_host_profile'] = fw_const.HOST_PROF
config.default_firewall_opts['firewall'][
'fw_service_host_fwd_mode'] = fw_const.HOST_FWD_MODE
config.default_firewall_opts['firewall'][
'fw_service_ext_profile'] = fw_const.EXT_PROF
config.default_firewall_opts['firewall'][
'fw_service_ext_fwd_mode'] = fw_const.EXT_FWD_MODE
config.default_firewall_opts['firewall'][
'fw_service_part_vrf_profile'] = fw_const.PART_PROF
config.default_firewall_opts['firewall']['fw_mgmt_ip'] = '1.1.1.1'
config.default_dcnm_opts['dcnm']['vlan_id_min'] = 2
config.default_dcnm_opts['dcnm']['vlan_id_max'] = 200
config.default_dcnm_opts['dcnm']['segmentation_id_min'] = 20000
config.default_dcnm_opts['dcnm']['segmentation_id_max'] = 30000
config.default_dcnm_opts['dcnm']['segmentation_reuse_timeout'] = 20
return config
def test_fabric_base_init(self):
'''Wrapper for the init'''
pass
def _prepare_fw_dict(self):
mock_fw_dict = {'rules': {}, 'tenant_name': self.tenant_name,
'tenant_id': self.tenant_id, 'fw_id': self.fw_id,
'fw_name': self.fw_name,
'firewall_policy_id': self.policy_id,
'fw_type': self.fw_type, 'router_id': self.router_id}
return mock_fw_dict
def _prepare_net_dict(self, direc):
if direc == 'in':
srvc_name = self.in_srvc_nwk_name
srvc_seg = SEGMENTATION_ID
srvc_prof = config.default_firewall_opts[
'firewall']['fw_service_host_profile']
srvc_fwd_mode = config.default_firewall_opts[
'firewall']['fw_service_host_fwd_mode']
else:
srvc_name = self.out_srvc_nwk_name
srvc_seg = OUT_SEGMENTATION_ID
srvc_prof = config.default_firewall_opts[
'firewall']['fw_service_ext_profile']
srvc_fwd_mode = config.default_firewall_opts[
'firewall']['fw_service_ext_fwd_mode']
network_dict = {'tenant_id': self.tenant_id, 'name': srvc_name,
'segmentation_id': srvc_seg, 'vlan': VLAN_ID,
'config_profile': srvc_prof, 'fwd_mode': srvc_fwd_mode}
return network_dict
def _prepare_rtr_net_dict(self):
rtr_nwk = self.fw_id[0:4] + fw_const.DUMMY_SERVICE_NWK + (
self.fw_id[len(self.fw_id) - 4:])
return {'tenant_id': self.tenant_id, 'name': rtr_nwk,
'segmentation_id': None, 'vlan': None,
'config_profile': None, 'fwd_mode': None}
def _fill_fw_net_dict(self):
return {'fw_id': self.fw_id, 'tenant_id': self.tenant_id,
'name': self.fw_name, 'in_service_ip': self.in_start,
'in_network_id': self.net_id}
def _fill_fw_net_res_dict(self):
fw_net_dict2 = self._fill_fw_net_dict()
fw_net_dict2['os_status'] = fw_const.OS_IN_NETWORK_CREATE_SUCCESS
return fw_net_dict2
def _fill_fw_out_net_dict(self):
return {'fw_id': self.fw_id, 'tenant_id': self.tenant_id,
'name': self.fw_name, 'out_service_ip': self.out_start,
'out_network_id': self.out_net_id}
def _fill_fw_rtr_net_dict(self):
return {'router_id': self.router_id, 'name': self.fw_name,
'router_net_id': self.rtr_net_id,
'tenant_id': self.tenant_id,
'fw_id': self.fw_id,
'router_subnet_id': self.rtr_subnet_id,
'os_status': fw_const.OS_DUMMY_RTR_CREATE_SUCCESS}
def _fill_fw_rtr_net_dict_virt(self):
return {'router_id': self.router_id, 'name': self.fw_name,
'router_net_id': None,
'tenant_id': self.tenant_id,
'fw_id': self.fw_id, 'router_subnet_id': None,
'os_status': fw_const.OS_DUMMY_RTR_CREATE_SUCCESS}
def _fill_fw_out_net_res_dict(self):
fw_net_dict2 = self._fill_fw_out_net_dict()
fw_net_dict2['os_status'] = fw_const.OS_OUT_NETWORK_CREATE_SUCCESS
return fw_net_dict2
def _fill_fw_dcnm_rest_net_dict(self):
return {
'status': 'ACTIVE', 'admin_state_up': True,
'tenant_id': self.tenant_id, 'provider:network_type': 'local',
'vlan_id': VLAN_ID, 'segmentation_id': self.segmentation_id,
'mob_domain': False, 'mob_domain_name': None,
'name': self.in_srvc_nwk_name, 'part_name': None,
'config_profile': config.default_firewall_opts[
'firewall']['fw_service_host_profile'],
'fwd_mode': config.default_firewall_opts[
'firewall']['fw_service_host_fwd_mode']}
def _fill_fw_dcnm_rest_subnet_dict(self):
name = self.fw_id[0:4] + fw_const.IN_SERVICE_SUBNET + (
self.fw_id[len(self.fw_id) - 4:])
alloc_pool_dict = {}
alloc_pool_dict['start'] = self.in_start
alloc_pool_dict['end'] = self.in_end
fw_subnet_dict = {'name': name,
'enable_dhcp': False,
'tenant_id': self.tenant_id,
'cidr': self.in_subnet_and_mask,
'gateway_ip': self.in_dcnm_gw,
'secondary_gw': self.in_sec_gw,
'ip_version': 4}
fw_subnet_dict['allocation_pools'] = []
fw_subnet_dict['allocation_pools'].append(alloc_pool_dict)
return fw_subnet_dict
def _fill_fw_dcnm_rest_out_net_dict(self):
return {
'status': 'ACTIVE', 'admin_state_up': True,
'tenant_id': self.tenant_id, 'provider:network_type': 'local',
'vlan_id': VLAN_ID, 'segmentation_id': self.out_segmentation_id,
'mob_domain': False, 'mob_domain_name': None,
'name': self.out_srvc_nwk_name,
'part_name': fw_const.SERV_PART_NAME,
'config_profile': config.default_firewall_opts[
'firewall']['fw_service_ext_profile'],
'fwd_mode': config.default_firewall_opts[
'firewall']['fw_service_ext_fwd_mode']}
def _fill_fw_dcnm_rest_out_subnet_dict(self):
name = self.fw_id[0:4] + fw_const.OUT_SERVICE_SUBNET + (
self.fw_id[len(self.fw_id) - 4:])
alloc_pool_dict = {}
alloc_pool_dict['start'] = self.out_start
alloc_pool_dict['end'] = self.out_end
fw_subnet_dict = {'name': name,
'enable_dhcp': False,
'tenant_id': self.tenant_id,
'cidr': self.out_subnet_and_mask,
'gateway_ip': self.out_dcnm_gw,
'secondary_gw': self.out_sec_gw,
'ip_version': 4}
fw_subnet_dict['allocation_pools'] = []
fw_subnet_dict['allocation_pools'].append(alloc_pool_dict)
return fw_subnet_dict
def _fill_fw_dcnm_net_dict(self):
return {'router_id': self.router_id,
'out_network_id': self.out_net_id,
'name': self.fw_name,
'router_net_id': self.rtr_net_id,
'tenant_id': self.tenant_id,
'fw_id': self.fw_id,
'dcnm_status': fw_const.DCNM_IN_NETWORK_CREATE_SUCCESS,
'in_network_id': self.net_id,
'out_service_ip': None,
'os_status': fw_const.OS_DUMMY_RTR_CREATE_SUCCESS,
'router_subnet_id': self.rtr_subnet_id,
'in_service_ip': None}
def _fill_fw_dcnm_out_net_dict(self):
return {'router_id': self.router_id,
'out_network_id': self.out_net_id,
'name': self.fw_name,
'router_net_id': self.rtr_net_id,
'tenant_id': self.tenant_id,
'fw_id': self.fw_id,
'dcnm_status': fw_const.DCNM_OUT_NETWORK_CREATE_SUCCESS,
'in_network_id': self.net_id,
'out_network_id': self.out_net_id,
'out_service_ip': None,
'os_status': fw_const.OS_DUMMY_RTR_CREATE_SUCCESS,
'router_subnet_id': self.rtr_subnet_id,
'in_service_ip': None,
'out_service_ip': None}
def _fill_fw_dcnm_part_upd_dict(self, direc):
dcnm_status = fw_const.DCNM_IN_PART_UPDATE_SUCCESS \
if direc == "in" else fw_const.DCNM_OUT_PART_UPDATE_SUCCESS
fw_part_dict = {'tenant_id': self.tenant_id,
'fw_id': self.fw_id,
'dcnm_status': dcnm_status,
'name': self.fw_name}
return fw_part_dict
def _fill_fw_dcnm_part_create_dict(self):
return {'tenant_id': self.tenant_id,
'fw_id': self.fw_id,
'dcnm_status': fw_const.DCNM_OUT_PART_CREATE_SUCCESS,
'name': self.fw_name}
def _fill_fw_del_net_dict(self):
return {'router_id': self.router_id, 'name': self.fw_name,
'router_net_id': self.rtr_net_id,
'tenant_id': self.tenant_id,
'fw_id': self.fw_id, 'out_network_id': self.out_net_id,
'in_network_id': self.net_id, 'in_service_ip': None,
'out_service_ip': None,
'router_subnet_id': self.rtr_subnet_id}
def _fill_fw_db_data(self, state):
fw_data = dict()
compl_result = fw_const.RESULT_FW_CREATE_INIT + '(' + str(state) + ')'
fw_data[self.fw_id] = {
'tenant_id': self.tenant_id, 'name': self.fw_name,
'in_network_id': self.net_id, 'out_network_id': self.out_net_id,
'os_status': fw_const.OS_DUMMY_RTR_CREATE_SUCCESS,
'result': compl_result, 'router_id': self.router_id,
'router_net_id': self.rtr_net_id,
'router_subnet_id': self.rtr_subnet_id}
return fw_data
def test_create_in_nwk(self):
"""Create IN Network. """
id_list = []
id_list.append(self.net_id)
id_list.append(self.subnet_id)
fw_net_dict2 = self._fill_fw_net_res_dict()
with mock.patch('networking_cisco.apps.saf.server.services.firewall.'
'native.fabric_setup_base.ServiceIpSegTenantMap.'
'get_state',
return_value=fw_const.OS_IN_NETWORK_STATE),\
mock.patch.object(self.fabric_base.os_helper, 'create_network',
return_value=id_list) as create_nwk:
FakeClass.set_return(fsb.FabricApi, 'get_in_ip_addr',
{'subnet': self.in_subnet,
'start': self.in_start,
'sec_gateway': self.in_sec_gw,
'gateway': self.in_gw,
'end': self.in_end})
self.fabric_base.service_segs.allocate_segmentation_id.\
return_value = self.segmentation_id
self.fabric_base.fabric_fsm[fw_const.OS_OUT_NETWORK_STATE][0] = \
mock.MagicMock()
self.fabric_base.fabric_fsm[fw_const.OS_OUT_NETWORK_STATE][0].\
return_value = False
            # update_fw_db is not checked here: it and update_fw_db_result share
            # the same fw_dict, and fw_dict is updated with the result before
            # update_fw_db_result is called, so mock would record the updated
            # fw_dict and the argument check would fail.
parent = mock.MagicMock()
parent.attach_mock(create_nwk, 'create_network')
parent.attach_mock(self.add_nwk_db_mock, 'add_network_db')
parent.attach_mock(self.upd_fw_db_res_mock, 'update_fw_db_result')
parent.attach_mock(self.app_state_final_res_mock,
'append_state_final_result')
self.fabric_base.prepare_fabric_fw(self.tenant_id,
self.mock_fw_dict, True,
fw_const.RESULT_FW_CREATE_INIT)
expected_calls = [mock.call.create_network(self.in_srvc_nwk_name,
self.tenant_id,
self.in_subnet_and_mask,
gw=self.in_gw),
mock.call.add_network_db(self.net_id, self.net_dict,
fw_const.FW_CONST,
'SUCCESS'),
mock.call.update_fw_db_result(
self.mock_fw_dict.get('fw_id'), fw_net_dict2),
mock.call.append_state_final_result(
self.mock_fw_dict.get('fw_id'),
fw_const.RESULT_FW_CREATE_INIT,
fw_const.OS_OUT_NETWORK_STATE),
mock.call.append_state_final_result(
self.mock_fw_dict.get('fw_id'),
fw_const.RESULT_FW_CREATE_INIT,
fw_const.OS_OUT_NETWORK_STATE)]
parent.assert_has_calls(expected_calls, any_order=False)
def test_create_in_nwk_fail(self):
"""Create IN Network Fail.
The Openstack create network helper function is mocked to return a
failure.
"""
id_list = []
with mock.patch('networking_cisco.apps.saf.server.services.firewall.'
'native.fabric_setup_base.ServiceIpSegTenantMap.'
'get_state',
return_value=fw_const.OS_IN_NETWORK_STATE),\
mock.patch.object(self.fabric_base.os_helper, 'create_network',
return_value=id_list) as create_nwk:
FakeClass.set_return(fsb.FabricApi, 'get_in_ip_addr',
{'subnet': self.in_subnet,
'start': self.in_start,
'sec_gateway': self.in_sec_gw,
'gateway': self.in_gw,
'end': self.in_end})
self.fabric_base.service_segs.allocate_segmentation_id.\
return_value = self.segmentation_id
self.fabric_base.fabric_fsm[fw_const.OS_OUT_NETWORK_STATE][0] = \
mock.MagicMock()
self.fabric_base.fabric_fsm[fw_const.OS_OUT_NETWORK_STATE][0].\
return_value = False
            # update_fw_db is not checked here: it and update_fw_db_result share
            # the same fw_dict, and fw_dict is updated with the result before
            # update_fw_db_result is called, so mock would record the updated
            # fw_dict and the argument check would fail.
parent = mock.MagicMock()
parent.attach_mock(create_nwk, 'create_network')
parent.attach_mock(self.add_nwk_db_mock, 'add_network_db')
parent.attach_mock(self.upd_fw_db_res_mock, 'update_fw_db_result')
parent.attach_mock(self.app_state_final_res_mock,
'append_state_final_result')
self.fabric_base.prepare_fabric_fw(self.tenant_id,
self.mock_fw_dict, True,
fw_const.RESULT_FW_CREATE_INIT)
expected_calls = [mock.call.create_network(self.in_srvc_nwk_name,
self.tenant_id,
self.in_subnet_and_mask,
gw=self.in_gw),
mock.call.append_state_final_result(
self.mock_fw_dict.get('fw_id'),
fw_const.RESULT_FW_CREATE_INIT,
fw_const.OS_IN_NETWORK_STATE)]
parent.assert_has_calls(expected_calls, any_order=False)
self.add_nwk_db_mock.assert_not_called()
self.upd_fw_db_res_mock.assert_not_called()
def test_create_out_nwk(self):
"""Create Out Network Test. """
id_list = []
id_list.append(self.out_net_id)
id_list.append(self.out_subnet_id)
fw_net_dict2 = self._fill_fw_out_net_res_dict()
with mock.patch('networking_cisco.apps.saf.server.services.firewall.'
'native.fabric_setup_base.ServiceIpSegTenantMap.'
'get_state',
return_value=fw_const.OS_OUT_NETWORK_STATE),\
mock.patch.object(self.fabric_base.os_helper, 'create_network',
return_value=id_list) as create_nwk:
FakeClass.set_return(fsb.FabricApi, 'get_out_ip_addr',
{'subnet': self.out_subnet,
'start': self.out_start,
'sec_gateway': self.out_sec_gw,
'gateway': self.out_gw,
'end': self.out_end})
self.fabric_base.service_segs.allocate_segmentation_id.\
return_value = self.out_segmentation_id
self.fabric_base.fabric_fsm[fw_const.OS_DUMMY_RTR_STATE][0] = \
mock.MagicMock()
self.fabric_base.fabric_fsm[fw_const.OS_DUMMY_RTR_STATE][0].\
return_value = False
            # update_fw_db is not checked here: it and update_fw_db_result share
            # the same fw_dict, and fw_dict is updated with the result before
            # update_fw_db_result is called, so mock would record the updated
            # fw_dict and the argument check would fail.
# coding: utf-8
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Trains and Evaluates the Unlimited Hand - Finger condition network using a feed dictionary."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=missing-docstring
import argparse
import csv
import glob
import math
import os.path
import random
import sys
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import uh_sensor_values as uh_sensor_values
import tensorflow as tf
import numpy as np
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.datasets.mnist import DataSet
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import graph_io
from tensorflow.python.tools import freeze_graph
# Basic model parameters as external flags.
FLAGS = None
DATA_INDEX_ACCEL = 0
DATA_INDEX_GYRO = 1
DATA_INDEX_PHOTO_REFLECTOR = 2
DATA_INDEX_ANGLE = 3
DATA_INDEX_TEMPERATURE = 4
DATA_INDEX_QUATERNION = 5
DATA_INDEX_AMBIENT_LIGHT = 6
DUMMY_FILE_NAME = "dummy_sensor_data.csv"
READ_SAVED_DATA_BUFFER = []
MAX_FINGER_COUNT = 5
ENABLE_FINGER_COUNT = MAX_FINGER_COUNT
VALIDATE_SENSOR_DATA_SETS = np.array([], dtype=np.float32)
VALIDATE_VALUE_DATA_SETS = np.array([], dtype=np.float32)
class SensorDataFile:
def __init__(self, sensor_data_file):
self.sensor_data_file = sensor_data_file
self.sensor_data_file_desc = None
self.reach_eof = False
self.sub_sensor_data_file_array = []
def __del__(self):
self.fileClose()
def __str__(self):
return self.sensor_data_file + ": " + str(self.reach_eof)
def readLine(self):
if self.sensor_data_file_desc == None:
# open file
self.sensor_data_file_desc = open(self.sensor_data_file, 'r')
line = self.sensor_data_file_desc.readline()
if line == None or len(line) == 0:
            # Not strictly correct, but treat an empty read as a simple EOF check
self.reach_eof = True
return line
def isEndOfFile(self):
return self.reach_eof
def fileClose(self):
if self.sensor_data_file_desc != None:
self.sensor_data_file_desc.close()
self.sensor_data_file_desc = None
self.reach_eof = False
if len(self.sub_sensor_data_file_array) > 0:
for sub_sensor_data_file in self.sub_sensor_data_file_array:
sub_sensor_data_file.fileClose()
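# --- Added usage sketch (not part of the original script) ---
# SensorDataFile opens its CSV lazily on the first readLine() call and flags EOF
# once an empty read comes back; _example_read_all_lines is a hypothetical helper.
def _example_read_all_lines(path):
    """Drain one sensor data file line by line."""
    data_file = SensorDataFile(path)
    lines = []
    while not data_file.isEndOfFile():
        line = data_file.readLine()
        if line:
            lines.append(line.rstrip('\n'))
    data_file.fileClose()
    return lines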
def placeholder_inputs(batch_size):
"""Generate placeholder variables to represent the input tensors.
These placeholders are used as inputs by the rest of the model building
code and will be fed from the downloaded data in the .run() loop, below.
Args:
batch_size: The batch size will be baked into both placeholders.
Returns:
sensor_values_placeholder: Sensor values placeholder.
labels_placeholder: Labels placeholder.
"""
# Note that the shapes of the placeholders match the shapes of the full
# sensor values and label tensors, except the first dimension is now batch_size
# rather than the full size of the train or test data sets.
sensor_values_placeholder = tf.placeholder(tf.float32, shape=(batch_size,
get_parameter_data_count()),
name="sensor_values_placeholder")
labels_placeholder = tf.placeholder(tf.int32, shape=(batch_size), name="labels_placeholder")
return sensor_values_placeholder, labels_placeholder
def fill_feed_dict(data_set, sensor_values_pl, labels_pl):
"""Fills the feed_dict for training the given step.
A feed_dict takes the form of:
feed_dict = {
<placeholder>: <tensor of values to be passed for placeholder>,
....
}
Args:
data_set: The set of images and labels, from input_data.read_data_sets()
sensor_values_pl: The sensor values placeholder, from placeholder_inputs().
labels_pl: The labels placeholder, from placeholder_inputs().
Returns:
feed_dict: The feed dictionary mapping from placeholders to values.
"""
# Create the feed_dict for the placeholders filled with the next
# `batch size` examples.
sensor_values_feed, labels_feed = data_set.next_batch(FLAGS.batch_size,
FLAGS.fake_data)
feed_dict = {
sensor_values_pl: sensor_values_feed,
labels_pl: labels_feed,
}
return feed_dict
def do_eval(sess,
eval_correct,
sensor_values_placeholder,
labels_placeholder,
data_set):
"""Runs one evaluation against the full epoch of data.
Args:
sess: The session in which the model has been trained.
eval_correct: The Tensor that returns the number of correct predictions.
sensor_values_placeholder: The sensor values placeholder.
labels_placeholder: The labels placeholder.
data_set: The set of sensor values and labels to evaluate, from
input_data.read_data_sets().
"""
# And run one epoch of eval.
true_count = 0 # Counts the number of correct predictions.
steps_per_epoch = data_set.num_examples // FLAGS.batch_size
num_examples = steps_per_epoch * FLAGS.batch_size
for step in xrange(steps_per_epoch):
feed_dict = fill_feed_dict(data_set,
sensor_values_placeholder,
labels_placeholder)
true_count += sess.run(eval_correct, feed_dict=feed_dict)
if num_examples == 0:
precision = float(true_count) / data_set.num_examples
print(' Num examples: %d Num correct: %d Precision @ 1: %0.04f' %
(data_set.num_examples, true_count, precision))
else:
precision = float(true_count) / num_examples
print(' Num examples: %d Num correct: %d Precision @ 1: %0.04f' %
(num_examples, true_count, precision))
def run_training():
"""Train sensor data for a number of steps."""
# check enable finger count from FLAGS.enable_finger_flags
ENABLE_FINGER_COUNT = get_enable_finger_count()
# Get the sets of images and labels for training, validation, and test on uh_sensor_values.
read_step = FLAGS.batch_size
max_read_step = FLAGS.max_steps
with tf.Graph().as_default() as graph:
# Create a session for running Ops on the Graph.
sess = tf.Session()
# Generate placeholders for the images and labels.
sensor_values_placeholder, labels_placeholder = placeholder_inputs(FLAGS.batch_size)
# Build a Graph that computes predictions from the inference model.
layer_units_array = [get_parameter_data_count()]
hidden_layer_units_array = FLAGS.hidden_layrer_units.split(',')
for hidden_layer_units in hidden_layer_units_array:
layer_units_array.append(int(hidden_layer_units))
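        # Output layer width: 3 classes in rock-paper-scissors mode, otherwise one
        # class per finger-condition combination (max_finger_condition ** enabled fingers).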
if FLAGS.use_rps_mode:
# 3layer for rock-paper-scissors mode
layer_units_array.append(3)
else:
layer_units_array.append(FLAGS.max_finger_condition ** ENABLE_FINGER_COUNT)
logits = uh_sensor_values.inference(sensor_values_placeholder, layer_units_array)
# Add to the Graph the Ops for loss calculation.
loss = uh_sensor_values.loss(logits, labels_placeholder)
# Add to the Graph the Ops that calculate and apply gradients.
train_op = uh_sensor_values.training(FLAGS.optimizer, loss, FLAGS.learning_rate)
# Add the Op to compare the logits to the labels during evaluation.
eval_correct = uh_sensor_values.evaluation(logits, labels_placeholder)
# Build the summary Tensor based on the TF collection of Summaries.
summary = tf.summary.merge_all()
# Add the variable initializer Op.
init = tf.global_variables_initializer()
# Instantiate a SummaryWriter to output summaries and the Graph.
summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)
# And then after everything is built:
# Run the Op to initialize the variables.
sess.run(init)
# Create a saver for writing training checkpoints.
saver = tf.train.Saver(max_to_keep=FLAGS.max_save_checkpoint)
checkpoint = ''
checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')
eof_dict = {}
data_files = []
data_file_paths = []
if FLAGS.random_learning:
max_read_step, out_file_name = create_random_data_file()
data_file_paths = [out_file_name]
else:
data_file_paths = glob.glob(FLAGS.input_data_dir + "/sensor_data_*")
total_read_step = 0
        # Convert the file paths into SensorDataFile instances
for data_file_path in data_file_paths:
data_files.append(SensorDataFile(data_file_path))
for data_file in data_files:
print('%s: ' % data_file)
offset_step = 0
while True:
# read data_sets from CVS
data_sets = read_sensor_data_sets(data_file, offset_step=offset_step, read_step=read_step)
if data_sets != None:
# Start the training loop.
start_time = time.time()
# Fill a feed dictionary with the actual set of images and labels
# for this particular training step.
feed_dict = fill_feed_dict(data_sets.train,
sensor_values_placeholder,
labels_placeholder)
# Run one step of the model. The return values are the activations
# from the `train_op` (which is discarded) and the `loss` Op. To
# inspect the values of your Ops or variables, you may include them
# in the list passed to sess.run() and the value tensors will be
# returned in the tuple from the call.
_, loss_value = sess.run([train_op, loss],
feed_dict=feed_dict)
duration = time.time() - start_time
# Write the summaries and print an overview fairly often.
if total_read_step % 100 == 0:
# Print status to stdout.
print('Step %d - %d: loss = %.2f (%.3f sec)' % (total_read_step, total_read_step + read_step, loss_value, duration))
# Update the events file.
summary_str = sess.run(summary, feed_dict=feed_dict)
summary_writer.add_summary(summary_str, total_read_step)
summary_writer.flush()
if (FLAGS.max_steps > 0) and ((total_read_step + read_step) % FLAGS.max_steps == 0):
# Save a checkpoint and evaluate the model periodically.
checkpoint = saver.save(sess, checkpoint_file, global_step=total_read_step)
offset_step += read_step
total_read_step += read_step
else:
                    break
# Save a checkpoint and evaluate the model periodically.
checkpoint = saver.save(sess, checkpoint_file, global_step=total_read_step)
graph_io.write_graph(sess.graph, FLAGS.saved_data_dir, "saved_data.pb", as_text=False)
input_binary = True
input_graph_path = os.path.join(FLAGS.saved_data_dir, "saved_data.pb")
input_saver = ""
output_node_names = "eval_correct"
restore_op_name = "save/restore_all"
filename_tensor_name = "save/Const:0"
output_graph_path = os.path.join(FLAGS.saved_data_dir, "saved_data_out.pb")
clear_devices = False
freeze_graph.freeze_graph(input_graph_path, input_saver,
input_binary, checkpoint, output_node_names,
restore_op_name, filename_tensor_name,
output_graph_path, clear_devices, "", "")
# Evaluate against the training set.
print('Validation Data Eval:')
global VALIDATE_SENSOR_DATA_SETS
global VALIDATE_VALUE_DATA_SETS
new_shape = (int(len(VALIDATE_SENSOR_DATA_SETS) / get_parameter_data_count()), get_parameter_data_count())
VALIDATE_SENSOR_DATA_SETS = np.reshape(VALIDATE_SENSOR_DATA_SETS, new_shape)
VALIDATE_SENSOR_DATA_SETS.astype(np.float32)
train = DataSet(VALIDATE_SENSOR_DATA_SETS, VALIDATE_VALUE_DATA_SETS, dtype=dtypes.uint8, reshape=False)
data_sets = base.Datasets(train=train, validation=train, test=train)
if data_sets != None:
do_eval(sess,
eval_correct,
sensor_values_placeholder,
labels_placeholder,
data_sets.train)
def get_enable_finger_count():
enable_finger_count = 0
max_finger_count = MAX_FINGER_COUNT
enable_finger_flags = FLAGS.enable_finger_flags
for exponent in xrange(max_finger_count):
if enable_finger_flags % 10 != 0:
enable_finger_count += 1
        # Shift down to the next decimal digit
enable_finger_flags = enable_finger_flags // 10
return enable_finger_count
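# --- Added illustration (not part of the original script) ---
# enable_finger_flags packs one decimal digit per finger; any non-zero digit marks
# that finger as enabled, so e.g. 10110 enables 3 of the 5 fingers. The helper
# below is a hypothetical, FLAGS-free version of the loop above.
def _example_enable_finger_count(enable_finger_flags=10110, max_finger_count=MAX_FINGER_COUNT):
    count = 0
    for _ in range(max_finger_count):
        if enable_finger_flags % 10 != 0:
            count += 1
        enable_finger_flags //= 10
    return count  # 10110 -> 3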
def create_random_data_file():
    # Building a file of gathered random data would take a long time, so skip creating it,
    # return a dummy file path instead, and let the caller detect this case from the path.
data_files = glob.glob(FLAGS.input_data_dir + "/sensor_data_*")
return (FLAGS.max_steps * len(data_files), DUMMY_FILE_NAME)
def read_sensor_data_sets(
train_data_file,
dtype=dtypes.uint8,
reshape=False,
training=True,
offset_step=0,
read_step=500):
global VALIDATE_SENSOR_DATA_SETS
global VALIDATE_VALUE_DATA_SETS
global READ_SAVED_DATA_BUFFER
sensor_data_sets = np.array([], dtype=np.float32)
value_data_sets = np.array([], dtype=np.float32)
no_data = True
combine_data_line_array = []
if train_data_file.sensor_data_file == DUMMY_FILE_NAME:
if (FLAGS.max_steps == 0) or (offset_step | |
1000,
i_max_out: Optional[int] = 1000,
i_max_ring: Optional[int] = 500,
additional_metrics: Optional[Tuple[list, float]] = None,
population_metrics: Optional[list] = None,
size_in_MW: Optional[bool] = False,
branching_propensity: Optional[float] = None,
metrics_weights: Optional[Tuple[list, nparray]] = None,
verbose: Optional[bool] = True,
show_plots: Optional[bool] = True,
save_path: Optional[str] = os.getcwd()):
"""Initialize the simulation parameters
Parameters
----------
linkage_distribution_input : list
target linkage distribution
monomer_distribution_input : list
target monomer distribution
expected_size : Tuple[int, float]
target polymer size (mean)
max_size : Tuple[int, float]
maximum polymer size
distribution_scaling : Optional[float], optional
std of the size distribution, by default 1.0
Tmetro : int
temperature for metropolis monte carlo in the inner loop
Tmetro_out : int
temperature for metropolis monte carlo in the outer loop
seed_init : Optional[int], optional
random seed, by default 1
library_name : Optional[str], optional
name of structure library or
type of lignin, by default 'lignin_x'
trial_index : Optional[int], optional
trial no., by default None
n_population : Optional[int], optional
population size, by default 100
i_max : Optional[int], optional
maximum number of iterations in the inner loop, by default 1000
i_max_out : Optional[int], optional
            maximum number of iterations in the outer loop, by default 1000
i_max_ring : Optional[int], optional
maximum number of iterations in the ring loop, by default 500
additional_metrics : Optional[Tuple[list, float]], optional
additional metrics such as branching coefficient, by default None
population_metrics : Optional[list], optional
list of population metrics such as average MW, by default None
size_in_MW : Optional[bool], optional
flag indicating whether size is in MW, by default False
branching_propensity : Optional[float], optional
branching propensity, by default None
metrics_weights : Optional[Tuple[list, nparray]], optional
            assigned weight for each metric, by default None
verbose : Optional[bool], optional
flag to control printing outputs, by default True
show_plots : Optional[bool], optional
flag to control showing plots, by default True
save_path : Optional[str], optional
output file locations, by default os.getcwd()
"""
# inherit from Trajectory
super().__init__(linkage_distribution_input,
monomer_distribution_input,
Tmetro,
expected_size,
max_size,
distribution_scaling,
additional_metrics,
size_in_MW,
branching_propensity,
metrics_weights,
verbose)
# Set up io path
ResultsPathParent = os.path.join(save_path, ResultsName, library_name)
if not os.path.exists(ResultsPathParent): os.mkdir(ResultsPathParent)
# Determine previous trials in library
        if trial_index is None:
trial_count = 0
trial_list=os.scandir(ResultsPathParent)
for trial_folder in trial_list:
# print(trial_folder)
if trial_folder.is_dir():
trial_count=trial_count+1
trial_index=trial_count
# create a new folder for the new trial
if trial_index is None:
ResultsPath = ResultsPathParent
else:
ResultsPath = os.path.join(ResultsPathParent, 'i'+str(trial_index))
print("Starting a new trial, No.{}:\n". format(trial_index))
if os.path.exists(ResultsPath): shutil.rmtree(ResultsPath)
os.mkdir(ResultsPath)
# assign to self
self.linkage_distribution_input = linkage_distribution_input
self.monomer_distribution_input = monomer_distribution_input
self.library_name = library_name
self.n_population = n_population
self.Tmetro_out = Tmetro_out
self.seed_init = seed_init
self.i_max = i_max
self.i_max_out = i_max_out
self.i_max_ring = i_max_ring
self.population_metrics = population_metrics
self.ResultsName=ResultsName
self.ResultsPath = ResultsPath
self.P_population = None
self.metrics_current_dict= None
self.metrics_target_dict = None
self.metrics_names_to_plot = None
self.metrics_population_to_plot=None
self.trial_index=trial_index
self.show_plots = show_plots
# estimate the maximum MW for scaling purposes
if self.size_in_MW:
self.max_MW = self.max_size
else:
self.max_MW = 150 * self.max_size
self.cal_MW = True
def run(self):
"""
Main body
run the MCMC simulations
"""
# Set the seed for this entire simulation
np.random.seed(self.seed_init)
FilePath = os.path.join(self.ResultsPath, self.library_name + '.out')
file = open(FilePath,"w")
        file.write('==========Generating {} library =========\n'.format(self.library_name))
file.write('==========Current trial No. {} =========\n'.format(self.trial_index))
# Initialize the similuation trajectory
n_polymers = 0
# Set the distance tolerance
d_average = np.inf # set to a very large value
# Starting seed for each trajectory
rseed = 0
# assign individual trajectory weights
# exclude the last two population metrics from the input metrics
metrics_weights_individual = None
if self.metrics_weights is not None:
if self.population_metrics is not None:
metrics_weights_individual = self.metrics_weights[:-2]
else:
metrics_weights_individual = self.metrics_weights
traj = Trajectory(self.linkage_distribution_input,
self.monomer_distribution_input,
self.Tmetro,
expected_size = self.expected_size,
max_size = self.max_size,
distribution_scaling=self.distribution_scaling,
size_in_MW = self.size_in_MW,
additional_metrics = self.additional_metrics,
branching_propensity = self.branching_propensity,
metrics_weights=metrics_weights_individual,
verbose = self.verbose,
file=file)
# Set the optimization target
metrics_target = traj.metrics_target
metrics_names_to_match = monomer_types + linkage_names
if self.additional:
metrics_names_to_match += ['branching_coeff']
if self.population_metrics is not None:
metrics_names_to_match += ['MW', 'MW_weighted']
# Add population metrics to match, use normalized value for population metrics (MWs)
metrics_target_to_match = metrics_target.copy()
metrics_target_to_match_original = metrics_target.copy()
if self.population_metrics is not None:
population_metrics_normalized = [pi/self.max_MW for pi in self.population_metrics]
metrics_target_to_match = np.append(metrics_target_to_match, population_metrics_normalized)
metrics_target_to_match_original = np.append(metrics_target_to_match_original, self.population_metrics)
self.metrics_target_dict = ut.metrics_array_to_dict(metrics_target_to_match_original, metrics_names_to_match)
# Simulation main body
P_population = []
counts_population = []
monomer_count_population = []
MW_population = []
rseeds = []
metrics_population = []
distance = []
distance_polymer = []
i_step = 0
start = time.time()
monomer_accepted=0
monomer_iterations=0
while n_polymers <= self.n_population and i_step <= self.i_max_out:
P_i, distance_i, monomer_accepted_count, monomer_iteration_count = traj.run_MCMC(rseed, self.i_max)
counts_P, monomer_count_P, MW_P = ch.get_counts_polymer(P_i, \
additional=self.additional, cal_MW=self.cal_MW)
# check if the molecule is valid
if MW_P < 100: continue
metrics_P = ut.counts_to_metrics(counts_P, additional=self.additional)
# make a copy
counts_population_copy = counts_population.copy()
monomer_count_population_copy = monomer_count_population.copy()
MW_population_copy = MW_population.copy()
metrics_population_copy = metrics_population.copy()
# append the new
counts_population_copy.append(counts_P)
monomer_count_population_copy.append(monomer_count_P)
MW_population_copy.append(MW_P)
metrics_population_copy.append(metrics_P)
# compute the metrics from the sum of counts
counts_sum = np.sum(np.array(counts_population_copy), axis = 0)
metrics_average_to_match = ut.counts_to_metrics(counts_sum, additional=self.additional)
# add population metrics to match
if self.population_metrics is not None:
MW_average = ut.MW_array_to_number_average(np.array(MW_population_copy))
MW_weight_average = ut.MW_array_to_weight_average(np.array(MW_population_copy))
metrics_average_to_match = np.append(metrics_average_to_match, [MW_average/self.max_MW, MW_weight_average/self.max_MW])
# Compute the new distance
d_average_new = ut.cal_distance(metrics_target_to_match, metrics_average_to_match, self.metrics_weights)
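            # Metropolis step in metric space: moves that reduce the distance to the
            # target metrics are always kept; otherwise they are kept with probability
            # exp(-delta_d / (kb * Tmetro_out)).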
energy_flag = False
delta_d = d_average_new - d_average
# accept the change if energy going downhill
if delta_d <= 0 or self.Tmetro_out == np.inf :
energy_flag = True
# test using Boltzmann distribution
else:
if self.Tmetro_out > 0:
w = np.exp(-delta_d / kb /self.Tmetro_out)
if np.random.rand() <= w:
energy_flag = True
            if energy_flag:  # (previously also checked distance_flag and NN_flag)
d_average = d_average_new
counts_population = counts_population_copy
monomer_count_population = monomer_count_population_copy
MW_population = MW_population_copy
metrics_population = metrics_population_copy
P_population.append(P_i)
n_polymers += 1
file.write('\tn_polymer {} added on iteration no {} \n'.format(n_polymers-1, i_step))
if self.verbose:
print('\tn_polymer {} added on iteration no {}'.format(n_polymers-1, i_step))
else:
file.write('\tPolymer addition rejected\n')
if self.verbose:
print('\tPolymer addition rejected')
distance_polymer.append(distance_i)
distance.append(d_average)
rseed += 1
rseeds.append(rseed)
monomer_accepted += monomer_accepted_count
monomer_iterations += monomer_iteration_count
i_step += 1
distance_final = d_average
end = time.time()
file.write('Runtime for creating all polymers : {:.2f} minutes \n'.format((end-start)/60))
print('Runtime for creating all polymers : {:.2f} minutes \n'.format((end-start)/60))
# Add rings
distance_polymer_w_ring = []
distance_w_ring = np.Inf
ring_count=0
ring_iterations=0
if (self.branching_propensity is None) or (self.branching_propensity > 0.0):
counts_population_w_ring = []
monomer_count_population_w_ring = []
MW_population_w_ring = []
P_population_w_ring = []
metrics_population_w_ring = []
polymer_no=1
start = time.time()
for Pi, ri in zip(P_population, rseeds):
P_i, distance_i, acceptance_count, ring_its = traj.run_MCMC_ring(Pi, ri, self.i_max_ring)
counts_w_ring_P, monomer_count_w_ring_P, MW_w_ring_P = ch.get_counts_polymer(P_i, \
additional=self.additional, cal_MW=self.cal_MW)
# check if the molecule is valid
if MW_w_ring_P < 100: continue
metrics_w_ring_P = ut.counts_to_metrics(counts_w_ring_P, additional=self.additional)
file.write('\t{} ring(s) added to polymer {}\n'.format(acceptance_count, polymer_no))
if(acceptance_count>0) and self.verbose:
print('\t{} ring(s) added to polymer {}'.format(acceptance_count, polymer_no))
P_population_w_ring.append(P_i)
counts_population_w_ring.append(counts_w_ring_P)
monomer_count_population_w_ring.append(monomer_count_w_ring_P)
MW_population_w_ring.append(MW_w_ring_P)
metrics_population_w_ring.append(metrics_w_ring_P)
distance_polymer_w_ring.append(distance_i)
ring_count=ring_count+acceptance_count
ring_iterations=ring_iterations+ring_its
polymer_no=polymer_no+1
# record the simulation time
end = time.time()
file.write('Runtime for adding the rings : {:.2f} minutes \n'.format((end-start)/60))
print('Runtime for adding the rings : {:.2f} minutes \n'.format((end-start)/60))
# compute the metrics from the sum of counts
counts_w_ring_sum = np.sum(np.array(counts_population_w_ring), axis = 0)
metrics_w_ring_to_match = ut.counts_to_metrics(counts_w_ring_sum, additional=self.additional)
# add population metrics to match
if self.population_metrics is not None:
MW_w_ring_average = ut.MW_array_to_number_average(np.array(MW_population_w_ring))
MW_w_ring_weight_average = ut.MW_array_to_weight_average(np.array(MW_population_w_ring))
metrics_w_ring_to_match = np.append(metrics_w_ring_to_match, [MW_w_ring_average/self.max_MW, MW_w_ring_weight_average/self.max_MW])
d_w_ring_average = ut.cal_distance(metrics_target_to_match, metrics_w_ring_to_match, self.metrics_weights)
delta_d = d_w_ring_average - d_average
distance_w_ring = d_w_ring_average
if delta_d <= 0:
P_population = P_population_w_ring
counts_population = counts_population_w_ring
monomer_count_population = monomer_count_population_w_ring
MW_population = MW_population_w_ring
metrics_population = metrics_population_w_ring
                distance_final = d_w_ring_average  # adopt the distance of the accepted with-ring population
# assign trajectory properties to self
start = time.time()
self.distance = distance
self.distance_polymer = distance_polymer
self.distance_w_ring = distance_w_ring
self.distance_polymer_w_ring = distance_polymer_w_ring
self.distance_final = distance_final
# Characterize the entire population
# Average the population
MW_population = np.expand_dims(MW_population, 1)
monomer_count_population = np.expand_dims(monomer_count_population, 1)
metrics_population = np.array(metrics_population)
# Save population data to csv files
self.P_population = P_population
if self.trial_index is None:
population = ch.Population(P_population, self.library_name, ResultsName=self.ResultsName)
else:
population = ch.Population(P_population, self.library_name, ResultsName=self.ResultsName, TrialIndex=str(self.trial_index))
population.analyze()
# the metrics average including branching
fixed length experiments but
are inapplicable to long-running services except in the offline
scenario
- [ ] Using knowledge of the LoadGen implementation to predict upcoming
lulls or spikes in the server scenario
- [ ] Treating beams in a beam search differently. For example,
employing different precision for different beams
- [ ] Changing the number of beams per beam search relative to the reference
- [ ] Incorporating explicit statistical information about the performance or accuracy sets
- [ ] Techniques that take advantage of upsampled images.
- [ ] Techniques that only improve performance when there are identical samples in a query.
- [x] None of the above
Is the submission congruent with all relevant MLPerf rules?
- [x] Yes
- [ ] No
For each SUT, does the submission accurately reflect the real-world
performance of the SUT?
- [x] Yes
- [ ] No"""
def get_checklist(checklist_template=checklist_template, name='<NAME>', email='<EMAIL>',
system='rpi4-tflite-v1.15', system_name='Raspberry Pi 4 (rpi4)', revision='61220457de',
division='closed', category='available', task='image-classification', benchmark='mobilenet', scenario='singlestream',
performance_sample_count=1024, performance_sample_count_met=True,
accuracy_pc=12.345, accuracy_met=True, numerics='fp32'):
def tick(var): return "x" if var else " "
print("=" * 100)
print(system)
print("=" * 100)
revision_other = revision not in [ '61220457de', '5684c11e39', '55c0ea4e77', 'd31c18fbd9', '1d0e06e54a' ]
benchmark_other = benchmark not in [ 'mobilenet', 'resnet']
if benchmark=='mobilenet':
accuracy_met = accuracy_pc >= 71.676*0.98
elif benchmark=='resnet':
accuracy_met = accuracy_pc >= 76.456*0.99
else:
accuracy_met = accuracy_met and accuracy_pc > 0
checklist = checklist_template % {
"name" : name,
"email" : email,
"system_name": system_name,
# Division.
"closed" : tick(division=='closed'),
"open" : tick(division=='open'),
# Category.
"category_available" : tick(category.lower()=='available'),
"category_preview" : tick(category.lower()=='preview'),
"category_rdi" : tick(category.lower()=='rdi'),
# Benchmark.
"benchmark_mobilenet": tick(benchmark=='mobilenet'),
"benchmark_resnet": tick(benchmark=='resnet'),
"benchmark_other": tick(benchmark_other),
"benchmark_other_specify": benchmark if benchmark_other else '',
# Table.
"system" : system,
"benchmark" : benchmark,
"query_count": 50000 if task=='image-classification' else 5000,
"accuracy_pc" : "%.3f" % accuracy_pc,
# Scenario.
"scenario_singlestream": tick(scenario=='singlestream'),
"scenario_multistream": tick(scenario=='multistream'),
"scenario_server": tick(scenario=='server'),
"scenario_offline": tick(scenario=='offline'),
# Accuracy.
"mobilenet_accuracy_met" : tick(benchmark=='mobilenet' and accuracy_met),
"resnet_accuracy_met" : tick(benchmark=='resnet' and accuracy_met),
"accuracy_not_met" : tick(not accuracy_met),
# "How many samples are loaded into the QSL in performance mode?"
"performance_sample_count": performance_sample_count,
"performance_sample_count_1024": tick(performance_sample_count==1024),
"performance_sample_count_256": tick(performance_sample_count==256),
"performance_sample_count_64": tick(performance_sample_count==64),
"performance_sample_count_not_met": tick(not performance_sample_count_met), # TODO
# LoadGen revision.
"revision_61220457de": tick(revision=='61220457de'),
"revision_5684c11e39": tick(revision=='5684c11e39'),
"revision_55c0ea4e77": tick(revision=='55c0ea4e77'),
"revision_d31c18fbd9": tick(revision=='d31c18fbd9'),
"revision_1d0e06e54a": tick(revision=='1d0e06e54a'),
"revision_other": tick(revision_other),
"revision_other_specify": revision if revision_other else '',
# Numerics.
"numerics_uint8": tick(numerics=='uint8'),
"numerics_fp32": tick(numerics=='fp32'),
}
print(checklist)
print("-" * 100)
return checklist
# null = get_checklist(system='rpi4-armnn-v19.08-neon', system_name='Raspberry Pi 4 (rpi4)', benchmark='mobilenet', accuracy_pc=70.241, numerics='uint8')
# null = get_checklist(system='hikey960-tflite-v1.15', system_name='Linaro HiKey 960 (hikey960)', benchmark='resnet', accuracy_pc=75.692, revision='deadbeef')
null = get_checklist(system='velociti-tensorflow-v1.14-cpu', name='<NAME>; <NAME>', email='<EMAIL>; <EMAIL>', system_name='HP Z640 G1X62EA workstation (velociti)', division='open', category='RDI', benchmark='ssd-mobilenet-fpn')
# <a id="check"></a>
# ## Check the experimental data
# In[ ]:
#
# Image Classification - Closed (MobileNet, ResNet).
#
repos_image_classification_closed = [
# firefly
'mlperf.closed.image-classification.firefly.tflite-v1.15', # https://github.com/mlperf/submissions_inference_0_5/pull/18
'mlperf.closed.image-classification.firefly.armnn-v19.08.neon', # https://github.com/mlperf/submissions_inference_0_5/pull/21
'mlperf.closed.image-classification.firefly.armnn-v19.08.opencl', #https://github.com/mlperf/submissions_inference_0_5/pull/22
# hikey960
'mlperf.closed.image-classification.hikey960.tflite-v1.15', # https://github.com/mlperf/submissions_inference_0_5/pull/23
'mlperf.closed.image-classification.hikey960.armnn-v19.08.neon', # https://github.com/mlperf/submissions_inference_0_5/pull/24
'mlperf.closed.image-classification.hikey960.armnn-v19.08.opencl', # https://github.com/mlperf/submissions_inference_0_5/pull/25
# rpi4
'mlperf.closed.image-classification.rpi4.tflite-v1.15', # https://github.com/mlperf/submissions_inference_0_5/pull/26/
'mlperf.closed.image-classification.rpi4.armnn-v19.08.neon', # https://github.com/mlperf/submissions_inference_0_5/pull/30
# mate10pro
'mlperf.closed.image-classification.mate10pro.armnn-v19.08.neon', # https://github.com/mlperf/submissions_inference_0_5/pull/32
'mlperf.closed.image-classification.mate10pro.armnn-v19.08.opencl', # https://github.com/mlperf/submissions_inference_0_5/pull/35
]
repos_image_classification_closed_audit = [
'mlperf.closed.image-classification.firefly.audit', # https://github.com/mlperf/submissions_inference_0_5/pull/234
'mlperf.closed.image-classification.hikey960.audit', # https://github.com/mlperf/submissions_inference_0_5/pull/236
'mlperf.closed.image-classification.rpi4.audit', # https://github.com/mlperf/submissions_inference_0_5/pull/238
#'mlperf.closed.image-classification.mate10pro.audit',
]
#
# Image Classification - Open (MobileNets-v1,v2).
#
repos_image_classification_open = [
# firefly
'mlperf.open.image-classification.firefly.tflite-v1.15', # https://github.com/mlperf/submissions_inference_0_5/pull/39
'mlperf.open.image-classification.firefly.tflite-v1.15.mobilenet-v1-quantized', # https://github.com/mlperf/submissions_inference_0_5/pull/127
'mlperf.open.image-classification.firefly.armnn-v19.08.opencl', # https://github.com/mlperf/submissions_inference_0_5/pull/40
'mlperf.open.image-classification.firefly.armnn-v19.08.neon', # https://github.com/mlperf/submissions_inference_0_5/pull/120
# hikey960
'mlperf.open.image-classification.hikey960.tflite-v1.15', # https://github.com/mlperf/submissions_inference_0_5/pull/37
'mlperf.open.image-classification.hikey960.tflite-v1.15.mobilenet-v1-quantized', # https://github.com/mlperf/submissions_inference_0_5/pull/128
'mlperf.open.image-classification.hikey960.armnn-v19.08.opencl', # https://github.com/mlperf/submissions_inference_0_5/pull/38
'mlperf.open.image-classification.hikey960.armnn-v19.08.neon', # https://github.com/mlperf/submissions_inference_0_5/pull/121
# rpi4
'mlperf.open.image-classification.rpi4.tflite-v1.15', # https://github.com/mlperf/submissions_inference_0_5/pull/122
'mlperf.open.image-classification.rpi4.tflite-v1.15.mobilenet-v1-quantized', # https://github.com/mlperf/submissions_inference_0_5/pull/129
'mlperf.open.image-classification.rpi4.armnn-v19.08.neon', # https://github.com/mlperf/submissions_inference_0_5/pull/123
# mate10pro
'mlperf.open.image-classification.mate10pro.tflite-v1.13.mobilenet', # https://github.com/mlperf/submissions_inference_0_5/pull/130
'mlperf.open.image-classification.mate10pro.tflite-v1.13.mobilenet-v1-quantized', # https://github.com/mlperf/submissions_inference_0_5/pull/135
]
repos_image_classification_open_audit = [
'mlperf.open.image-classification.firefly.audit', # https://github.com/mlperf/submissions_inference_0_5/pull/255
'mlperf.open.image-classification.hikey960.audit', # https://github.com/mlperf/submissions_inference_0_5/pull/257
'mlperf.open.image-classification.rpi4.audit', # https://github.com/mlperf/submissions_inference_0_5/pull/258
#'mlperf.open.image-classification.mate10pro.audit',
]
#
# Object Detection - Open (TensorFlow Model Zoo + YOLO-v3)
#
repos_object_detection_open = [
# velociti
'mlperf.open.object-detection.velociti', # https://www.dropbox.com/s/wiea3a8zf077jsv/mlperf.open.object-detection.velociti.zip
]
# In[ ]:
# repos_for_testing = [
# 'mlperf.closed.image-classification.mate10pro.tflite-v1.13.mobilenet.BAD_LOADGEN',
# 'mlperf.closed.image-classification.mate10pro.armnn-v19.08.opencl.BAD_RESNET',
# 'mlperf.closed.image-classification.mate10pro.armnn-v19.08.neon.BAD_RESNET',
# 'mlperf-inference-vision-experiments-count5'
# ]
# In[ ]:
# #!ck recache repo
# for repo_uoa in repos:
# print("=" * 100)
# print(repo_uoa)
# print("=" * 100)
# !ck list $repo_uoa:experiment:* | sort
# print("-" * 100)
# print("")
# In[ ]:
upstream_path=os.environ.get('CK_ENV_MLPERF_INFERENCE','')
# In[ ]:
root_dir=os.environ.get('CK_MLPERF_SUBMISSION_ROOT','')
def check_experimental_results(repo_uoa, module_uoa='experiment', tags='mlperf', submitter='dividiti', path=None, audit=False):
if not os.path.exists(root_dir): os.mkdir(root_dir)
print("Storing results under '%s'" % root_dir)
r = ck.access({'action':'search', 'repo_uoa':repo_uoa, 'module_uoa':module_uoa, 'tags':tags})
if r['return']>0:
print('Error: %s' % r['error'])
exit(1)
experiments = r['lst']
for experiment in experiments:
data_uoa = experiment['data_uoa']
r = ck.access({'action':'list_points', 'repo_uoa':repo_uoa, 'module_uoa':module_uoa, 'data_uoa':data_uoa})
if r['return']>0:
print('Error: %s' % r['error'])
exit(1)
experiment_tags = r['dict']['tags']
experiment_points = r['points']
experiment_path = r['path']
# Load pipeline to determine the original program_name
load_pipeline_adict = { 'action': 'load_pipeline',
'repo_uoa': repo_uoa,
'module_uoa': module_uoa,
'data_uoa': data_uoa,
}
r=ck.access( load_pipeline_adict )
if r['return']>0:
print('Error: %s' % r['error'])
exit(1)
pipeline = r['pipeline']
program_name = pipeline['choices']['data_uoa']
print("*" * 100)
division=task=platform=library=inference_engine=inference_engine_version=backend=benchmark=scenario=mode=preprocessing=test=notes = ''
for atag in experiment_tags:
if '.' in atag: # Expected format: attribute1.value1 , attribute2.value2 , etc - in any order
# Example: "division.open", "submitter.dividiti", "task.image-classification", "platform.xavier",
# "inference_engine.tflite", "inference_engine_version.v2.1.1", "inference_engine_backend.dummy",
# "workload.mobilenet-v2-1.4-224", "scenario.singlestream", "mode.performance"
(attribute, value) = atag.split('.', 1) # protection from dotted version notation!
if attribute == 'division':
division = value
elif attribute == 'task':
task = value
elif attribute == 'platform':
platform = value
elif attribute == 'inference_engine':
inference_engine = value
elif attribute == 'inference_engine_version':
inference_engine_version = value
elif attribute == 'inference_engine_backend':
backend = value if value!='dummy' else ''
elif attribute == 'workload': # actually, the model!
benchmark = value
elif attribute == 'scenario':
scenario = value
elif attribute == 'mode':
mode = value
if division and task and platform and inference_engine and benchmark and scenario and mode:
library = inference_engine + (('-' + inference_engine_version) if inference_engine_version else '')
elif 'velociti' in experiment_tags:
# Expected format: [ "mlperf", "open", "object-detection", "velociti", "cpu", "rcnn-inception-resnet-v2-lowproposals", "singlestream", "accuracy" ]
(_, division, task, platform, backend, benchmark, scenario, mode) = experiment_tags
if task == 'object-detection':
library = 'tensorflow-v1.14'
else:
library = 'tensorrt-v6.0'
backend = ''
notes = '======= DEMO ======='
elif 'accuracy' in experiment_tags:
# FIXME: With the benefit of hindsight, [ ..., "armnn-v19.08", "neon", ... ] should have come
# as one tag "armnn-v19.08-neon", since we join them in this notebook anyway.
if 'neon' in experiment_tags or 'opencl' in experiment_tags:
# Expected format: [ "mlperf", "open", "image-classification", "firefly", "armnn-v19.08", "neon", "mobilenet-v1-0.5-128", "singlestream", "accuracy", "using-opencv" ]
(_, division, task, platform, library, backend, benchmark, scenario, mode, preprocessing) = experiment_tags
else:
# Expected format: [ "mlperf", "open", "image-classification", "firefly", "tflite-v1.15", "mobilenet-v1-0.5-128", "singlestream", "accuracy", "using-opencv" ]
(_, division, task, platform, library, benchmark, scenario, mode, preprocessing) = experiment_tags
elif 'performance' in experiment_tags:
if 'neon' in experiment_tags or 'opencl' in experiment_tags:
# Expected format: [ "mlperf", "open", "image-classification", "firefly", "armnn-v19.08", "neon", "mobilenet-v1-0.5-128", "singlestream", "performance" ]
(_, division, task, platform, library, backend, benchmark, scenario, mode) = experiment_tags
else:
# Expected format: [ "mlperf", "open", "image-classification", "firefly", "tflite-v1.15", "mobilenet-v1-0.5-128", "singlestream", "performance" ]
(_, division, task, platform, library, benchmark, scenario, mode) = experiment_tags
elif 'audit' in experiment_tags: # As accuracy but with the test name instead of the preprocessing method.
if 'neon' in experiment_tags or 'opencl' in experiment_tags:
# Expected format: [ "mlperf", "open", "image-classification", "firefly", "armnn-v19.08", "neon", "mobilenet-v1-0.5-128", "singlestream", "audit", "TEST03" ]
(_, division, task, platform, library, backend, benchmark, scenario, mode, test) = experiment_tags
else:
# Expected format: [ "mlperf", "open", "image-classification", "firefly", "tflite-v1.15", "mobilenet-v1-0.5-128", "singlestream", "audit", "TEST03" ]
(_, division, task, platform, library, benchmark, scenario, mode, test) = experiment_tags
else:
raise Exception("Expected 'accuracy' or 'performance' or 'audit' in experiment_tags!")
# if mode == 'accuracy': continue
organization = submitter
if not inference_engine:
(inference_engine, inference_engine_version) = library.split('-')
if backend != '':
system = platform+'-'+library+'-'+backend
else:
system = platform+'-'+library
division_system = division+'-'+system
program_and_model_combination = program_name+'-'+benchmark
#
# Directory structure according to the Inference section of the General MLPerf Submission Rules:
# https://github.com/mlperf/policies/blob/master/submission_rules.adoc#552-inference
#
# <division>/
# <organization>/
#
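# For example, with the defaults used in this notebook (illustrative paths only;
# the actual <system_desc_id>, benchmark and program names are derived per experiment):
#
#   closed/dividiti/systems/rpi4-tflite-v1.15.json
#   closed/dividiti/code/mobilenet/<program_name>/...
#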
division_dir = os.path.join(root_dir, division)
if not os.path.exists(division_dir): os.mkdir(division_dir)
organization_dir = os.path.join(division_dir, organization)
if not os.path.exists(organization_dir): os.mkdir(organization_dir)
#
# "systems"/
# <system_desc_id>.json
#
systems_dir = os.path.join(organization_dir, 'systems')
if not os.path.exists(systems_dir): os.mkdir(systems_dir)
system_json_name = '%s.json' % system
system_json_path = os.path.join(systems_dir, system_json_name)
system_json = dump_system_description_dictionary(system_json_path, division, platform, inference_engine, inference_engine_version, backend)
print('%s' % systems_dir)
print(' |_ %s [%s]' % (system_json_name, division_system))
#
# "code"/
# <benchmark_name_per_reference>/
# <implementation_id>/
# <Code interface with loadgen and other arbitrary stuff>
#
code_dir = os.path.join(organization_dir, 'code')
if not os.path.exists(code_dir): os.mkdir(code_dir)
# FIXME: For now, not always "per reference".
benchmark_dir = os.path.join(code_dir, benchmark)
if not os.path.exists(benchmark_dir): os.mkdir(benchmark_dir)
implementation_dir = os.path.join(benchmark_dir, program_name)
if not os.path.exists(implementation_dir): os.mkdir(implementation_dir)
print('%s' %
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various classes representing distributed inputs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import sys
import six
from tensorflow.python import tf2
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.experimental.ops import cardinality
from tensorflow.python.data.experimental.ops import distribute
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.ops import multi_device_iterator_ops
from tensorflow.python.data.ops import optional_ops
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_utils
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import input_ops
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import values
from tensorflow.python.distribute.distribute_lib import InputReplicationMode
from tensorflow.python.eager import context
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as tf_device
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.types import distribute as distribute_types
from tensorflow.python.util import nest
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.tf_export import tf_export
from tensorflow.tools.docs import doc_controls
def get_distributed_dataset(dataset,
input_workers,
strategy,
num_replicas_in_sync=None,
input_context=None):
"""Returns a distributed dataset from the given tf.data.Dataset instance.
This is a common function that is used by all strategies to return a
distributed dataset. The distributed dataset instance returned is different
depending on if we are in a TF 1 or TF 2 context. The distributed dataset
instances returned differ from each other in the APIs supported by each of
them.
Args:
dataset: a tf.data.Dataset instance.
input_workers: an InputWorkers object which specifies devices on which
iterators should be created.
strategy: a `tf.distribute.Strategy` object, used to run all-reduce to
handle last partial batch.
num_replicas_in_sync: Optional integer. If this is not None, the value is
used to decide how to rebatch datasets into smaller batches so that
the total batch size for each step (across all workers and replicas)
adds up to `dataset`'s batch size.
input_context: `InputContext` for sharding. Only pass this in for between
graph multi-worker cases where there is only one `input_worker`. In
these cases, we will shard based on the `input_pipeline_id` and
`num_input_pipelines` in the `InputContext`.
Returns:
A distributed dataset instance.
"""
if tf2.enabled():
return DistributedDataset(
dataset,
input_workers,
strategy,
num_replicas_in_sync=num_replicas_in_sync,
input_context=input_context)
else:
return DistributedDatasetV1(
dataset,
input_workers,
strategy,
num_replicas_in_sync=num_replicas_in_sync,
input_context=input_context)
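# A minimal usage sketch (illustrative; a concrete `tf.distribute.Strategy`
# builds the `InputWorkers` object internally from its worker/device layout):
#
#   dist_dataset = get_distributed_dataset(
#       dataset, input_workers, strategy,
#       num_replicas_in_sync=strategy.num_replicas_in_sync)
#   for per_replica_batch in iter(dist_dataset):
#       strategy.run(step_fn, args=(per_replica_batch,))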
def get_distributed_datasets_from_function(dataset_fn,
input_workers,
input_contexts,
strategy,
options=None):
"""Returns a distributed dataset from the given input function.
This is a common function that is used by all strategies to return a
distributed dataset. The distributed dataset instance returned is different
depending on if we are in a TF 1 or TF 2 context. The distributed dataset
instances returned differ from each other in the APIs supported by each of
them.
Args:
dataset_fn: a function that returns a tf.data.Dataset instance.
input_workers: an InputWorkers object which specifies devices on which
iterators should be created.
input_contexts: A list of `InputContext` instances to be passed to call(s)
to `dataset_fn`. Length and order should match worker order in
`worker_device_pairs`.
strategy: a `tf.distribute.Strategy` object, used to run all-reduce to
handle last partial batch.
options: Default is None. `tf.distribute.InputOptions` used to control
options on how this dataset is distributed.
Returns:
A distributed dataset instance.
Raises:
ValueError: if `options.experimental_replication_mode` and
`options.experimental_place_dataset_on_device` are not consistent
"""
if (options is not None and
options.experimental_replication_mode != InputReplicationMode.PER_REPLICA
and options.experimental_place_dataset_on_device):
raise ValueError(
"When `experimental_place_dataset_on_device` is set for dataset "
"placement, you must also specify `PER_REPLICA` for the "
"replication mode")
if (options is not None and
options.experimental_replication_mode == InputReplicationMode.PER_REPLICA
and options.experimental_prefetch_to_device and
options.experimental_place_dataset_on_device):
raise ValueError(
"`experimental_place_dataset_on_device` can not be set to True "
"when experimental_prefetch_to_device is True and "
"replication mode is set to `PER_REPLICA`")
if tf2.enabled():
return DistributedDatasetsFromFunction(dataset_fn, input_workers,
input_contexts, strategy, options)
else:
return DistributedDatasetsFromFunctionV1(
dataset_fn,
input_workers,
input_contexts,
strategy,
options)
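# Sketch of an `InputOptions` combination that passes the validation above
# (illustrative; the field names match the attributes checked in this function):
#
#   options = tf.distribute.InputOptions(
#       experimental_replication_mode=InputReplicationMode.PER_REPLICA,
#       experimental_place_dataset_on_device=True,
#       experimental_prefetch_to_device=False)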
@tf_export("distribute.DistributedIterator", v1=[])
class DistributedIteratorInterface(collections_abc.Iterator,
distribute_types.Iterator):
"""An iterator over `tf.distribute.DistributedDataset`.
`tf.distribute.DistributedIterator` is the primary mechanism for enumerating
elements of a `tf.distribute.DistributedDataset`. It supports the Python
Iterator protocol, which means it can be iterated over using a for-loop or by
fetching individual elements explicitly via `get_next()`.
You can create a `tf.distribute.DistributedIterator` by calling `iter` on
a `tf.distribute.DistributedDataset` or creating a python loop over a
`tf.distribute.DistributedDataset`.
Visit the [tutorial](https://www.tensorflow.org/tutorials/distribute/input)
on distributed input for more examples and caveats.
"""
def get_next(self):
"""Returns the next input from the iterator for all replicas.
Example use:
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> dataset = tf.data.Dataset.range(100).batch(2)
>>> dist_dataset = strategy.experimental_distribute_dataset(dataset)
>>> dist_dataset_iterator = iter(dist_dataset)
>>> @tf.function
... def one_step(input):
... return input
>>> step_num = 5
>>> for _ in range(step_num):
... strategy.run(one_step, args=(dist_dataset_iterator.get_next(),))
>>> strategy.experimental_local_results(dist_dataset_iterator.get_next())
(<tf.Tensor: shape=(1,), dtype=int64, numpy=array([10])>,
<tf.Tensor: shape=(1,), dtype=int64, numpy=array([11])>)
Returns:
A single `tf.Tensor` or a `tf.distribute.DistributedValues` which contains
the next input for all replicas.
Raises:
`tf.errors.OutOfRangeError`: If the end of the iterator has been reached.
"""
raise NotImplementedError(
"DistributedIterator.get_next() must be implemented in descendants.")
@property
def element_spec(self):
# pylint: disable=line-too-long
"""The type specification of an element of `tf.distribute.DistributedIterator`.
Example usage:
>>> global_batch_size = 16
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> dataset = tf.data.Dataset.from_tensors(([1.],[2])).repeat(100).batch(global_batch_size)
>>> distributed_iterator = iter(strategy.experimental_distribute_dataset(dataset))
>>> distributed_iterator.element_spec
(PerReplicaSpec(TensorSpec(shape=(None, 1), dtype=tf.float32, name=None),
TensorSpec(shape=(None, 1), dtype=tf.float32, name=None)),
PerReplicaSpec(TensorSpec(shape=(None, 1), dtype=tf.int32, name=None),
TensorSpec(shape=(None, 1), dtype=tf.int32, name=None)))
Returns:
A nested structure of `tf.TypeSpec` objects matching the structure of an
element of this `tf.distribute.DistributedIterator`. This returned value
is typically a `tf.distribute.DistributedValues` object and specifies the
`tf.TensorSpec` of individual components.
"""
raise NotImplementedError(
"DistributedIterator.element_spec() must be implemented in descendants")
def get_next_as_optional(self):
# pylint: disable=line-too-long
"""Returns a `tf.experimental.Optional` that contains the next value for all replicas.
If the `tf.distribute.DistributedIterator` has reached the end of the
sequence, the returned `tf.experimental.Optional` will have no value.
Example usage:
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> global_batch_size = 2
>>> steps_per_loop = 2
>>> dataset = tf.data.Dataset.range(10).batch(global_batch_size)
>>> distributed_iterator = iter(
... strategy.experimental_distribute_dataset(dataset))
>>> def step_fn(x):
... # train the model with inputs
... return x
>>> @tf.function
... def train_fn(distributed_iterator):
... for _ in tf.range(steps_per_loop):
... optional_data = distributed_iterator.get_next_as_optional()
... if not optional_data.has_value():
... break
... per_replica_results = strategy.run(step_fn, args=(optional_data.get_value(),))
... tf.print(strategy.experimental_local_results(per_replica_results))
>>> train_fn(distributed_iterator)
... # ([0 1], [2 3])
... # ([4], [])
Returns:
A `tf.experimental.Optional` object representing the next value from the
`tf.distribute.DistributedIterator` (if it has one) or no value.
"""
# pylint: enable=line-too-long
raise NotImplementedError(
"get_next_as_optional() not implemented in descendants")
@tf_export("distribute.DistributedDataset", v1=[])
class DistributedDatasetInterface(collections_abc.Iterable,
distribute_types.Iterable):
# pylint: disable=line-too-long
"""Represents a dataset distributed among devices and machines.
A `tf.distribute.DistributedDataset` could be thought of as a "distributed"
dataset. When you use `tf.distribute` API to scale training to multiple
devices or machines, you also need to distribute the input data, which leads
to a `tf.distribute.DistributedDataset` instance, instead of a
`tf.data.Dataset` instance in the non-distributed case. In TF 2.x,
`tf.distribute.DistributedDataset` objects are Python iterables.
Note: `tf.distribute.DistributedDataset` instances are *not* of type
`tf.data.Dataset`. It only supports two usages we will mention below:
iteration and `element_spec`. We don't support any other APIs to transform or
inspect the dataset.
There are two APIs to create a `tf.distribute.DistributedDataset` object:
`tf.distribute.Strategy.experimental_distribute_dataset(dataset)`and
`tf.distribute.Strategy.distribute_datasets_from_function(dataset_fn)`.
*When to use which?* When you have a `tf.data.Dataset` instance, and the
regular batch splitting (i.e. re-batch the input `tf.data.Dataset` instance
with a new batch size that is equal to the global batch size divided by the
number of replicas in sync) and autosharding (i.e. the
`tf.data.experimental.AutoShardPolicy` options) work for you, use the former
API. Otherwise, if you are *not* using a canonical `tf.data.Dataset` instance,
or you would like to customize the batch splitting or sharding, you can wrap
this logic in a `dataset_fn` and use the latter API. Both APIs handle
prefetching to the device for the user. For more details and examples, follow the
links to the APIs.
There are two main usages of a `DistributedDataset` object:
1. Iterate over it
"""
This script serves to do recurrence analysis on the sv-gene pairs identified
We do the following things:
1. From the top 100 of each SV type (so top 400), which genes are there? Which are the top 15 most recurrent?
2. For these genes, also check which other mutations are found in these genes in different patients.
3. Then also check which genes are recurrent if we ignore the top 100, and just look across all positive SV-gene pairs.
"""
import sys
import numpy as np
import random
from scipy import stats
from statsmodels.sandbox.stats.multicomp import multipletests
import matplotlib.pyplot as plt
import os
import matplotlib
matplotlib.use('Agg')
outDir = sys.argv[1]
finalOutDir = outDir + '/figure4/'
if not os.path.exists(finalOutDir):
os.makedirs(finalOutDir)
finalOutDirFullFigure = outDir + '/figS5/'
if not os.path.exists(finalOutDirFullFigure):
os.makedirs(finalOutDirFullFigure)
#load the sv-gene pairs
positivePairs = np.loadtxt(outDir + '/linkedSVGenePairs/nonCoding_geneSVPairs.txt_pathogenicPairsFeatures.txt', dtype='object')
print(positivePairs.shape)
topPairs = dict()
topPairGenes = dict() #all unique genes
svTypes = ['DEL', 'DUP', 'INV', 'ITX']
for svType in svTypes:
svPairs = np.loadtxt(outDir + '/featureImportance/pairLabels_top100_' + svType + '.txt', dtype='object')
rankedPairs = []
ind = len(svPairs)
for pair in svPairs:
splitPair = pair.split('_')
topPairGenes[splitPair[0]] = 0
rankedPairs.append([pair, ind])
ind -= 1
topPairs[svType] = rankedPairs
degPairs = np.loadtxt(outDir + '/tadDisruptionsZScores/zScores.txt', dtype='object')
#format: gene as key; the values list first how many times we found the gene,
#then how many of those were DELs, DUPs, INVs and ITXs.
splitPairs = dict()
genes = dict()
for pair in positivePairs: #get stats for all pairs
splitPair = pair[0].split('_')
if splitPair[0] + '_' + splitPair[7] not in splitPairs:
splitPairs[splitPair[0] + '_' + splitPair[7]] = []
splitPairs[splitPair[0] + '_' + splitPair[7]].append(pair)
if splitPair[0] not in genes:
#count, cross-patient count, nc: del, dup, inv, itx, snv, cnv amp, cnv del, sv del, sv dup, sv inv, sv itx
genes[splitPair[0]] = [0]*13
genes[splitPair[0]].append([]) #add the patient names here
genes[splitPair[0]].append(0) #negative dels
genes[splitPair[0]].append(0) #negative dups
genes[splitPair[0]].append(0) #negative invs
genes[splitPair[0]].append(0) #negative itx
genes[splitPair[0]][0] += 1
if splitPair[12] == 'DEL':
genes[splitPair[0]][1] += 1
elif splitPair[12] == 'DUP':
genes[splitPair[0]][2] += 1
elif splitPair[12] == 'INV':
genes[splitPair[0]][3] += 1
elif splitPair[12] == 'ITX':
genes[splitPair[0]][4] += 1
patient = splitPair[7]
genes[splitPair[0]][13].append(patient)
#convert to numpy array for easy ranking
recurrentGenes = []
for gene in genes:
#count how many unique patients affect the gene for recurrence
uniquePatients = np.unique(genes[gene][13])
data = [gene] + [len(uniquePatients)] + genes[gene]
recurrentGenes.append(data)
recurrentGenes = np.array(recurrentGenes, dtype='object')
#sort
sortedGenes = recurrentGenes[np.argsort(recurrentGenes[:,1])[::-1]]
sortedGenesTop = []
for gene in sortedGenes:
if gene[0] not in topPairGenes:
continue
sortedGenesTop.append(gene)
sortedGenesTop = np.array(sortedGenesTop, dtype='object')
#make a matrix in which we show visually which genes are affected in which patients
#this matrix is genes x patients
uniquePatients = dict()
top = 15 #making the matrix only for the top X genes
ind = 0
for gene in sortedGenesTop:
if ind >= top:
continue
patients = gene[15]
for patient in patients:
if patient not in uniquePatients:
uniquePatients[patient] = 0
uniquePatients[patient] += 1
ind += 1
#make a matrix of genes by patients
recurrenceMatrix = np.zeros([top, len(uniquePatients)])
ind = 0
patientOrder = dict() #order of patients in the matrix
for patientInd in range(0, len(uniquePatients)):
patient = list(uniquePatients.keys())[patientInd]
patientOrder[patient] = patientInd
for gene in sortedGenesTop:
if ind >= top:
continue
patients = gene[15]
for patient in patients:
patientInd = patientOrder[patient]
recurrenceMatrix[ind, patientInd] += 1
ind += 1
print(recurrenceMatrix)
#make a grid plot, showing the different SV types that the patients have
#color the genes with -/+ direction, see if it correlates with the SV types.
fig, ax = plt.subplots()
for row in range(0, recurrenceMatrix.shape[0]):
if row < recurrenceMatrix.shape[0]-1:
ax.axhline(row+0.5, linestyle='--', color='k', linewidth=0.5)
for col in range(0, recurrenceMatrix.shape[1]):
if col < recurrenceMatrix.shape[1]-1:
ax.axvline(col+0.5, linestyle='--', color='k', linewidth=0.5)
if recurrenceMatrix[row,col] > 0:
#get the sv type to see which symbol to assign
gene = sortedGenesTop[row, 0]
patient = list(uniquePatients.keys())[col]
pairs = splitPairs[gene + '_' + patient]
#generate some random offsets to avoid overlapping data
offsetsX = random.sample(range(-30,30), len(pairs))
offsetsX = [i / float(100) for i in offsetsX]
offsetsY = random.sample(range(-30,30), len(pairs))
offsetsY = [i / float(100) for i in offsetsY]
ind = 0
for pair in pairs:
splitPair = pair[0].split('_')
svType = splitPair[12]
markerType = '.'
if svType == 'DEL':
markerType = '.'
elif svType == 'DUP':
markerType = 's'
elif svType == 'INV':
markerType = '^'
elif svType == 'ITX':
markerType = '*'
#also get up/down color
if patient + '_' + gene in degPairs[:,0]:
#get the z-score of the pair.
degPairInfo = degPairs[degPairs[:,0] == patient + '_' + gene][0]
color = 'red'
if float(degPairInfo[5]) > 1.5:
color = 'red'
elif float(degPairInfo[5]) < -1.5:
color = 'blue'
else:
color = 'grey'
else:
continue #this is a pair with likely coding mutations, skip it
plt.scatter(col + offsetsY[ind], offsetsX[ind] + (recurrenceMatrix.shape[0] - row -1), marker=markerType, edgecolor=color,
facecolor='none', s=35)
ind += 1
#the genes are swapped around to show the most recurrent on top, so reverse the labels as well
plt.yticks(range(0, recurrenceMatrix.shape[0]), sortedGenesTop[0:top,0][::-1])
plt.xticks(range(0, recurrenceMatrix.shape[1]), list(uniquePatients.keys()), rotation=90)
#plt.grid()
plt.tight_layout()
plt.savefig(finalOutDir + '/recurrence_top400.svg')
plt.clf()
#Next, we are interested in patients with alternative mutations.
#So here, for each gene, first show how many patients have an SNV, CNV, or SV
#keep in mind that a duplication could be non-coding if it is in the same patient
#this will later become obvious in the visualization
#load the patient-gene mutation pairs
mutationDir = outDir + '/patientGeneMutationPairs/'
snvPatients = np.load(mutationDir + 'snvPatients.npy', allow_pickle=True, encoding='latin1').item()
svPatientsDel = np.load(mutationDir + 'svPatientsDel.npy', allow_pickle=True, encoding='latin1').item()
svPatientsDup = np.load(mutationDir + 'svPatientsDup.npy', allow_pickle=True, encoding='latin1').item()
svPatientsInv = np.load(mutationDir + 'svPatientsInv.npy', allow_pickle=True, encoding='latin1').item()
svPatientsItx = np.load(mutationDir + 'svPatientsItx.npy', allow_pickle=True, encoding='latin1').item()
cnvPatientsDel = np.load(mutationDir + 'cnvPatientsDel.npy', allow_pickle=True, encoding='latin1').item()
cnvPatientsAmp = np.load(mutationDir + 'cnvPatientsAmp.npy', allow_pickle=True, encoding='latin1').item()
#also show the non-coding SVs that do not lead to expression changes
allPairs = np.loadtxt(outDir + '/linkedSVGenePairs/nonCoding_geneSVPairs.txt_', dtype='object')
for pair in allPairs:
splitPair = pair[0].split('_')
gene = splitPair[0]
patient = splitPair[7]
sortedGeneInd = np.where(sortedGenes[:,0] == gene)[0]
if gene in snvPatients[patient]:
sortedGenes[sortedGeneInd, 5] += 1
if gene in cnvPatientsDel[patient]:
sortedGenes[sortedGeneInd, 6] += 1
if gene in cnvPatientsAmp[patient]:
sortedGenes[sortedGeneInd, 7] += 1
if gene in svPatientsDel[patient]:
sortedGenes[sortedGeneInd, 8] += 1
if gene in svPatientsDup[patient]:
sortedGenes[sortedGeneInd, 9] += 1
if gene in svPatientsInv[patient]:
sortedGenes[sortedGeneInd, 10] += 1
if gene in svPatientsItx[patient]:
sortedGenes[sortedGeneInd, 11] += 1
#for the current pair, only add it if it is not in the positive set.
if pair[0] not in positivePairs[:,0]:
#then check the type of SV, and add it to the right gene.
svType = splitPair[12]
if svType == 'DEL':
sortedGenes[sortedGeneInd, 16] += 1
elif svType == 'DUP':
sortedGenes[sortedGeneInd, 17] += 1
elif svType == 'INV':
sortedGenes[sortedGeneInd, 18] += 1
elif svType == 'ITX':
sortedGenes[sortedGeneInd, 19] += 1
print(sortedGenesTop[0:15,:])
#show these data in a bar plot.
#for each type of mutation, add to the stacked bar chart.
#fig,ax = plt.subplots()
geneInd = 0
ymax = 0
for gene in sortedGenes:
if gene[0] not in sortedGenesTop[0:15,0]:
continue
print(gene)
plt.bar(geneInd, gene[5], color='#ffcc00ff')
plt.bar(geneInd, gene[6], bottom=gene[5], color='#9955ffff')
plt.bar(geneInd, gene[7], bottom=gene[5]+gene[6], color='#ff6600b5')
plt.bar(geneInd, gene[8], bottom=gene[5]+gene[6]+gene[7], color='#0000ffb4')
plt.bar(geneInd, gene[9], bottom=gene[5]+gene[6]+gene[7]+gene[8], color='#d40000c6')
plt.bar(geneInd, gene[10], bottom=gene[5]+gene[6]+gene[7]+gene[8]+gene[9], color='#ff00ccb8')
plt.bar(geneInd, gene[11], bottom=gene[5]+gene[6]+gene[7]+gene[8]+gene[9]+gene[10], color='#808080ff')
if gene[5]+gene[6]+gene[7]+gene[8]+gene[9]+gene[10]+gene[11] > ymax:
ymax = gene[5]+gene[6]+gene[7]+gene[8]+gene[9]+gene[10]+gene[11] + 1
geneInd += 1
plt.ylim(0,ymax+1)
plt.tight_layout()
plt.savefig(finalOutDir + '/recurrence_bars.svg')
plt.clf()
exit()
###Also make the full recurrence plot for all patients.
#this is quick and dirty, should have been a re-usable function.
#load the sv-gene pairs
topPairs = dict()
topPairGenes = dict() #all unique genes
svTypes = ['DEL', 'DUP', 'INV', 'ITX']
degPairs = np.loadtxt(outDir + '/tadDisruptionsZScores/zScores.txt', dtype='object')
#format: gene as key; the values list first how many times we found the gene,
#then how many of those were DELs, DUPs, INVs and ITXs.
splitPairs = dict()
genes = dict()
for pair in positivePairs: #get stats for all pairs
splitPair = pair[0].split('_')
if splitPair[0] + '_' + splitPair[7] not in splitPairs:
splitPairs[splitPair[0] + '_' + splitPair[7]] = []
splitPairs[splitPair[0] + '_' + splitPair[7]].append(pair)
if splitPair[0] not in genes:
#count, cross-patient count, nc: del, dup, inv, itx, snv, cnv amp, cnv del, sv del, sv dup, sv inv, sv itx
genes[splitPair[0]] = [0]*13
genes[splitPair[0]].append([]) #add the patient names here
genes[splitPair[0]].append(0) #negative dels
genes[splitPair[0]].append(0) #negative dups
genes[splitPair[0]].append(0) #negative invs
genes[splitPair[0]].append(0) #negative itx
genes[splitPair[0]][0] += 1
if splitPair[12] == 'DEL':
genes[splitPair[0]][1] += 1
elif splitPair[12] == 'DUP':
genes[splitPair[0]][2] += 1
elif splitPair[12] == 'INV':
genes[splitPair[0]][3] += 1
elif splitPair[12] == 'ITX':
genes[splitPair[0]][4] += 1
patient = splitPair[7]
genes[splitPair[0]][13].append(patient)
#convert to numpy array for easy ranking
recurrentGenes = []
for gene in genes:
#count how many unique patients affect the gene for recurrence
uniquePatients = np.unique(genes[gene][13])
data = [gene] + [len(uniquePatients)] + genes[gene]
recurrentGenes.append(data)
recurrentGenes = np.array(recurrentGenes, dtype='object')
#sort
sortedGenes = recurrentGenes[np.argsort(recurrentGenes[:,1])[::-1]]
sortedGenesTop = []
for gene in sortedGenes:
#if gene[0] not in topPairGenes:
# continue
sortedGenesTop.append(gene)
sortedGenesTop = np.array(sortedGenesTop, dtype='object')
#make a matrix in which we show visually which genes are affected in which patients
#this matrix is genes x patients
uniquePatients = dict()
top = 50 #making the matrix only for the top X genes
ind = 0
for gene in sortedGenesTop:
if ind >= top:
continue
patients = gene[15]
for patient in patients:
if patient not in uniquePatients:
uniquePatients[patient] = 0
uniquePatients[patient] += 1
ind += 1
#make a matrix of genes by patients
recurrenceMatrix = np.zeros([top, len(uniquePatients)])
ind = 0
patientOrder = dict() #order of patients in the matrix
for patientInd in range(0, len(uniquePatients)):
patient = list(uniquePatients.keys())[patientInd]
patientOrder[patient] = patientInd
for gene in sortedGenesTop:
if ind >= top:
continue
patients = gene[15]
for patient in patients:
patientInd = patientOrder[patient]
recurrenceMatrix[ind, patientInd] += 1
ind += 1
print(recurrenceMatrix)
#make a grid plot, showing the different SV types that the patients have
#color the genes with -/+ direction, see if it correlates with the SV types.
fig, ax = plt.subplots(figsize=(20,10))
for row in range(0, recurrenceMatrix.shape[0]):
if row < recurrenceMatrix.shape[0]-1:
ax.axhline(row+0.5, linestyle='--', color='k', linewidth=0.5)
for col in range(0, recurrenceMatrix.shape[1]):
if col < recurrenceMatrix.shape[1]-1:
ax.axvline(col+0.5, linestyle='--', color='k', linewidth=0.5)
if recurrenceMatrix[row,col] > 0:
#get the sv type to see which symbol to assign
<reponame>clovaai/StatAssist-GradBoost<filename>frostnet.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.models.registry import register_model
def _cfg(url=''):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'conv1', 'classifier': 'classifier',
}
class ConvBNReLU(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0,
dilation=1, groups=1):
super(ConvBNReLU, self).__init__()
self.conv = nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size, stride,
padding, dilation, groups, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(False)
)
def forward(self, x):
x = self.conv(x)
return x
def fuse_model(self):
torch.quantization.fuse_modules(self.conv, ['0', '1','2'], inplace=True)
class ConvReLU(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0,
dilation=1, groups=1):
super(ConvReLU, self).__init__()
self.conv = nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size, stride,
padding, dilation, groups, bias=False),
nn.ReLU(False)
)
def forward(self, x):
x = self.conv(x)
return x
def fuse_model(self):
torch.quantization.fuse_modules(self.conv, ['0', '1'], inplace=True)
class ConvBN(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0,
dilation=1, groups=1):
super(ConvBN, self).__init__()
self.conv = nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size, stride,
padding, dilation, groups, bias=False),
nn.BatchNorm2d(out_channels)
)
def forward(self, x):
x = self.conv(x)
return x
def fuse_model(self):
torch.quantization.fuse_modules(self.conv, ['0', '1'], inplace=True)
def _make_divisible(v, divisor=8, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
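# Worked examples of the rounding behavior (illustrative values only):
#   _make_divisible(37)      -> 40   (nearest multiple of 8)
#   _make_divisible(27)      -> 32   (24 would undershoot 27 by more than 10%, so bump up by one divisor)
#   _make_divisible(90, 64)  -> 128  (rounding down to 64 loses more than 10%, so round up)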
class CascadePreExBottleneck(nn.Module):
def __init__(self, in_channels, out_channels, quantized = False,
kernel_size=3, stride=1, dilation=1,expand_ratio=6,
reduce_factor = 4, block_type = 'CAS'):
super(CascadePreExBottleneck, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.expand_ratio = expand_ratio
self.quantized = quantized
if in_channels//reduce_factor < 8:
block_type = 'MB'
self.block_type = block_type
r_channels = _make_divisible(in_channels//reduce_factor)
if stride == 1 and in_channels==out_channels:
self.reduction = False
else:
self.reduction = True
if self.expand_ratio == 1:
self.squeeze_conv = None
self.conv1 = None
n_channels = in_channels
else:
if block_type == 'CAS':
self.squeeze_conv = ConvBNReLU(in_channels,r_channels, 1)
n_channels = r_channels + in_channels
else:
n_channels = in_channels
self.conv1 = ConvBNReLU(n_channels,n_channels*expand_ratio, 1)
self.conv2 = ConvBNReLU(n_channels*expand_ratio, n_channels*expand_ratio, kernel_size, stride,
(kernel_size - 1) // 2 , 1,
groups=n_channels*expand_ratio)
self.reduce_conv = ConvBN(n_channels*expand_ratio, out_channels, 1)
if self.quantized:
self.skip_add = nn.quantized.FloatFunctional()
self.quant_cat = nn.quantized.FloatFunctional()
def forward(self, x):
if not self.expand_ratio == 1:
if self.block_type == 'CAS':
squeezed = self.squeeze_conv(x)
if self.quantized:
out = self.quant_cat.cat([squeezed,x],1)
else:
out = torch.cat([squeezed,x],1)
else:
out = x
out = self.conv1(out)
else:
out = x
out = self.conv2(out)
out = self.reduce_conv(out)
if not self.reduction:
if self.quantized:
out = self.skip_add.add(x,out)
else:
out = torch.add(x,out)
return out
class FrostNet(nn.Module):
def __init__(self, nclass=1000, mode='large', width_mult=1.0, quantized = False,
bottleneck=CascadePreExBottleneck, drop_rate=0.2, dilated=False,**kwargs):
super(FrostNet, self).__init__()
self.quantized = quantized
if mode == 'large':
layer1_setting = [
# kernel_size, c, e, r, s
[3, 16, 1, 1, 1], #0
[3, 24, 6, 4, 2], #1
#[, , , , ], #2
#[, , , , ], #3
[3, 24, 3, 4, 1], #4
]
layer2_setting = [
[5, 40, 6, 4, 2], #5
#[, , , , ], #6
#[, , , , ], #7
[3, 40, 3, 4, 1], #8
]
layer3_setting = [
[5, 80, 6, 4, 2], #9
#[, , , , ], #10
[5, 80, 3, 4, 1], #11
[5, 80, 3, 4, 1], #12
[5, 96, 6, 4, 1], #13
#[, , , , ], #14
[5, 96, 3, 4, 1], #15
[3, 96, 3, 4, 1], #16
[3, 96, 3, 4, 1], #17
]
layer4_setting = [
[5, 192, 6, 2, 2], #18
[5, 192, 6, 4, 1], #19
[5, 192, 6, 4, 1], #20
[5, 192, 3, 4, 1], #21
[5, 192, 3, 4, 1], #22
]
layer5_setting = [
[5, 320, 6, 2, 1], #23
]
elif mode == 'base':
layer1_setting = [
# kernel_size, c, e, r, s
[3, 16, 1, 1, 1], #0
[5, 24, 6, 4, 2], #1
#[, , , , ], #2
#[, , , , ], #3
[3, 24, 3, 4, 1], #4
]
layer2_setting = [
[5, 40, 3, 4, 2], #5
#[, , , , ], #6
[5, 40, 3, 4, 1], #7
#[, , , , ], #8
]
layer3_setting = [
[5, 80, 3, 4, 2], #9
#[, , , , ], #10
#[, , , , ], #11
[3, 80, 3, 4, 1], #12
[5, 96, 3, 2, 1], #13
[3, 96, 3, 4, 1], #14
[5, 96, 3, 4, 1], #15
[5, 96, 3, 4, 1], #16
]
layer4_setting = [
[5, 192, 6, 2, 2], #17
[5, 192, 3, 2, 1], #18
[5, 192, 3, 2, 1], #19
[5, 192, 3, 2, 1], #20
]
layer5_setting = [
[5, 320, 6, 2, 1], #21
]
elif mode == 'small':
layer1_setting = [
# kernel_size, c, e, r, s
[3, 16, 1, 1, 1], #0
[5, 24, 3, 4, 2], #1
[3, 24, 3, 4, 1], #2
#[, , , , ], #3
]
layer2_setting = [
[5, 40, 3, 4, 2], #4
#[, , , , ], #5
#[, , , , ], #6
]
layer3_setting = [
[5, 80, 3, 4, 2], #7
[5, 80, 3, 4, 1], #8
[3, 80, 3, 4, 1], #9
[5, 96, 3, 2, 1], #10
[5, 96, 3, 4, 1], #11
[5, 96, 3, 4, 1], #12
]
layer4_setting = [
[5, 192, 6, 4, 2], #13
[5, 192, 6, 4, 1], #14
[5, 192, 6, 4, 1], #15
]
layer5_setting = [
[5, 320, 6, 2, 1], #16
]
else:
raise ValueError('Unknown mode.')
# building first layer
self.in_channels = _make_divisible(int(32*min(1.0,width_mult)))
self.conv1 = ConvBNReLU(3, self.in_channels, 3, 2, 1)
# building bottleneck blocks
self.layer1 = self._make_layer(bottleneck, layer1_setting, width_mult, 1)
self.layer2 = self._make_layer(bottleneck, layer2_setting, width_mult, 1)
self.layer3 = self._make_layer(bottleneck, layer3_setting, width_mult, 1)
if dilated:
dilation = 2
else:
dilation = 1
self.layer4 = self._make_layer(bottleneck, layer4_setting, width_mult, dilation)
self.layer5 = self._make_layer(bottleneck, layer5_setting, width_mult, dilation)
# building last several layers
last_in_channels = self.in_channels
self.last_layer = ConvBNReLU(last_in_channels,1280, 1)
self.classifier = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Dropout(drop_rate),
nn.Conv2d(1280, nclass, 1)
)
self.mode = mode
self._init_weights()
if self.quantized:
self.quant = torch.quantization.QuantStub()
self.dequant = torch.quantization.DeQuantStub()
def _make_layer(self, block, block_setting, width_mult, dilation=1):
layers = list()
for k, c, e, r, s in block_setting:
out_channels = _make_divisible(int(c * width_mult))
stride = s if (dilation == 1) else 1
layers.append(block(self.in_channels, out_channels, quantized = self.quantized, kernel_size = k,
stride=stride, dilation=dilation, expand_ratio=e, reduce_factor = r))
self.in_channels = out_channels
return nn.Sequential(*layers)
def forward(self, x):
if self.quantized:
x = self.quant(x)
x = self.conv1(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.layer5(x)
x = self.last_layer(x)
x = self.classifier(x)
if self.quantized:
x = self.dequant(x)
x = x.view(x.size(0), x.size(1))
return x
def fuse_model(self):
for m in self.modules():
if type(m) in [ConvBNReLU, ConvBN, ConvReLU]:
m.fuse_model()
def _init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.BatchNorm2d):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.zeros_(m.bias)
@register_model
def frostnet_quant_large_1_25(**kwargs):
return FrostNet(nclass=1000, mode='large', width_mult=1.25, quantized=True, bottleneck= CascadePreExBottleneck, **kwargs)
@register_model
def frostnet_quant_large_1_0(**kwargs):
return FrostNet(nclass=1000, mode='large', width_mult=1.0, quantized=True, bottleneck= CascadePreExBottleneck, **kwargs)
@register_model
def frostnet_quant_large_0_75(**kwargs):
return FrostNet(nclass=1000, mode='large', width_mult=0.75, quantized=True, bottleneck= CascadePreExBottleneck, **kwargs)
@register_model
def frostnet_quant_large_0_5(**kwargs):
return FrostNet(nclass=1000, mode='large', width_mult=0.5, quantized=True,bottleneck= CascadePreExBottleneck, **kwargs)
@register_model
def frostnet_quant_large_0_35(**kwargs):
return FrostNet(nclass=1000, mode='large', width_mult=0.35, quantized=True,bottleneck= CascadePreExBottleneck, **kwargs)
@register_model
def frostnet_quant_base_1_25(**kwargs):
return FrostNet(nclass=1000, mode='base', width_mult=1.25, quantized=True, bottleneck= CascadePreExBottleneck, **kwargs)
@register_model
def frostnet_quant_base_1_0(**kwargs):
return FrostNet(nclass=1000, mode='base', width_mult=1.0, quantized=True, bottleneck= CascadePreExBottleneck, **kwargs)
@register_model
def frostnet_quant_base_0_75(**kwargs):
return FrostNet(nclass=1000, mode='base', | |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# -*- coding: UTF-8 -*-
import unittest
import os
import numpy as np
import gc
import onnxruntime as onnxrt
import threading
import sys
from helper import get_name
from onnxruntime.capi.onnxruntime_pybind11_state import Fail
class TestInferenceSession(unittest.TestCase):
def run_model(self, session_object, run_options):
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = session_object.get_inputs()[0].name
res = session_object.run([], {input_name: x}, run_options=run_options)
output_expected = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testModelSerialization(self):
try:
so = onnxrt.SessionOptions()
so.log_verbosity_level = 1
so.logid = "TestModelSerialization"
so.optimized_model_filepath = "./PythonApiTestOptimizedModel.onnx"
onnxrt.InferenceSession(get_name("mul_1.onnx"), sess_options=so, providers=['CPUExecutionProvider'])
self.assertTrue(os.path.isfile(so.optimized_model_filepath))
except Fail as onnxruntime_error:
if str(onnxruntime_error) == "[ONNXRuntimeError] : 1 : FAIL : Unable to serialize model as it contains" \
" compiled nodes. Please disable any execution providers which generate compiled nodes.":
pass
else:
raise onnxruntime_error
def testGetProviders(self):
self.assertTrue('CPUExecutionProvider' in onnxrt.get_available_providers())
# get_all_providers() returns the default EP order from highest to lowest.
# CPUExecutionProvider should always be last.
self.assertTrue('CPUExecutionProvider' == onnxrt.get_all_providers()[-1])
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=onnxrt.get_available_providers())
self.assertTrue('CPUExecutionProvider' in sess.get_providers())
def testEnablingAndDisablingTelemetry(self):
onnxrt.disable_telemetry_events()
# no-op on non-Windows builds
# may be no-op on certain Windows builds based on build configuration
onnxrt.enable_telemetry_events()
def testSetProviders(self):
if 'CUDAExecutionProvider' in onnxrt.get_available_providers():
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=['CUDAExecutionProvider'])
# confirm that CUDA Provider is in list of registered providers.
self.assertTrue('CUDAExecutionProvider' in sess.get_providers())
# reset the session and register only CPU Provider.
sess.set_providers(['CPUExecutionProvider'])
# confirm only CPU Provider is registered now.
self.assertEqual(['CPUExecutionProvider'], sess.get_providers())
def testSetProvidersWithOptions(self):
if 'TensorrtExecutionProvider' in onnxrt.get_available_providers():
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=['TensorrtExecutionProvider'])
self.assertIn('TensorrtExecutionProvider', sess.get_providers())
options = sess.get_provider_options()
option = options['TensorrtExecutionProvider']
self.assertIn('device_id', option)
self.assertIn('trt_max_partition_iterations', option)
self.assertIn('trt_min_subgraph_size', option)
self.assertIn('trt_max_workspace_size', option)
self.assertIn('trt_dump_subgraphs', option)
self.assertIn('trt_engine_cache_enable', option)
self.assertIn('trt_engine_cache_path', option)
self.assertIn('trt_force_sequential_engine_build', option)
max_partition_iterations = option['trt_max_partition_iterations']
new_max_partition_iterations = int(max_partition_iterations) + 1
min_subgraph_size = option['trt_min_subgraph_size']
new_min_subgraph_size = int(min_subgraph_size) + 1
ori_max_workspace_size = option['trt_max_workspace_size']
new_max_workspace_size = int(ori_max_workspace_size) // 2
option = {}
option['trt_max_partition_iterations'] = new_max_partition_iterations
option['trt_min_subgraph_size'] = new_min_subgraph_size
option['trt_max_workspace_size'] = new_max_workspace_size
dump_subgraphs = "true"
option['trt_dump_subgraphs'] = dump_subgraphs
engine_cache_enable = "true"
option['trt_engine_cache_enable'] = engine_cache_enable
engine_cache_path = './engine_cache'
option['trt_engine_cache_path'] = engine_cache_path
force_sequential_engine_build = "true"
option['trt_force_sequential_engine_build'] = force_sequential_engine_build
sess.set_providers(['TensorrtExecutionProvider'], [option])
options = sess.get_provider_options()
option = options['TensorrtExecutionProvider']
self.assertEqual(option['trt_max_partition_iterations'], str(new_max_partition_iterations))
self.assertEqual(option['trt_min_subgraph_size'], str(new_min_subgraph_size))
self.assertEqual(option['trt_max_workspace_size'], str(new_max_workspace_size))
self.assertEqual(option['trt_dump_subgraphs'], '1')
self.assertEqual(option['trt_engine_cache_enable'], '1')
self.assertEqual(option['trt_engine_cache_path'], str(engine_cache_path))
self.assertEqual(option['trt_force_sequential_engine_build'], '1')
# We currently disable the following test code since not all test machines/GPUs have NVIDIA INT8 capability
'''
int8_use_native_calibration_table = "false"
option['trt_int8_use_native_calibration_table'] = int8_use_native_calibration_table
int8_enable = "true"
option['trt_int8_enable'] = int8_enable
calib_table_name = '/home/onnxruntime/table.flatbuffers' # this file does not exist
option['trt_int8_calibration_table_name'] = calib_table_name
with self.assertRaises(RuntimeError):
sess.set_providers(['TensorrtExecutionProvider'], [option])
'''
if 'CUDAExecutionProvider' in onnxrt.get_available_providers():
import sys
import ctypes
CUDA_SUCCESS = 0
def runBaseTest1():
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=['CUDAExecutionProvider'])
self.assertTrue('CUDAExecutionProvider' in sess.get_providers())
option1 = {'device_id': 0}
sess.set_providers(['CUDAExecutionProvider'], [option1])
self.assertEqual(['CUDAExecutionProvider', 'CPUExecutionProvider'], sess.get_providers())
option2 = {'device_id': -1}
with self.assertRaises(RuntimeError):
sess.set_providers(['CUDAExecutionProvider'], [option2])
sess.set_providers(['CUDAExecutionProvider', 'CPUExecutionProvider'], [option1, {}])
self.assertEqual(['CUDAExecutionProvider', 'CPUExecutionProvider'], sess.get_providers())
def runBaseTest2():
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=['CUDAExecutionProvider'])
self.assertIn('CUDAExecutionProvider', sess.get_providers())
# test get/set of "gpu_mem_limit" configuration.
options = sess.get_provider_options()
self.assertIn('CUDAExecutionProvider', options)
option = options['CUDAExecutionProvider']
self.assertIn('gpu_mem_limit', option)
ori_mem_limit = option['gpu_mem_limit']
new_mem_limit = int(ori_mem_limit) // 2
option['gpu_mem_limit'] = new_mem_limit
sess.set_providers(['CUDAExecutionProvider'], [option])
options = sess.get_provider_options()
self.assertEqual(options['CUDAExecutionProvider']['gpu_mem_limit'], str(new_mem_limit))
option['gpu_mem_limit'] = ori_mem_limit
sess.set_providers(['CUDAExecutionProvider'], [option])
options = sess.get_provider_options()
self.assertEqual(options['CUDAExecutionProvider']['gpu_mem_limit'], ori_mem_limit)
def test_get_and_set_option_with_values(option_name, option_values):
provider_options = sess.get_provider_options()
self.assertIn('CUDAExecutionProvider', provider_options)
cuda_options = provider_options['CUDAExecutionProvider']
self.assertIn(option_name, cuda_options)
for option_value in option_values:
cuda_options[option_name] = option_value
sess.set_providers(['CUDAExecutionProvider'], [cuda_options])
new_provider_options = sess.get_provider_options()
self.assertEqual(
new_provider_options.get('CUDAExecutionProvider', {}).get(option_name),
str(option_value))
test_get_and_set_option_with_values(
'arena_extend_strategy', ['kNextPowerOfTwo', 'kSameAsRequested'])
test_get_and_set_option_with_values(
'cudnn_conv_algo_search', ["DEFAULT", "EXHAUSTIVE", "HEURISTIC"])
test_get_and_set_option_with_values(
'do_copy_in_default_stream', [0, 1])
option['gpu_external_alloc'] = '0'
option['gpu_external_free'] = '0'
option['gpu_external_empty_cache'] = '0'
sess.set_providers(['CUDAExecutionProvider'], [option])
options = sess.get_provider_options()
self.assertEqual(options['CUDAExecutionProvider']['gpu_external_alloc'], '0')
self.assertEqual(options['CUDAExecutionProvider']['gpu_external_free'], '0')
self.assertEqual(options['CUDAExecutionProvider']['gpu_external_empty_cache'], '0')
#
# Note: Tests that throw an exception leave an empty session due to how set_providers currently works,
# so run them last. Each set_providers call attempts to re-create a session, so it is
# fine for one failing test to run immediately after another failing test.
# Alternatively, a valid call to set_providers could be used to recreate the underlying session
# after a failed call.
#
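# Illustrative recovery sketch (not part of the original test): after a failed set_providers
# call, a subsequent valid call re-creates the underlying session, e.g.
#   sess.set_providers(['CUDAExecutionProvider'], [{'device_id': 0}])
#   assert 'CUDAExecutionProvider' in sess.get_providers()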
option['arena_extend_strategy'] = 'wrong_value'
with self.assertRaises(RuntimeError):
sess.set_providers(['CUDAExecutionProvider'], [option])
option['gpu_mem_limit'] = -1024
with self.assertRaises(RuntimeError):
sess.set_providers(['CUDAExecutionProvider'], [option])
option['gpu_mem_limit'] = 1024.1024
with self.assertRaises(RuntimeError):
sess.set_providers(['CUDAExecutionProvider'], [option])
option['gpu_mem_limit'] = 'wrong_value'
with self.assertRaises(RuntimeError):
sess.set_providers(['CUDAExecutionProvider'], [option])
def getCudaDeviceCount():
import ctypes
num_device = ctypes.c_int()
result = ctypes.c_int()
error_str = ctypes.c_char_p()
result = cuda.cuInit(0)
result = cuda.cuDeviceGetCount(ctypes.byref(num_device))
if result != CUDA_SUCCESS:
cuda.cuGetErrorString(result, ctypes.byref(error_str))
print("cuDeviceGetCount failed with error code %d: %s" % (result, error_str.value.decode()))
return -1
return num_device.value
def setDeviceIdTest(i):
import ctypes
import onnxruntime as onnxrt
device = ctypes.c_int()
result = ctypes.c_int()
error_str = ctypes.c_char_p()
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=['CPUExecutionProvider'])
option = {'device_id': i}
sess.set_providers(['CUDAExecutionProvider'], [option])
self.assertEqual(['CUDAExecutionProvider', 'CPUExecutionProvider'], sess.get_providers())
result = cuda.cuCtxGetDevice(ctypes.byref(device))
if result != CUDA_SUCCESS:
cuda.cuGetErrorString(result, ctypes.byref(error_str))
print("cuCtxGetDevice failed with error code %d: %s" % (result, error_str.value.decode()))
self.assertEqual(result, CUDA_SUCCESS)
self.assertEqual(i, device.value)
def runAdvancedTest():
num_device = getCudaDeviceCount()
if num_device < 0:
return
# Configure session to be ready to run on all available cuda devices
for i in range(num_device):
setDeviceIdTest(i)
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=['CPUExecutionProvider'])
# Configuring the session with invalid option values should fail
with self.assertRaises(RuntimeError):
option = {'device_id': num_device}
sess.set_providers(['CUDAExecutionProvider'], [option])
option = {'device_id': 'invalid_value'}
sess.set_providers(['CUDAExecutionProvider'], [option])
# Configuring the session with an invalid option name should also fail
with self.assertRaises(RuntimeError):
option = {'invalid_option': 123}
sess.set_providers(['CUDAExecutionProvider'], [option])
libnames = ('libcuda.so', 'libcuda.dylib', 'cuda.dll')
for libname in libnames:
try:
cuda = ctypes.CDLL(libname)
runBaseTest1()
runBaseTest2()
runAdvancedTest()
except OSError:
continue
else:
break
else:
runBaseTest1()
runBaseTest2()
# raise OSError("could not load any of: " + ' '.join(libnames))
def testInvalidSetProviders(self):
with self.assertRaises(RuntimeError) as context:
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=['CPUExecutionProvider'])
sess.set_providers(['InvalidProvider'])
self.assertTrue('Unknown Provider Type: InvalidProvider' in str(context.exception))
def testSessionProviders(self):
if 'CUDAExecutionProvider' in onnxrt.get_available_providers():
# create session from scratch, but constrain it to only use the CPU.
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=['CPUExecutionProvider'])
self.assertEqual(['CPUExecutionProvider'], sess.get_providers())
def testRunModel(self):
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=onnxrt.get_available_providers())
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
self.assertEqual(input_shape, [3, 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [3, 2])
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testRunModelFromBytes(self):
with open(get_name("mul_1.onnx"), "rb") as f:
content = f.read()
sess = onnxrt.InferenceSession(content, providers=onnxrt.get_available_providers())
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
self.assertEqual(input_shape, [3, 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [3, 2])
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testRunModel2(self):
sess = onnxrt.InferenceSession(get_name("matmul_1.onnx"), providers=onnxrt.get_available_providers())
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
self.assertEqual(input_shape, [3, 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [3, 1])
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[5.0], [11.0], [17.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testRunModel2Contiguous(self):
sess = onnxrt.InferenceSession(get_name("matmul_1.onnx"), providers=onnxrt.get_available_providers())
x = np.array([[2.0, 1.0], [4.0, 3.0], [6.0, 5.0]], dtype=np.float32)[:, [1, 0]]
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
self.assertEqual(input_shape, [3, 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [3, 1])
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[5.0], [11.0], [17.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
xcontiguous = np.ascontiguousarray(x)
rescontiguous = sess.run([output_name], {input_name: xcontiguous})
np.testing.assert_allclose(output_expected, rescontiguous[0], rtol=1e-05, atol=1e-08)
def testRunModelMultipleThreads(self):
available_providers = onnxrt.get_available_providers()
# Skip this test for a "pure" DML onnxruntime python wheel. We keep this test enabled for instances where both
# DML and CUDA EPs are available (the Windows GPU CI pipeline has this config) - this test will pass because CUDA
# has higher precedence than DML and the nodes are assigned to only the CUDA EP (which supports this test)
if ('DmlExecutionProvider' in available_providers and not 'CUDAExecutionProvider' in available_providers):
print("Skipping testRunModelMultipleThreads as the DML EP does not support calling Run() on different threads using the same session object ")
else:
so = onnxrt.SessionOptions()
so.log_verbosity_level = 1
so.logid = "MultiThreadsTest"
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), sess_options=so, providers=available_providers)
ro1 = onnxrt.RunOptions()
ro1.logid = "thread1"
t1 = threading.Thread(target=self.run_model, args=(sess, ro1))
ro2 = onnxrt.RunOptions()
ro2.logid = "thread2"
t2 = threading.Thread(target=self.run_model, args=(sess, ro2))
t1.start()
t2.start()
t1.join()
t2.join()
def testListAsInput(self):
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=onnxrt.get_available_providers())
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
res = sess.run([], {input_name: x.tolist()})
#!/usr/bin/python
import os, glob, hashlib, pickle, argparse, shutil, ntpath
import multiprocessing, signal, sys
from multiprocessing import Pool
from functools import partial
######################### Classes ##############################
class AndroidDensity:
def __init__(self, name, path, scaleFactor):
self.name = name
self.path = path
self.scaleFactor = scaleFactor
class IosDensity:
def __init__(self, name, suffix, scaleFactor):
self.name = name
self.suffix = suffix
self.scaleFactor = scaleFactor
class Colors:
PURPLE = '\033[95m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
################################################################
################# Directories configuration ####################
dirRoot = "./"
dirRaw = dirRoot + "raw/"
dirAssets = dirRoot + "drawables/"
# ScaleFactor with origin in XXXHDPI density. Source: http://jennift.com/dpical.html
androidDensities = [
AndroidDensity("HDPI", "drawable-hdpi/", 0.375),
AndroidDensity("X-HDPI", "drawable-xhdpi/", 0.5),
AndroidDensity("XX-HDPI", "drawable-xxhdpi/", 0.75),
AndroidDensity("XXX-HDPI", "drawable-xxxhdpi/", 1.0)
]
# ScaleFactor with origin in @3X density.
iosDensities = [
IosDensity("@1X", "", 0.333333),
IosDensity("@2X", "@2X", 0.666666),
IosDensity("@3X", "@3X", 1.0)
]
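# Worked example of how these scale factors are applied (assuming a raw asset authored at the
# highest density, i.e. XXXHDPI for Android and @3X for iOS): a 400px-wide raw PNG becomes
#   400 * 0.375    = 150px for drawable-hdpi/
#   400 * 1.0      = 400px for drawable-xxxhdpi/
#   400 * 0.333333 ~= 133px for the iOS @1X variant
# The actual resizing is performed later by scaleImage() via FFMPEG.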
################################################################
# Constants
STORAGE_FILE_NAME = ".warp_storage"
TARGET_ANDROID = "android"
TARGET_IOS = "ios"
# Variables with default values
poolThreads = multiprocessing.cpu_count() + 1
upToDateFiles = []
deletedFiles = []
newFiles = []
modifiedFiles = []
targetPlatform = ""
shouldCleanProject = False
shouldForceCleanProject = False
shouldRunSilently = False
versionName = "1.0.1"
# Script entry point
def main():
parseCommandLineOptions()
greet()
setUpPathVariables()
if shouldCleanProject or shouldForceCleanProject:
cleanProject()
else:
makeRequiredDirectories()
classifyRawFiles(upToDateFiles, deletedFiles, newFiles, modifiedFiles)
processUpToDateAssets(upToDateFiles)
processNewAssets(newFiles)
processModifiedAssets(modifiedFiles)
processDeletedAssets(deletedFiles)
goodbye()
# Parse command line options and store them in variables
def parseCommandLineOptions():
parser = argparse.ArgumentParser(description="Seamless mobile assets management")
baseGroup = parser.add_argument_group('Basic usage')
baseGroup.add_argument("-t", "--target",
dest="target",
required=True,
choices=[TARGET_ANDROID, TARGET_IOS],
help="specifies the platform where the assets will be used",
metavar=TARGET_ANDROID +"/" + TARGET_IOS)
baseGroup.add_argument("-i", "--input",
dest="input",
help="directory where the raw assets are located",
metavar="\"raw/assets/path\"")
baseGroup.add_argument("-o", "--output",
dest="output",
help="directory where the processed assets will be placed",
metavar="\"proccesed/assets/path\"")
baseGroup.add_argument("-v", "--version",
action='version',
version='%(prog)s ' + versionName)
baseGroup.add_argument("-T", "--threads",
dest="threads",
help="number of threads to use while processing the assets",
metavar="N",
default=multiprocessing.cpu_count() + 1,
type=int)
buildGroup = parser.add_argument_group('Processing options')
buildGroup.add_argument("-c", "--clean",
action="store_true",
default=False,
dest="clean",
help="remove every generated asset")
buildGroup.add_argument("-f", "--force-clean",
action="store_true",
default=False,
dest="force_clean",
help="forces the removal of the output folder")
uiGroup = parser.add_argument_group('UI')
uiGroup.add_argument("-s", "--silent",
action="store_true",
default=False,
dest="silent",
help="doesn't show the welcome message")
# Save parsed options as global variables
global targetPlatform
global dirRaw
global dirAssets
global shouldCleanProject
global shouldForceCleanProject
global shouldRunSilently
global poolThreads
args = parser.parse_args()
targetPlatform = args.target
if args.input: dirRaw = args.input
if args.output: dirAssets = args.output
shouldCleanProject = args.clean
shouldForceCleanProject = args.force_clean
shouldRunSilently = args.silent
poolThreads = args.threads if args.threads > 0 else 1
# Greet
def greet():
logo = [
" ",
" **********************************",
" * _ _____ ____ ____ *",
" * | | / / | / __ \/ __ \\ *",
" * | | /| / / /| | / /_/ / /_/ / *",
" * | |/ |/ / ___ |/ _, _/ ____/ *",
" * |__/|__/_/ |_/_/ |_/_/ *",
" * *",
" * Wolox Assets Rapid Processor *",
" **********************************",
" v."+ versionName +" ",
" "
]
if not shouldRunSilently:
for line in logo:
print(Colors.PURPLE + line + Colors.ENDC)
# Adds necessary PATH variables. Useful when running the script from a non-user
# shell (like with Gradle in Android)
def setUpPathVariables():
os.environ['PATH'] = os.environ['PATH'] + ":/usr/local/bin"
# Clears previously processed assets and the hash storage file
def cleanProject():
print(Colors.YELLOW + "Cleaning previously processed assets..." + Colors.ENDC)
# Dictionary of previously hashed files: <file path, MD5 hash>
storedHashedFiles = loadHashedFiles()
# Delete all the stored files
for path, md5 in storedHashedFiles.items():
assetToClean = ntpath.basename(path)
print(Colors.BLUE + "DELETING ASSET: " + assetToClean + Colors.ENDC)
deleteAsset(assetToClean)
# Remove generated density folders if empty
for density in androidDensities:
densityDir = dirAssets + density.path
if os.path.exists(densityDir) and (os.listdir(densityDir) == [] or shouldForceCleanProject) :
print(Colors.BLUE + "DELETING ASSET DIRECTORY: " + densityDir + Colors.ENDC)
if shouldForceCleanProject:
shutil.rmtree(densityDir)
else :
os.rmdir(densityDir)
# Remove assets output folder if empty
if os.path.exists(dirAssets) and os.listdir(dirAssets) == [] :
print(Colors.BLUE + "DELETING EMPTY OUTPUT DIRECTORY: " + dirAssets + Colors.ENDC)
os.rmdir(dirAssets)
# Remove storage file
if os.path.exists(dirRaw + STORAGE_FILE_NAME):
os.remove(dirRaw + STORAGE_FILE_NAME)
print(Colors.YELLOW + "Assets cleared" + Colors.ENDC)
# Make the required directories to process assets if they don't already exist
def makeRequiredDirectories():
# Make raw directory if needed
if not os.path.exists(dirRaw):
print("Making directory for raw assets: " + dirRaw)
os.makedirs(dirRaw)
# Make directories for Android processed assets
if targetPlatform == TARGET_ANDROID:
for density in androidDensities:
if not os.path.exists(dirAssets + density.path):
print("Making directory for Android assets: " + dirAssets + density.path)
os.makedirs(dirAssets + density.path)
# Make directories for iOS processed assets
else:
if not os.path.exists(dirAssets):
print("Making directory for iOS assets:" + dirAssets)
os.makedirs(dirAssets)
# Classify raw files into collections of up to date, deleted, new and modified files
def classifyRawFiles(upToDateFiles, deletedFiles, newFiles, modifiedFiles):
# Dictionary of previously hashed files: <file path, MD5 hash>
storedHashedFiles = loadHashedFiles()
# Dictionary of newly hashed files and ready to compare for diff: <file path, MD5 hash>
recentlyHashedFiles = hashRawFiles()
saveHashedFiles(recentlyHashedFiles)
# Classify files by comparing recent hashes with previously hashed files
for path, md5 in recentlyHashedFiles.items():
if path in storedHashedFiles:
# CASE 1: The file is present and the hashes are the same (the file is unchanged)
if md5 == storedHashedFiles[path]:
upToDateFiles.append(path)
# CASE 2: The file is present, but the hashes don't match (the file has been modified)
else:
modifiedFiles.append(path)
del storedHashedFiles[path] # Remove the processed entry
# CASE 3: The file isn't present on the previous hash dictionary, it must be a new file
else:
newFiles.append(path)
# The leftovers in the previous hash dictionary must be the deleted files
for path in storedHashedFiles:
deletedFiles.append(path)
# Hash (MD5) files in the raw directory and return them as a dictionary <file path, MD5 hash>
def hashRawFiles():
BLOCKSIZE = 65536
hashedFiles = {}
# Hash files in the raw directory
for filePath in glob.glob(dirRaw + "*.png"):
hasher = hashlib.md5()
with open(filePath, 'rb') as fileToHash:
buf = fileToHash.read(BLOCKSIZE)
while len(buf) > 0:
hasher.update(buf)
buf = fileToHash.read(BLOCKSIZE)
hashedFiles.update({filePath:hasher.hexdigest()})
return hashedFiles
# Store a dictionary of files to Hash
def saveHashedFiles(filesToHash):
with open(dirRaw + STORAGE_FILE_NAME, "wb") as hashStorage:
pickle.dump(filesToHash, hashStorage, pickle.HIGHEST_PROTOCOL)
# Retrieve a dictionary of hashed files
def loadHashedFiles():
try:
with open(dirRaw + STORAGE_FILE_NAME, "rb") as hashStorage:
return pickle.load(hashStorage)
except IOError:
return {}
# Process files that we found in a previous run by the script
def processUpToDateAssets(upToDateFiles):
for path in upToDateFiles:
print(Colors.BLUE + os.path.basename(path) + ": STATE > UP TO DATE" + Colors.ENDC)
# Execute a specific function in a pool of workers for every "argument" in mapArguments.
def mapInWorkers(function, mapArguments):
pool = Pool(poolThreads)
try:
pool.map_async(function, mapArguments).get(0xFFFF)
pool.close()
except KeyboardInterrupt:
print(Colors.RED + "Interrupted" + Colors.ENDC)
pool.terminate()
sys.exit(1)
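# Note on the 0xFFFF timeout above: calling get() with a timeout (instead of a blocking
# pool.map) is a common workaround that lets KeyboardInterrupt reach the parent process so the
# pool can be terminated cleanly. Illustrative usage (hypothetical asset paths):
#   mapInWorkers(processRawPngAsset, ["raw/icon.png", "raw/logo.png"])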
# Process files that are new to the project
def processNewAssets(newFiles):
processNew = partial(processRawPngAssetWithTitle, "{}: STATE > NEW")
mapInWorkers(processNew, newFiles)
# Process files that were modified in the project
def processModifiedAssets(modifiedFiles):
processModified = partial(processRawPngAssetWithTitle, "{}: STATE > UPDATED")
mapInWorkers(processModified, modifiedFiles)
# Process files that were deleted from the project
def processDeletedAssets(deletedFiles):
for path in deletedFiles:
assetName = os.path.basename(path)
print(Colors.BLUE + assetName + ": STATE > REMOVED" + Colors.ENDC)
deleteAsset(assetName)
# Print the title (substituting the asset's basename into the format placeholder), then scale and compress the asset for every screen density
def processRawPngAssetWithTitle(title, rawAssetPath):
print (Colors.BLUE + title.format(os.path.basename(rawAssetPath)) + Colors.ENDC)
processRawPngAsset(rawAssetPath)
# Scale and compress the asset for every screen density
def processRawPngAsset(rawAssetPath):
filename = os.path.basename(rawAssetPath)
filenameAndExtension = os.path.splitext(filename) # "example.png" -> ["example", ".png"]
# Process assets for Android (e.g: /drawable-xxhdpi/...)
if targetPlatform == TARGET_ANDROID:
for density in androidDensities:
processedAssetPath = dirAssets + density.path + filename
sendAssetToPngPipeline(rawAssetPath, density, processedAssetPath)
# Process assets for iOS (e.g: ...@3X)
else:
for density in iosDensities:
processedAssetPath = dirAssets + filenameAndExtension[0] + density.suffix + filenameAndExtension[1]
sendAssetToPngPipeline(rawAssetPath, density, processedAssetPath)
print(filename + ": Processed the asset for every screen density")
def sendAssetToPngPipeline(rawAssetPath, density, processedAssetPath):
filename = os.path.basename(rawAssetPath)
print("{0}: SCALING to {1}".format(filename, density.name))
scaleImage(rawAssetPath, density.scaleFactor, processedAssetPath)
print(filename + ": COMPRESSING for " + density.name)
compressPNG(processedAssetPath)
# Scale the asset for a given screen density using FFMPEG
def scaleImage(inputPath, scaleFactor, outputPath):
os.system("ffmpeg -loglevel error -y -i \"{0}\" -vf scale=iw*{1}:-1 \"{2}\"".format(inputPath, scaleFactor, outputPath))
# Compress a PNG asset using PNGQuant
def compressPNG(inputPath):
os.system("pngquant \"{0}\" --force --ext .png".format(inputPath))
# Remove asset in every screen density
def deleteAsset(assetName):
for density in androidDensities:
if os.path.exists(dirAssets + density.path + assetName):
os.remove(dirAssets + density.path + assetName)
print(assetName + ": DELETED asset for " + density.name)
# Goodbye
def goodbye():
print(Colors.GREEN + "WARP finished processing your assets" + Colors.ENDC)
cases.
This function converts these IOB tags to the easier-to-consume IOB2 format;
see
https://en.wikipedia.org/wiki/Inside%E2%80%93outside%E2%80%93beginning_(tagging)
for details. Basically, every entity in IOB2 format begins with a `B` tag.
The `I` tag is only used for the second, third, etc. tokens of an entity.
:param df: A `pd.DataFrame` with one row per token of the document.
In addition to the metadata columns corresponding to `column_names`, this
dataframe must also contain sentence information in a column called `sentence`.
:param column_names: Names for the metadata columns in the original data file
that were used to generate the names of the columns of `df`.
:param iob_columns: Mask indicating which of the metadata columns after the
token text should be treated as being in IOB format.
:returns: A version of `df` with corrected IOB2 tags in the `ent_iob`
column. The original dataframe is not modified.
"""
ret = df.copy()
sentence_begins = df["sentence"].values.begin_token
for i in range(len(column_names)):
if iob_columns[i]:
name = column_names[i]
iobs = df[f"{name}_iob"].values.copy() # Modified in place
entities = df[f"{name}_type"].values
# Special-case the first one
if iobs[0] == "I":
iobs[0] = "B"
for i in range(1, len(iobs)):
tag = iobs[i]
prev_tag = iobs[i - 1]
if tag == "I":
if (
prev_tag == "O" # Previous token not an entity
or (prev_tag in ("I", "B")
and entities[i] != entities[i - 1]
) # Previous token a different type of entity
or (sentence_begins[i] != sentence_begins[i - 1]
) # Start of new sentence
):
iobs[i] = "B"
ret[f"{name}_iob"] = iobs
return ret
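# Illustrative example (not from the original source) of the IOB -> IOB2 correction above,
# assuming a single metadata column "ent" and all tokens in one sentence:
#   ent_iob  = ["I", "I", "O", "I"]    ent_type = ["PER", "PER", None, "ORG"]
# becomes
#   ent_iob  = ["B", "I", "O", "B"]    ent_type = ["PER", "PER", None, "ORG"]
# i.e. every entity now starts with a "B" tag and "I" is only used for continuation tokens.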
def _doc_to_df(doc: List[_SentenceData],
column_names: List[str],
iob_columns: List[bool],
space_before_punct: bool) -> pd.DataFrame:
"""
Convert the "Python objects" representation of a document from a
CoNLL-2003 file into a `pd.DataFrame` of token metadata.
:param doc: List of Python objects that represents the document.
:param column_names: Names for the metadata columns that come after the
token text. These names will be used to generate the names of the dataframe
that this function returns.
:param iob_columns: Mask indicating which of the metadata columns after the
token text should be treated as being in IOB format. If a column is in IOB format,
the returned dataframe will contain *two* columns, holding IOB2 tags and
entity type tags, respectively. For example, an input column "ent" will turn into
output columns "ent_iob" and "ent_type".
:param space_before_punct: If `True`, add whitespace before
punctuation characters (and after left parentheses)
when reconstructing the text of the document.
:return: DataFrame with four columns:
* `char_span`: Span of each token, with character offsets.
Backed by the concatenation of the tokens in the document into
a single string with one sentence per line.
* `token_span`: Span of each token, with token offsets.
Backed by the contents of the `char_span` column.
* `ent_iob`: IOB2-format tags of tokens, exactly as they appeared
in the original file, with no corrections applied.
* `ent_type`: Entity type names for tokens tagged "I" or "B" in
the `ent_iob` column; `None` everywhere else.
"""
# Character offsets of tokens in the reconstructed document
begins_list = [] # Type: List[np.ndarray]
ends_list = [] # Type: List[np.ndarray]
# Reconstructed text of each sentence
sentences_list = [] # Type: List[str]
# Token offsets of sentences containing each token in the document.
sentence_begins_list = [] # Type: List[np.ndarray]
sentence_ends_list = [] # Type: List[np.ndarray]
# Token metadata column values. Key is column name, value is metadata for
# each token.
meta_lists = _make_empty_meta_values(column_names, iob_columns)
char_position = 0
token_position = 0
for sentence_num in range(len(doc)):
sentence = doc[sentence_num]
tokens = sentence.tokens
# Don't put spaces before punctuation in the reconstituted string.
no_space_before_mask = (
np.zeros(len(tokens), dtype=bool) if space_before_punct
else _SPACE_BEFORE_MATCH_FN(tokens))
no_space_after_mask = (
np.zeros(len(tokens), dtype=bool) if space_before_punct
else _SPACE_AFTER_MATCH_FN(tokens))
no_space_before_mask[0] = True # No space before first token
no_space_after_mask[-1] = True # No space after last token
shifted_no_space_after_mask = np.roll(no_space_after_mask, 1)
prefixes = np.where(
np.logical_or(no_space_before_mask,
shifted_no_space_after_mask),
"", " ")
string_parts = np.ravel((prefixes, tokens), order="F")
sentence_text = "".join(string_parts)
sentences_list.append(sentence_text)
lengths = np.array([len(t) for t in tokens])
prefix_lengths = np.array([len(p) for p in prefixes])
# Begin and end offsets, accounting for which tokens have spaces
# before them.
e = np.cumsum(lengths + prefix_lengths)
b = e - lengths
begins_list.append(b + char_position)
ends_list.append(e + char_position)
sentence_begin_token = token_position
sentence_end_token = token_position + len(e)
sentence_begins = np.repeat(sentence_begin_token, len(e))
sentence_ends = np.repeat(sentence_end_token, len(e))
sentence_begins_list.append(sentence_begins)
sentence_ends_list.append(sentence_ends)
for k in sentence.token_metadata.keys():
meta_lists[k].extend(sentence.token_metadata[k])
char_position += e[-1] + 1 # "+ 1" to account for newline
token_position += len(e)
begins = np.concatenate(begins_list)
ends = np.concatenate(ends_list)
doc_text = "\n".join(sentences_list)
char_spans = CharSpanArray(doc_text, begins, ends)
token_begins = np.arange(len(begins))
token_spans = TokenSpanArray(char_spans, token_begins, token_begins + 1)
sentence_spans = TokenSpanArray(char_spans,
np.concatenate(sentence_begins_list),
np.concatenate(sentence_ends_list))
ret = pd.DataFrame(
{"char_span": char_spans,
"token_span": token_spans
})
for k, v in meta_lists.items():
ret[k] = v
ret["sentence"] = sentence_spans
return ret
def _output_doc_to_df(tokens: pd.DataFrame,
outputs: Dict[str, List[str]],
column_name: str,
copy_tokens: bool) -> pd.DataFrame:
"""
Convert the "Python objects" representation of a document from a
CoNLL-2003 file into a `pd.DataFrame` of token metadata.
:param tokens: `pd.DataFrame` containing metadata about the tokens
of this document, as returned by `conll_2003_to_dataframe`
:param outputs: Dictionary containing outputs for this document,
with fields "iob" and "entity".
:param column_name: Name for the metadata value that the IOB-tagged data
in `input_file` encodes. If this name is present in `doc_dfs`, its value
will be replaced with the data from `input_file`; otherwise a new column
will be added to each dataframe.
:param copy_tokens: `True` if token information should be deep-copied.
:return: DataFrame with four columns:
* `char_span`: Span of each token, with character offsets.
Backed by the concatenation of the tokens in the document into
a single string with one sentence per line.
* `token_span`: Span of each token, with token offsets.
Backed by the contents of the `char_span` column.
* `ent_iob`: IOB2-format tags of tokens, corrected so that every
entity begins with a "B" tag.
* `ent_type`: Entity type names for tokens tagged "I" or "B" in
the `ent_iob` column; `None` everywhere else.
"""
if copy_tokens:
return pd.DataFrame(
{"char_span": tokens["char_span"].copy(),
"token_span": tokens["token_span"].copy(),
f"{column_name}_iob": np.array(outputs["iob"]),
f"{column_name}_type": np.array(outputs["entity"]),
"sentence": tokens["sentence"].copy()})
else:
return pd.DataFrame(
{"char_span": tokens["char_span"],
"token_span": tokens["token_span"],
f"{column_name}_iob": np.array(outputs["iob"]),
f"{column_name}_type": np.array(outputs["entity"]),
"sentence": tokens["sentence"]})
#####################################################
# External API functions below this line
def iob_to_spans(
token_features: pd.DataFrame,
iob_col_name: str = "ent_iob",
char_span_col_name: str = "char_span",
entity_type_col_name: str = "ent_type",
):
"""
Convert token tags in Inside–Outside–Beginning (IOB2) format to a series of
`TokenSpan`s of entities. See https://en.wikipedia.org/wiki/Inside%E2%80%93outside%E2%80%93beginning_(tagging)
for more information on IOB2 format.
:param token_features: DataFrame of token features in the format returned by
`make_tokens_and_features`.
:param iob_col_name: Name of a column in `token_features` that contains the
IOB2 tags as strings, "I", "O", or "B".
:param char_span_col_name: Name of a column in `token_features` that
contains the tokens as a `CharSpanArray`.
:param entity_type_col_name: Optional name of a column in `token_features`
that contains entity type information; or `None` if no such column exists.
:return: A `pd.DataFrame` with the following columns:
* `token_span`: Span (with token offsets) of each entity
* `<value of entity_type_col_name>`: (optional) Entity type
"""
# Start out with 1-token prefixes of all entities.
begin_mask = token_features[iob_col_name] == "B"
first_tokens = token_features[begin_mask].index
if entity_type_col_name is None:
entity_types = np.zeros(len(first_tokens))
else:
entity_types = token_features[begin_mask][entity_type_col_name]
# Add an extra "O" tag to the end of the IOB column to simplify the logic
# for handling the case where the document ends with an entity.
iob_series = pd.concat(
    [token_features[iob_col_name], pd.Series(["O"])], ignore_index=True
)
entity_prefixes = pd.DataFrame(
{
"ent_type": entity_types,
"begin": first_tokens, # Inclusive
"end": first_tokens + 1, # Exclusive
"next_tag": iob_series.iloc[first_tokens + 1].values,
}
)
df_list = [] # Type: List[pd.DataFrame]
if len(entity_prefixes.index) == 0:
# Code below needs at least one element in the list for schema
df_list = [entity_prefixes]
# Iteratively expand the prefixes
while len(entity_prefixes.index) > 0:
complete_mask = entity_prefixes["next_tag"].isin(["O", "B"])
complete_entities = entity_prefixes[complete_mask]
incomplete_entities = entity_prefixes[~complete_mask].copy()
incomplete_entities["end"] = incomplete_entities["end"] + 1
incomplete_entities["next_tag"] = iob_series.iloc[
incomplete_entities["end"]
].values
df_list.append(complete_entities)
entity_prefixes = incomplete_entities
all_entities = pd.concat(df_list)
# Sort spans by location, not length.
all_entities.sort_values("begin", inplace=True)
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import fnmatch as fnm
import json
import math
import numpy as np
import os
import re
import sys
import logging
from collections import defaultdict
from colors import TestColors
class Results(object):
def __init__(self, results_dir):
self.results_dir = results_dir
self.results_json = results_dir + '/results.json'
self.results = {}
# Setup logging
self._log = logging.getLogger('Results')
# Do nothing if results have already been parsed
if os.path.isfile(self.results_json):
return
# Parse results
self.base_wls = defaultdict(list)
self.test_wls = defaultdict(list)
self._log.info('Loading energy/perf data...')
for test_idx in sorted(os.listdir(self.results_dir)):
test_dir = self.results_dir + '/' + test_idx
if not os.path.isdir(test_dir):
continue
test = TestFactory.get(test_idx, test_dir, self.results)
test.parse()
results_json = self.results_dir + '/results.json'
self._log.info('Dump perf results on JSON file [%s]...',
results_json)
with open(results_json, 'w') as outfile:
json.dump(self.results, outfile, indent=4, sort_keys=True)
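# Based on how Test.parse() fills self.res below, the resulting results.json is a nested
# dictionary of the form:
#   { "<wtype>": { "<wload_idx>": { "<conf_idx>": { "energy": {...}, "performance": {...} } } } }
# where "energy" holds per-cluster Stats() summaries (LITTLE/big/Total) and "performance"
# holds per-metric summaries (count/min/max/avg/std/c99).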
################################################################################
# Tests processing base classes
################################################################################
class Test(object):
def __init__(self, test_idx, test_dir, res):
self.test_idx = test_idx
self.test_dir = test_dir
self.res = res
self._log = logging.getLogger('Test')
match = TEST_DIR_RE.search(test_dir)
if not match:
self._log.error('Results folder not matching naming template')
self._log.error('Skip parsing of test results [%s]', test_dir)
return
# Create required JSON entries
wtype = match.group(1)
if wtype not in res.keys():
res[wtype] = {}
wload_idx = match.group(3)
if wload_idx not in res[wtype].keys():
res[wtype][wload_idx] = {}
conf_idx = match.group(2)
if conf_idx not in res[wtype][wload_idx].keys():
res[wtype][wload_idx][conf_idx] = {}
# Set the workload type for this test
self.wtype = wtype
self.wload_idx = wload_idx
self.conf_idx = conf_idx
# Energy metrics collected for all tests
self.little = []
self.total = []
self.big = []
def parse(self):
self._log.info('Processing results from wtype [%s]', self.wtype)
# Parse test's run results
for run_idx in sorted(os.listdir(self.test_dir)):
# Skip all files which are not folders
run_dir = os.path.join(self.test_dir, run_idx)
if not os.path.isdir(run_dir):
continue
run = self.parse_run(run_idx, run_dir)
self.collect_energy(run)
self.collect_performance(run)
# Report energy/performance stats over all runs
self.res[self.wtype][self.wload_idx][self.conf_idx]\
['energy'] = self.energy()
self.res[self.wtype][self.wload_idx][self.conf_idx]\
['performance'] = self.performance()
def collect_energy(self, run):
# Keep track of average energy of each run
self.little.append(run.little_nrg)
self.total.append(run.total_nrg)
self.big.append(run.big_nrg)
def energy(self):
# Compute energy stats over all run
return {
'LITTLE' : Stats(self.little).get(),
'big' : Stats(self.big).get(),
'Total' : Stats(self.total).get()
}
class TestFactory(object):
@staticmethod
def get(test_idx, test_dir, res):
# Retrieve workload class from results folder name
match = TEST_DIR_RE.search(test_dir)
if not match:
logging.error('Results folder not matching naming template')
logging.error('Skip parsing of test results [%s]', test_dir)
return
# Create workload specific test class
wtype = match.group(1)
if wtype == 'rtapp':
return RTAppTest(test_idx, test_dir, res)
# Return a generic test parser
return DefaultTest(test_idx, test_dir, res)
class Energy(object):
def __init__(self, nrg_file):
# Set of exposed attributes
self.little = 0.0
self.big = 0.0
self.total = 0.0
self._log = logging.getLogger('Energy')
self._log.debug('Parse [%s]...', nrg_file)
with open(nrg_file, 'r') as infile:
nrg = json.load(infile)
if 'LITTLE' in nrg:
self.little = float(nrg['LITTLE'])
if 'big' in nrg:
self.big = float(nrg['big'])
self.total = self.little + self.big
self._log.debug('Energy LITTLE [%s], big [%s], Total [%s]',
self.little, self.big, self.total)
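# Example of the energy.json layout this class consumes (field names taken from the parsing
# above; the numbers are made up):
#   { "LITTLE": 3.25, "big": 7.91 }
# A missing cluster simply defaults to 0.0, and total is always computed as LITTLE + big.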
class Stats(object):
def __init__(self, data):
self.stats = {}
self.stats['count'] = len(data)
self.stats['min'] = min(data)
self.stats['max'] = max(data)
self.stats['avg'] = sum(data)/len(data)
std = Stats.stdev(data)
c99 = Stats.ci99(data, std)
self.stats['std'] = std
self.stats['c99'] = c99
def get(self):
return self.stats
@staticmethod
def stdev(values):
sum1 = 0
sum2 = 0
for value in values:
sum1 += value
sum2 += math.pow(value, 2)
# print 'sum1: {}, sum2: {}'.format(sum1, sum2)
avg = sum1 / len(values)
var = (sum2 / len(values)) - (avg * avg)
# print 'avg: {} var: {}'.format(avg, var)
std = math.sqrt(var)
return float(std)
@staticmethod
def ci99(values, std):
count = len(values)
ste = std / math.sqrt(count)
c99 = 2.58 * ste
return c99
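# The statistics above follow the usual population formulas. For example, with values = [4, 6, 8]:
#   avg = 6, var = E[x^2] - avg^2 = (16+36+64)/3 - 36 ~= 2.67, std = sqrt(2.67) ~= 1.63
#   c99 = 2.58 * std / sqrt(count) ~= 2.58 * 1.63 / 1.73 ~= 2.43
# (2.58 being the two-sided z-value for a 99% confidence interval).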
################################################################################
# Run processing base classes
################################################################################
class Run(object):
def __init__(self, run_idx, run_dir):
self.run_idx = run_idx
self.nrg = None
self._log = logging.getLogger('Run')
self._log.debug('Parse Run [%s]...', run_dir)
# Energy stats
self.little_nrg = 0
self.total_nrg = 0
self.big_nrg = 0
nrg_file = run_dir + '/energy.json'
if os.path.isfile(nrg_file):
self.nrg = Energy(nrg_file)
self.little_nrg = self.nrg.little
self.total_nrg = self.nrg.total
self.big_nrg = self.nrg.big
################################################################################
# RTApp workload parsing classes
################################################################################
class RTAppTest(Test):
def __init__(self, test_idx, test_dir, res):
super(RTAppTest, self).__init__(test_idx, test_dir, res)
# RTApp specific performance metric
self.slack_pct = []
self.perf_avg = []
self.edp1 = []
self.edp2 = []
self.edp3 = []
self.rtapp_run = {}
def parse_run(self, run_idx, run_dir):
return RTAppRun(run_idx, run_dir)
def collect_performance(self, run):
# Keep track of average performances of each run
self.slack_pct.extend(run.slack_pct)
self.perf_avg.extend(run.perf_avg)
self.edp1.extend(run.edp1)
self.edp2.extend(run.edp2)
self.edp3.extend(run.edp3)
# Keep track of performance stats for each run
self.rtapp_run[run.run_idx] = {
'slack_pct' : Stats(run.slack_pct).get(),
'perf_avg' : Stats(run.perf_avg).get(),
'edp1' : Stats(run.edp1).get(),
'edp2' : Stats(run.edp2).get(),
'edp3' : Stats(run.edp3).get(),
}
def performance(self):
# Dump per run rtapp stats
prf_file = os.path.join(self.test_dir, 'performance.json')
with open(prf_file, 'w') as ofile:
json.dump(self.rtapp_run, ofile, indent=4, sort_keys=True)
# Return overall stats
return {
'slack_pct' : Stats(self.slack_pct).get(),
'perf_avg' : Stats(self.perf_avg).get(),
'edp1' : Stats(self.edp1).get(),
'edp2' : Stats(self.edp2).get(),
'edp3' : Stats(self.edp3).get(),
}
class RTAppRun(Run):
def __init__(self, run_idx, run_dir):
# Call base class to parse energy data
super(RTAppRun, self).__init__(run_idx, run_dir)
# RTApp specific performance stats
self.slack_pct = []
self.perf_avg = []
self.edp1 = []
self.edp2 = []
self.edp3 = []
rta = {}
# Load run's performance of each task
for task_idx in sorted(os.listdir(run_dir)):
if not fnm.fnmatch(task_idx, 'rt-app-*.log'):
continue
# Parse run's performance results
prf_file = run_dir + '/' + task_idx
task = RTAppPerf(prf_file, self.nrg)
# Keep track of average performances of each task
self.slack_pct.append(task.prf['slack_pct'])
self.perf_avg.append(task.prf['perf_avg'])
self.edp1.append(task.prf['edp1'])
self.edp2.append(task.prf['edp2'])
self.edp3.append(task.prf['edp3'])
# Keep track of performance stats for each task
rta[task.name] = task.prf
# Dump per task rtapp stats
prf_file = os.path.join(run_dir, 'performance.json')
with open(prf_file, 'w') as ofile:
json.dump(rta, ofile, indent=4, sort_keys=True)
class RTAppPerf(object):
def __init__(self, perf_file, nrg):
# Set of exposed attributes
self.prf = {
'perf_avg' : 0,
'perf_std' : 0,
'run_sum' : 0,
'slack_sum' : 0,
'slack_pct' : 0,
'edp1' : 0,
'edp2' : 0,
'edp3' : 0
}
self._log = logging.getLogger('RTAppPerf')
self._log.debug('Parse [%s]...', perf_file)
# Load performance data for each RT-App task
self.name = perf_file.split('-')[-2]
self.data = np.loadtxt(perf_file, comments='#', unpack=False)
# Max Slack (i.e. configured/expected slack): period - run
max_slack = np.subtract(
self.data[:,RTAPP_COL_C_PERIOD], self.data[:,RTAPP_COL_C_RUN])
# Performance Index: 100 * slack / max_slack
perf = np.divide(self.data[:,RTAPP_COL_SLACK], max_slack)
perf = np.multiply(perf, 100)
self.prf['perf_avg'] = np.mean(perf)
self.prf['perf_std'] = np.std(perf)
self._log.debug('perf [%s]: %6.2f,%6.2f',
self.name, self.prf['perf_avg'],
self.prf['perf_std'])
# Negative slacks
nslacks = self.data[:,RTAPP_COL_SLACK]
nslacks = nslacks[nslacks < 0]
self._log.debug('Negative slacks: %s', nslacks)
self.prf['slack_sum'] = -nslacks.sum()
self._log.debug('Negative slack [%s] sum: %6.2f',
self.name, self.prf['slack_sum'])
# Slack over run-time
self.prf['run_sum'] = np.sum(self.data[:,RTAPP_COL_RUN])
self.prf['slack_pct'] = 100 * self.prf['slack_sum'] / self.prf['run_sum']
self._log.debug('SlackPct [%s]: %6.2f %%', self.name, self.prf['slack_pct'])
if nrg is None:
return
# Computing EDP
self.prf['edp1'] = nrg.total * math.pow(self.prf['run_sum'], 1)
self._log.debug('EDP1 [%s]: %6.2f', self.name, self.prf['edp1'])
self.prf['edp2'] = nrg.total * math.pow(self.prf['run_sum'], 2)
self._log.debug('EDP2 [%s]: %6.2f', self.name, self.prf['edp2'])
self.prf['edp3'] = nrg.total * math.pow(self.prf['run_sum'], 3)
self._log.debug('EDP3 [%s]: %6.2f', self.name, self.prf['edp3'])
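# Worked example of the metrics above (numbers are illustrative): for a task configured with
# period = 10000us and run = 6000us, max_slack = 4000us; a measured slack of 2000us gives a
# per-activation Performance Index of 100 * 2000 / 4000 = 50. EDPn then weights the total
# energy by powers of the cumulative run time, e.g. edp2 = nrg.total * run_sum^2.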
# Columns of the per-task rt-app log file
RTAPP_COL_IDX = 0
RTAPP_COL_PERF = 1
RTAPP_COL_RUN = 2
RTAPP_COL_PERIOD = 3
RTAPP_COL_START = 4
RTAPP_COL_END = 5
RTAPP_COL_REL_ST = 6
RTAPP_COL_SLACK = 7
RTAPP_COL_C_RUN = 8
RTAPP_COL_C_PERIOD = 9
RTAPP_COL_WU_LAT = 10
################################################################################
# Generic workload performance parsing class
################################################################################
class DefaultTest(Test):
def __init__(self, test_idx, test_dir, res):
super(DefaultTest, self).__init__(test_idx, test_dir, res)
# Default performance metric
self.ctime_avg = []
self.perf_avg = []
self.edp1 = []
self.edp2 = []
self.edp3 = []
def parse_run(self, run_idx, run_dir):
return DefaultRun(run_idx, run_dir)
def collect_performance(self, run):
# Keep track of average performances of each run
self.ctime_avg.append(run.ctime_avg)
self.perf_avg.append(run.perf_avg)
self.edp1.append(run.edp1)
self.edp2.append(run.edp2)
self.edp3.append(run.edp3)
def performance(self):
return {
'ctime_avg' : Stats(self.ctime_avg).get(),
'perf_avg' : Stats(self.perf_avg).get(),
'edp1' : Stats(self.edp1).get(),
'edp2' : Stats(self.edp2).get(),
'edp3' : Stats(self.edp3).get(),
}
class DefaultRun(Run):
def __init__(self, run_idx, run_dir):
# Call base class to parse energy data
super(DefaultRun, self).__init__(run_idx, run_dir)
# Default specific performance stats
self.ctime_avg = 0
self.perf_avg = 0
self.edp1 = 0
self.edp2 = 0
self.edp3 = 0
# Load default performance.json
prf_file = os.path.join(run_dir, 'performance.json')
if not os.path.isfile(prf_file):
self._log.warning('No performance.json found in %s',
run_dir)
return
# Load performance report from JSON
with open(prf_file, 'r') as infile:
prf = json.load(infile)
from __future__ import print_function
import errno
import os
import textwrap
from collections import OrderedDict, defaultdict
from urlparse import urlparse
from toil_rnaseq.utils.expando import Expando
schemes = ('file', 'http', 's3', 'ftp', 'gdc')
_iter_types = (list, tuple, set, frozenset)
def parse_samples(path_to_manifest=None):
"""
Parses samples from manifest
:param str path_to_manifest: Path to manifest file containing sample information
:return: Samples and their attributes as defined in the manifest
:rtype: list(list(str, str, str, str))
"""
samples = []
with open(path_to_manifest, 'r') as f:
for line in f.readlines():
if not line.isspace() and not line.startswith('#'):
sample = line.strip().split('\t')
# Enforce number of columns
require(len(sample) == 4, 'Bad manifest format! '
'Expected 4 tab separated columns, User: "{}"'.format(sample))
# Unpack sample information
file_type, paired, uuid, url = sample
# Check file_type
file_types = ['tar', 'fq', 'bam']
require(file_type in file_types, '1st column is not valid {}. User: "{}"'.format(file_types, file_type))
# Check paired/unpaired
pair_types = ['paired', 'single']
require(paired in pair_types, '2nd column is not valid {}. User: "{}"'.format(pair_types, paired))
# If paired fastq data, ensure correct number of URLs
if file_type == 'fq' and paired == 'paired':
require(len(url.split(',')) % 2 == 0, 'Paired fastqs require an even number of URLs separated'
' by a comma: User: "{}"'.format(url))
samples.append(sample)
return samples
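# Minimal usage sketch (hypothetical manifest path and contents):
#   manifest.tsv:  tar<TAB>paired<TAB>UUID_1<TAB>file:///data/sample.tar
#   samples = parse_samples('manifest.tsv')
#   -> [['tar', 'paired', 'UUID_1', 'file:///data/sample.tar']]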
def generate_config():
return textwrap.dedent("""
##############################################################################################################
# TOIL RNA-SEQ WORKFLOW CONFIGURATION FILE #
##############################################################################################################
# This configuration file is formatted in YAML. Simply write the value (at least one space) after the colon.
# Edit the values in this configuration file and then rerun the pipeline: "toil-rnaseq run"
# Just Kallisto or STAR/RSEM can be run by supplying only the inputs to those tools
#
# URLs can take the form: http://, ftp://, file://, s3://, gdc://
# Local inputs follow the URL convention: file:///full/path/to/input
# S3 URLs follow the convention: s3://bucket/directory/file.txt
#
# Comments (beginning with #) do not need to be removed. Optional parameters left blank are treated as false.
##############################################################################################################
# REQUIRED OPTIONS #
##############################################################################################################
# Required: Output location of sample. Can be full path to a directory or an s3:// URL
# WARNING: S3 buckets must exist prior to upload, or it will fail.
output-dir:
##############################################################################################################
# WORKFLOW INPUTS (Alignment and Quantification) #
##############################################################################################################
# URL {scheme} to index tarball used by STAR
star-index: http://courtyard.gi.ucsc.edu/~jvivian/toil-rnaseq-inputs/starIndex_hg38_no_alt.tar.gz
# URL {scheme} to reference tarball used by RSEM
# Running RSEM requires a star-index as a well as an rsem-ref
rsem-ref: http://courtyard.gi.ucsc.edu/~jvivian/toil-rnaseq-inputs/rsem_ref_hg38_no_alt.tar.gz
# URL {scheme} to kallisto index file.
kallisto-index: http://courtyard.gi.ucsc.edu/~jvivian/toil-rnaseq-inputs/kallisto_hg38.idx
# URL {scheme} to hera index
hera-index: http://courtyard.gi.ucsc.edu/~jvivian/toil-rnaseq-inputs/hera-index.tar.gz
# Maximum file size of input sample (for resource allocation during initial download)
max-sample-size: 20G
##############################################################################################################
# WORKFLOW OPTIONS (Quality Control) #
##############################################################################################################
# If true, will preprocess samples with cutadapt using adapter sequences
cutadapt: true
# Adapter sequence to trim when running CutAdapt. Defaults set for Illumina
fwd-3pr-adapter: AGATCGGAAGAG
# Adapter sequence to trim (for reverse strand) when running CutAdapt. Defaults set for Illumina
rev-3pr-adapter: AGATCGGAAGAG
# If true, will run FastQC and include QC in sample output
fastqc: true
# If true, will run UMEND BamQC and include statistics about Uniquely Mapped Exonic Non-Duplicate (UMEND) reads
# If bamqc and save-bam are enabled, a bam with duplicates marked (output of BAMQC) is saved
bamqc:
##############################################################################################################
# CREDENTIAL OPTIONS (for downloading samples from secure locations) #
##############################################################################################################
# Optional: Provide a full path to a 32-byte key used for SSE-C Encryption in Amazon
ssec:
# Optional: Provide a full path to the token.txt used to download from the GDC
gdc-token:
##############################################################################################################
# ADDITIONAL FILE OUTPUT OPTIONS #
##############################################################################################################
# Optional: If true, saves the wiggle file (.bg extension) output by STAR
# WARNING: Requires STAR sorting, which has memory leak issues that can crash the workflow
wiggle:
# Optional: If true, saves the aligned BAM (by coordinate) produced by STAR
# You must also specify an ssec key if you want to upload to the s3-output-dir
# as read data is assumed to be controlled access
save-bam:
##############################################################################################################
# DEVELOPER OPTIONS #
##############################################################################################################
# Optional: If true, uses resource requirements appropriate for continuous integration
ci-test:
""".format(scheme=[x + '://' for x in schemes])[1:])
def user_input_config(config_path):
"""
User input of workflow configuration file
:param str config_path: Path to configuration file
:return: Configuration file path or None if user skips
:rtype: str
"""
print('\n\t\t\tUser Input of Toil-rnaseq Configuration File\n')
start = raw_input('Type Y/y and hit enter to continue: ').lower()
if start != 'y':
return None
print('User will see comments for a configuration option followed by "<OPTION>: [Default Value]"')
print('\tN/n to skip\n\ttrue/false for boolean statements\n\tq/quit to stop\n\tEnter key to submit option\n')
config = OrderedDict()
comments = defaultdict(list)
quit_flag = False
config_template = generate_config().split('\n')
for line in config_template:
if not line.startswith('#') and line:
option, default = line.split(': ')
# Fetch comments for current option
index = config_template.index(line) - 1
while True:
comments[option].insert(0, config_template[index])
index -= 1
if not config_template[index]:
break
if quit_flag:
config[option] = default
continue
print('\n'.join(comments[option]) + '\n\n')
# Show option and get user input
user_input = None
while not user_input:
user_input = raw_input('\n{}: [{}]\n\tUser Input: '.format(option, default)).lower()
if user_input == 'q' or user_input == 'quit':
quit_flag = True
config[option] = default
continue
elif user_input == 'n':
config[option] = default
continue
else:
config[option] = user_input
print('Writing out configuration file to: {}'.format(config_path))
with open(config_path, 'w') as f:
for option in config:
f.write('\n'.join(comments[option]))
f.write('\n{}: {}\n\n'.format(option, config[option]))
return config_path
def generate_manifest():
return textwrap.dedent("""
##############################################################################################################
# TOIL RNA-SEQ WORKFLOW MANIFEST FILE #
##############################################################################################################
# Edit this manifest to include information pertaining to each sample to be run.
# There are 4 tab-separated columns: filetype, paired/unpaired, UUID, URL(s) to sample
#
# filetype Filetype of the sample. Options: "tar", "fq", or "bam" for tarball, fastq/fastq.gz, or BAM
# paired Indicates whether the data is paired or single-ended. Options: "paired" or "single"
# UUID This should be a unique identifier for the sample to be processed
# URL A URL starting with {scheme} that points to the sample
#
# If sample is being submitted as a fastq or several fastqs, provide URLs separated by a comma.
# If providing paired fastqs, alternate the fastqs so every R1 is paired with its R2 as the next URL.
# Samples must have the same extension - do not mix and match gzip and non-gzipped sample pairs.
#
# Samples consisting of tarballs with fastq files inside must follow the file name convention of
# ending in an R1/R2 or _1/_2 followed by one of the 4 extensions: .fastq.gz, .fastq, .fq.gz, .fq
#
# BAMs are accepted, but must have been aligned from paired reads NOT single-end reads.
#
# GDC URLs may only point to individual BAM files. No other format is accepted.
#
# Examples of several combinations are provided below. Lines beginning with # are ignored.
#
# tar paired UUID_1 file:///path/to/sample.tar
# fq paired UUID_2 file:///path/to/R1.fq.gz,file:///path/to/R2.fq.gz
# tar single UUID_3 http://sample-depot.com/single-end-sample.tar
# tar paired UUID_4 s3://my-bucket-name/directory/paired-sample.tar.gz
# fq single UUID_5 s3://my-bucket-name/directory/single-end-file.fq
# bam paired UUID_6 gdc://1a5f5e03-4219-4704-8aaf-f132f23f26c7
#
# Place your samples below, one per line.
""".format(scheme=[x + '://' for x in schemes])[1:])
def user_input_manifest(manifest_path):
"""
User input of workflow manifest file
:param str manifest_path: Path to write out manifest
:return: Path to manifest or None if user skips
:rtype: str
"""
print('\n\t\t\tUser Input of Toil-rnaseq Manifest')
start = raw_input('Type Y/y and hit enter to continue: ').lower()
if start != 'y':
return None
print('\n'.join(generate_manifest().split('\n')[:-1])) # Don't print last line of manifest
print('\n\nFollow the prompts to enter sample information, based on the information above.\n')
samples = []
while True:
filetype, paired, uuid = None, None, None
url = 'bad-url'
while filetype not in ['tar', 'fq', 'bam']:
filetype = raw_input('Enter the filetype of the sample: ')
while paired not in ['paired', 'single']:
paired = raw_input('Enter whether sample is paired or single-end: ')
        uuid = raw_input('Enter unique name (or UUID) of sample: ')
#!/usr/bin/env python
#
# module_tests.py: testing modules / external sources.
#
# Subversion is a tool for revision control.
# See http://subversion.apache.org for more information.
#
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
######################################################################
# General modules
import sys
import os
import re
import shutil
# Our testing module
import svntest
# (abbreviation)
Skip = svntest.testcase.Skip_deco
SkipUnless = svntest.testcase.SkipUnless_deco
XFail = svntest.testcase.XFail_deco
Issues = svntest.testcase.Issues_deco
Issue = svntest.testcase.Issue_deco
Wimp = svntest.testcase.Wimp_deco
Item = svntest.wc.StateItem
######################################################################
# Tests
#
# Each test must return on success or raise on failure.
#----------------------------------------------------------------------
### todo: it's inefficient to keep calling externals_test_setup() for
### every test. It's slow. But it's very safe -- we're guaranteed to
### have a clean repository, built from the latest Subversion, with
### the svn:externals properties preset in a known way. Right now I
### can't think of any other way to achieve that guarantee, so the
### result is that each individual test is slow.
def externals_test_setup(sbox):
"""Set up a repository in which some directories have the externals property,
and set up another repository, referred to by some of those externals.
Both repositories contain greek trees with five revisions worth of
random changes, then in the sixth revision the first repository --
and only the first -- has some externals properties set. ### Later,
test putting externals on the second repository. ###
The arrangement of the externals in the first repository is:
/A/B/ ==> ^/A/D/gamma gamma
/A/C/ ==> exdir_G <scheme>:///<other_repos>/A/D/G
../../../<other_repos_basename>/A/D/H@1 exdir_H
/A/D/ ==> ^/../<other_repos_basename>/A exdir_A
//<other_repos>/A/D/G/ exdir_A/G/
exdir_A/H -r 1 <scheme>:///<other_repos>/A/D/H
/<some_paths>/A/B x/y/z/blah
A dictionary is returned keyed by the directory created by the
external whose value is the URL of the external.
"""
# The test itself will create a working copy
sbox.build(create_wc = False)
svntest.main.safe_rmtree(sbox.wc_dir)
wc_init_dir = sbox.add_wc_path('init') # just for setting up props
repo_dir = sbox.repo_dir
repo_url = sbox.repo_url
other_repo_dir, other_repo_url = sbox.add_repo_path('other')
other_repo_basename = os.path.basename(other_repo_dir)
# Get a scheme relative URL to the other repository.
scheme_relative_other_repo_url = other_repo_url[other_repo_url.find(':')+1:]
# Get a server root relative URL to the other repository by trimming
# off the first three /'s.
server_relative_other_repo_url = other_repo_url
for i in range(3):
j = server_relative_other_repo_url.find('/') + 1
server_relative_other_repo_url = server_relative_other_repo_url[j:]
server_relative_other_repo_url = '/' + server_relative_other_repo_url
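  # (Illustrative example with a made-up URL: 'http://localhost:1234/repos/other'
  # gives scheme-relative '//localhost:1234/repos/other' and, after trimming
  # through the first three '/'s and restoring the leading '/', server-relative
  # '/repos/other'.)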
# These files will get changed in revisions 2 through 5.
mu_path = os.path.join(wc_init_dir, "A/mu")
pi_path = os.path.join(wc_init_dir, "A/D/G/pi")
lambda_path = os.path.join(wc_init_dir, "A/B/lambda")
omega_path = os.path.join(wc_init_dir, "A/D/H/omega")
# These are the directories on which `svn:externals' will be set, in
# revision 6 on the first repo.
B_path = os.path.join(wc_init_dir, "A/B")
C_path = os.path.join(wc_init_dir, "A/C")
D_path = os.path.join(wc_init_dir, "A/D")
# Create a working copy.
svntest.actions.run_and_verify_svn(None, None, [],
'checkout',
repo_url, wc_init_dir)
# Make revisions 2 through 5, but don't bother with pre- and
# post-commit status checks.
svntest.main.file_append(mu_path, "Added to mu in revision 2.\n")
svntest.actions.run_and_verify_svn(None, None, [],
'ci', '-m', 'log msg',
'--quiet', wc_init_dir)
svntest.main.file_append(pi_path, "Added to pi in revision 3.\n")
svntest.actions.run_and_verify_svn(None, None, [],
'ci', '-m', 'log msg',
'--quiet', wc_init_dir)
svntest.main.file_append(lambda_path, "Added to lambda in revision 4.\n")
svntest.actions.run_and_verify_svn(None, None, [],
'ci', '-m', 'log msg',
'--quiet', wc_init_dir)
svntest.main.file_append(omega_path, "Added to omega in revision 5.\n")
svntest.actions.run_and_verify_svn(None, None, [],
'ci', '-m', 'log msg',
'--quiet', wc_init_dir)
# Get the whole working copy to revision 5.
expected_output = svntest.wc.State(wc_init_dir, {
})
svntest.actions.run_and_verify_update(wc_init_dir,
expected_output, None, None)
# Now copy the initial repository to create the "other" repository,
# the one to which the first repository's `svn:externals' properties
# will refer. After this, both repositories have five revisions
# of random stuff, with no svn:externals props set yet.
svntest.main.copy_repos(repo_dir, other_repo_dir, 5)
# This is the returned dictionary.
external_url_for = { }
external_url_for["A/B/gamma"] = "^/A/D/gamma"
external_url_for["A/C/exdir_G"] = other_repo_url + "/A/D/G"
external_url_for["A/C/exdir_H"] = "../../../" + \
other_repo_basename + \
"/A/D/H@1"
# Set up the externals properties on A/B/, A/C/ and A/D/.
externals_desc = \
external_url_for["A/B/gamma"] + " gamma\n"
change_external(B_path, externals_desc, commit=False)
externals_desc = \
"exdir_G " + external_url_for["A/C/exdir_G"] + "\n" + \
external_url_for["A/C/exdir_H"] + " exdir_H\n"
change_external(C_path, externals_desc, commit=False)
external_url_for["A/D/exdir_A"] = "^/../" + other_repo_basename + "/A"
external_url_for["A/D/exdir_A/G/"] = scheme_relative_other_repo_url + \
"/A/D/G/"
external_url_for["A/D/exdir_A/H"] = other_repo_url + "/A/D/H"
external_url_for["A/D/x/y/z/blah"] = server_relative_other_repo_url + "/A/B"
externals_desc = \
external_url_for["A/D/exdir_A"] + " exdir_A" + \
"\n" + \
external_url_for["A/D/exdir_A/G/"] + " exdir_A/G/" + \
"\n" + \
"exdir_A/H -r 1 " + external_url_for["A/D/exdir_A/H"] + \
"\n" + \
external_url_for["A/D/x/y/z/blah"] + " x/y/z/blah" + \
"\n"
change_external(D_path, externals_desc, commit=False)
# Commit the property changes.
expected_output = svntest.wc.State(wc_init_dir, {
'A/B' : Item(verb='Sending'),
'A/C' : Item(verb='Sending'),
'A/D' : Item(verb='Sending'),
})
expected_status = svntest.actions.get_virginal_state(wc_init_dir, 5)
expected_status.tweak('A/B', 'A/C', 'A/D', wc_rev=6, status=' ')
svntest.actions.run_and_verify_commit(wc_init_dir,
expected_output,
expected_status,
None, wc_init_dir)
return external_url_for
def change_external(path, new_val, commit=True):
"""Change the value of the externals property on PATH to NEW_VAL,
and commit the change unless COMMIT is False."""
svntest.actions.set_prop('svn:externals', new_val, path)
if commit:
svntest.actions.run_and_verify_svn(None, None, [], 'ci',
'-m', 'log msg', '--quiet', path)
def change_external_expect_error(path, new_val, expected_err):
"""Try to change the value of the externals property on PATH to NEW_VAL,
but expect to get an error message that matches EXPECTED_ERR."""
svntest.actions.set_prop('svn:externals', new_val, path,
expected_re_string=expected_err)
def probe_paths_exist(paths):
""" Probe each one of PATHS to see if it exists, otherwise throw a
Failure exception. """
for path in paths:
if not os.path.exists(path):
raise svntest.Failure("Probing for " + path + " failed.")
def probe_paths_missing(paths):
""" Probe each one of PATHS to see if does not exist, otherwise throw a
Failure exception. """
for path in paths:
if os.path.exists(path):
raise svntest.Failure(path + " unexpectedly still exists.")
#----------------------------------------------------------------------
### todo: It would be great if everything used the new wc.py system to
### check output/status. In fact, it would be great to do more output
### and status checking period! But must first see how well the
### output checkers deal with multiple summary lines. With external
### modules, you can get the first "Updated to revision X" line, and
### then there will be more "Updated to..." and "Checked out..." lines
### following it, one line for each new or changed external.
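# Illustrative sketch (an assumption, not part of this test suite): if the
# output checkers were taught about those extra summary lines, a pattern like
# the following could be used to recognise them in 'svn update' output.
_EXTERNAL_SUMMARY_RE = re.compile(
  r'^(Updated to revision \d+\.|Checked out revision \d+\.)$')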
#----------------------------------------------------------------------
def checkout_with_externals(sbox):
"test checkouts with externals"
externals_test_setup(sbox)
wc_dir = sbox.wc_dir
repo_url = sbox.repo_url
# Create a working copy.
svntest.actions.run_and_verify_svn(None, None, [],
'checkout',
repo_url, wc_dir)
# Probe the working copy a bit, see if it's as expected.
expected_existing_paths = [
sbox.ospath('A/B/gamma'),
sbox.ospath('A/C/exdir_G'),
sbox.ospath('A/C/exdir_G/pi'),
sbox.ospath('A/C/exdir_H'),
sbox.ospath('A/C/exdir_H/omega'),
sbox.ospath('A/D/x'),
sbox.ospath('A/D/x/y'),
sbox.ospath('A/D/x/y/z'),
sbox.ospath('A/D/x/y/z/blah'),
sbox.ospath('A/D/x/y/z/blah/E/alpha'),
sbox.ospath('A/D/x/y/z/blah/E/beta'),
]
probe_paths_exist(expected_existing_paths)
# Pick a file at random, make sure it has the expected contents.
for path, contents in ((sbox.ospath('A/C/exdir_H/omega'),
"This is the file 'omega'.\n"),
(sbox.ospath('A/B/gamma'),
"This is the file 'gamma'.\n")):
if open(path).read() != contents:
raise svntest.Failure("Unexpected contents for rev 1 of " + path)
#----------------------------------------------------------------------
def update_receive_new_external(sbox):
"update to receive a new external module"
external_url_for = externals_test_setup(sbox)
wc_dir = sbox.wc_dir
other_wc_dir = sbox.add_wc_path('other')
repo_url = sbox.repo_url
other_repo_url = repo_url + ".other"
# Checkout two working copies.
svntest.actions.run_and_verify_svn(None, None, [],
'checkout',
repo_url, wc_dir)
svntest.actions.run_and_verify_svn(None, None, [],
'checkout',
repo_url, other_wc_dir)
# Add one new external item to the property on A/D. The new item is
# "exdir_E", deliberately added in the middle not at the end.
new_externals_desc = \
external_url_for["A/D/exdir_A"] + " exdir_A" + \
"\n" + \
external_url_for["A/D/exdir_A/G/"] + " exdir_A/G/" + \
"\n" + \
"exdir_E " + other_repo_url + "/A/B/E" + \
"\n" + \
"exdir_A/H -r 1 " + external_url_for["A/D/exdir_A/H"] + \
"\n" + \
external_url_for["A/D/x/y/z/blah"] + " x/y/z/blah" + \
"\n"
# Set and commit the property
change_external(sbox.ospath('A/D'), new_externals_desc)
# Update the other working copy, see if we get the new item.
expected_output = svntest.wc.State(other_wc_dir, {
'A/D' : Item(status=' U'),
'A/D/exdir_E/beta' : Item(status='A '),
'A/D/exdir_E/alpha' : Item(status='A '),
})
svntest.actions.run_and_verify_update(other_wc_dir,
expected_output, None, None)
probe_paths_exist([os.path.join(other_wc_dir, "A", | |
<reponame>microsoftgraph/msgraph-cli-archived<filename>msgraph/cli/command_modules/identitydirmgt/azext_identitydirmgt/generated/action.py
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=protected-access
import argparse
from collections import defaultdict
from knack.util import CLIError
class AddAddresses(argparse._AppendAction):
def __call__(self, parser, namespace, values, option_string=None):
action = self.get_action(values, option_string)
super(AddAddresses, self).__call__(parser, namespace, action, option_string)
def get_action(self, values, option_string): # pylint: disable=no-self-use
try:
properties = defaultdict(list)
for (k, v) in (x.split('=', 1) for x in values):
properties[k].append(v)
properties = dict(properties)
except ValueError:
raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
d = {}
for k in properties:
kl = k.lower()
v = properties[k]
if kl == 'city':
d['city'] = v[0]
elif kl == 'country-or-region':
d['country_or_region'] = v[0]
elif kl == 'office-location':
d['office_location'] = v[0]
elif kl == 'postal-code':
d['postal_code'] = v[0]
elif kl == 'state':
d['state'] = v[0]
elif kl == 'street':
d['street'] = v[0]
else:
raise CLIError('Unsupported Key {} is provided for parameter addresses. All possible keys are: city, '
'country-or-region, office-location, postal-code, state, street'.format(k))
return d
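# Illustrative usage sketch (not generated code; the option name and sample
# values below are hypothetical): AddAddresses can be attached to any argparse
# option that accepts KEY=VALUE pairs.
def _example_addresses_parser():
    parser = argparse.ArgumentParser()
    parser.add_argument('--addresses', nargs='+', action=AddAddresses)
    # _example_addresses_parser().parse_args(
    #     ['--addresses', 'city=Redmond', 'postal-code=98052'])
    # leaves namespace.addresses == [{'city': 'Redmond', 'postal_code': '98052'}]
    return parser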
class AddOnPremisesProvisioningErrors(argparse._AppendAction):
def __call__(self, parser, namespace, values, option_string=None):
action = self.get_action(values, option_string)
super(AddOnPremisesProvisioningErrors, self).__call__(parser, namespace, action, option_string)
def get_action(self, values, option_string): # pylint: disable=no-self-use
try:
properties = defaultdict(list)
for (k, v) in (x.split('=', 1) for x in values):
properties[k].append(v)
properties = dict(properties)
except ValueError:
raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
d = {}
for k in properties:
kl = k.lower()
v = properties[k]
if kl == 'category':
d['category'] = v[0]
elif kl == 'occurred-date-time':
d['occurred_date_time'] = v[0]
elif kl == 'property-causing-error':
d['property_causing_error'] = v[0]
elif kl == 'value':
d['value'] = v[0]
else:
raise CLIError('Unsupported Key {} is provided for parameter on_premises_provisioning_errors. All '
'possible keys are: category, occurred-date-time, property-causing-error, value'.format(k))
return d
class AddPhones(argparse._AppendAction):
def __call__(self, parser, namespace, values, option_string=None):
action = self.get_action(values, option_string)
super(AddPhones, self).__call__(parser, namespace, action, option_string)
def get_action(self, values, option_string): # pylint: disable=no-self-use
try:
properties = defaultdict(list)
for (k, v) in (x.split('=', 1) for x in values):
properties[k].append(v)
properties = dict(properties)
except ValueError:
raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
d = {}
for k in properties:
kl = k.lower()
v = properties[k]
if kl == 'language':
d['language'] = v[0]
elif kl == 'number':
d['number'] = v[0]
elif kl == 'region':
d['region'] = v[0]
elif kl == 'type':
d['type'] = v[0]
else:
raise CLIError('Unsupported Key {} is provided for parameter phones. All possible keys are: language, '
'number, region, type'.format(k))
return d
class AddDirectReports(argparse._AppendAction):
def __call__(self, parser, namespace, values, option_string=None):
action = self.get_action(values, option_string)
super(AddDirectReports, self).__call__(parser, namespace, action, option_string)
def get_action(self, values, option_string): # pylint: disable=no-self-use
try:
properties = defaultdict(list)
for (k, v) in (x.split('=', 1) for x in values):
properties[k].append(v)
properties = dict(properties)
except ValueError:
raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
d = {}
for k in properties:
kl = k.lower()
v = properties[k]
if kl == 'deleted-date-time':
d['deleted_date_time'] = v[0]
elif kl == 'id':
d['id'] = v[0]
else:
raise CLIError('Unsupported Key {} is provided for parameter direct_reports. All possible keys are: '
'deleted-date-time, id'.format(k))
return d
class AddManager(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
action = self.get_action(values, option_string)
namespace.manager = action
def get_action(self, values, option_string): # pylint: disable=no-self-use
try:
properties = defaultdict(list)
for (k, v) in (x.split('=', 1) for x in values):
properties[k].append(v)
properties = dict(properties)
except ValueError:
raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
d = {}
for k in properties:
kl = k.lower()
v = properties[k]
if kl == 'deleted-date-time':
d['deleted_date_time'] = v[0]
elif kl == 'id':
d['id'] = v[0]
else:
raise CLIError('Unsupported Key {} is provided for parameter manager. All possible keys are: '
'deleted-date-time, id'.format(k))
return d
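# Note (illustrative, not generated code): unlike the *_AppendAction classes in
# this file, AddManager derives from argparse.Action and assigns a single dict
# to namespace.manager, so repeating the option keeps only the last value
# instead of accumulating a list.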
class AddContactsOrgcontactMemberOf(argparse._AppendAction):
def __call__(self, parser, namespace, values, option_string=None):
action = self.get_action(values, option_string)
super(AddContactsOrgcontactMemberOf, self).__call__(parser, namespace, action, option_string)
def get_action(self, values, option_string): # pylint: disable=no-self-use
try:
properties = defaultdict(list)
for (k, v) in (x.split('=', 1) for x in values):
properties[k].append(v)
properties = dict(properties)
except ValueError:
raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
d = {}
for k in properties:
kl = k.lower()
v = properties[k]
if kl == 'deleted-date-time':
d['deleted_date_time'] = v[0]
elif kl == 'id':
d['id'] = v[0]
else:
raise CLIError('Unsupported Key {} is provided for parameter member_of. All possible keys are: '
'deleted-date-time, id'.format(k))
return d
class AddContactsOrgcontactTransitiveMemberOf(argparse._AppendAction):
def __call__(self, parser, namespace, values, option_string=None):
action = self.get_action(values, option_string)
super(AddContactsOrgcontactTransitiveMemberOf, self).__call__(parser, namespace, action, option_string)
def get_action(self, values, option_string): # pylint: disable=no-self-use
try:
properties = defaultdict(list)
for (k, v) in (x.split('=', 1) for x in values):
properties[k].append(v)
properties = dict(properties)
except ValueError:
raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
d = {}
for k in properties:
kl = k.lower()
v = properties[k]
if kl == 'deleted-date-time':
d['deleted_date_time'] = v[0]
elif kl == 'id':
d['id'] = v[0]
else:
raise CLIError('Unsupported Key {} is provided for parameter transitive_member_of. All possible keys '
'are: deleted-date-time, id'.format(k))
return d
class AddAlternativeSecurityIds(argparse._AppendAction):
def __call__(self, parser, namespace, values, option_string=None):
action = self.get_action(values, option_string)
super(AddAlternativeSecurityIds, self).__call__(parser, namespace, action, option_string)
def get_action(self, values, option_string): # pylint: disable=no-self-use
try:
properties = defaultdict(list)
for (k, v) in (x.split('=', 1) for x in values):
properties[k].append(v)
properties = dict(properties)
except ValueError:
raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
d = {}
for k in properties:
kl = k.lower()
v = properties[k]
if kl == 'identity-provider':
d['identity_provider'] = v[0]
elif kl == 'key':
d['key'] = v[0]
elif kl == 'type':
d['type'] = v[0]
else:
raise CLIError('Unsupported Key {} is provided for parameter alternative_security_ids. All possible '
'keys are: identity-provider, key, type'.format(k))
return d
class AddDevicesDeviceMemberOf(argparse._AppendAction):
def __call__(self, parser, namespace, values, option_string=None):
action = self.get_action(values, option_string)
super(AddDevicesDeviceMemberOf, self).__call__(parser, namespace, action, option_string)
def get_action(self, values, option_string): # pylint: disable=no-self-use
try:
properties = defaultdict(list)
for (k, v) in (x.split('=', 1) for x in values):
properties[k].append(v)
properties = dict(properties)
except ValueError:
raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
d = {}
for k in properties:
kl = k.lower()
v = properties[k]
if kl == 'deleted-date-time':
d['deleted_date_time'] = v[0]
elif kl == 'id':
d['id'] = v[0]
else:
raise CLIError('Unsupported Key {} is provided for parameter member_of. All possible keys are: '
'deleted-date-time, id'.format(k))
return d
class AddRegisteredOwners(argparse._AppendAction):
def __call__(self, parser, namespace, values, option_string=None):
action = self.get_action(values, option_string)
super(AddRegisteredOwners, self).__call__(parser, namespace, action, option_string)
def get_action(self, values, option_string): # pylint: disable=no-self-use
try:
properties = defaultdict(list)
for (k, v) in (x.split('=', 1) for x in values):
properties[k].append(v)
properties = dict(properties)
except ValueError:
raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
d = {}
for k in properties:
kl = k.lower()
v = properties[k]
if kl == 'deleted-date-time':
d['deleted_date_time'] = v[0]
elif kl == 'id':
d['id'] = v[0]
else:
raise CLIError('Unsupported Key {} is provided for parameter registered_owners. All possible keys are: '
'deleted-date-time, id'.format(k))
return d
class AddRegisteredUsers(argparse._AppendAction):
def __call__(self, parser, namespace, values, option_string=None):
action = self.get_action(values, option_string)
super(AddRegisteredUsers, self).__call__(parser, namespace, action, option_string)
def get_action(self, values, option_string): # pylint: disable=no-self-use
try:
properties = defaultdict(list)
for (k, v) in (x.split('=', 1) for x in values):
properties[k].append(v)
properties = dict(properties)
except ValueError:
raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
d = {}
for k in properties:
kl = k.lower()
v = properties[k]
if kl == 'deleted-date-time':
d['deleted_date_time'] = v[0]
elif kl == 'id':
d['id'] = v[0]
else:
raise CLIError('Unsupported Key {} is provided for parameter registered_users. All possible keys are: '
'deleted-date-time, id'.format(k))
return d
class AddDevicesDeviceTransitiveMemberOf(argparse._AppendAction):
def __call__(self, parser, namespace, values, option_string=None):
action = self.get_action(values, option_string)
super(AddDevicesDeviceTransitiveMemberOf, self).__call__(parser, namespace, action, option_string)
def get_action(self, values, option_string): # pylint: disable=no-self-use
try:
properties = defaultdict(list)
for (k, v) in (x.split('=', 1) for x in values):
properties[k].append(v)
properties = dict(properties)
except ValueError:
raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
d = {}
for k in properties:
kl = k.lower()
v = properties[k]
if kl == 'deleted-date-time':
d['deleted_date_time'] = v[0]
elif kl == 'id':
d['id'] = v[0]
else:
raise CLIError('Unsupported Key {} is provided for parameter transitive_member_of. All possible keys '
'are: deleted-date-time, id'.format(k))
return d
class AddDevicesDeviceExtensions(argparse._AppendAction):
def __call__(self, parser, namespace, values, option_string=None):
action = self.get_action(values, option_string)
        super(AddDevicesDeviceExtensions, self).__call__(parser, namespace, action, option_string)
# ex:set ts=4 sw=4: <- for vim
#
# EPANET Output File Tool Internal Plugin which reads EPANET 2.00.12 Output File
#
import EPANETOutputFilePlugin
import struct
import gettext
class InternalPlugin(EPANETOutputFilePlugin.EOFTPlugin):
def __init__(self):
# TODO initialise translations?
# since this is an internal plugin, we don't need to do this.
self.parser = None
self.options = None
pass
def Test(self, eof):
#print("InternalPlugin:Test function")
pass
def Init(self, parser):
#print("%s:Init(%s)" % (self.__class__.__name__, parser))
parser.add_option('-a','--all',
action='store_true', dest = 'all', default=False,
help=_('display all output file sections (default)'))
parser.add_option('-s','--silent',
action='store_true', dest = 'silent', default=False,
help=_('don\'t display any output file sections'))
parser.add_option('-p','--prolog',
action='store_true', dest = 'prolog', default=False,
help=_('display prolog section'))
parser.add_option('-n','--prolog_node_csv',
action='store', type='string', dest = 'prolog_node_csv',
metavar = 'PROLOG_NODE_CSV',
help=_('write CSV for nodes from prolog to PROLOG_NODE_CSV'))
parser.add_option('-l','--prolog_link_csv',
action='store', type='string',
dest = 'prolog_link_csv', metavar = 'PROLOG_LINK_CSV',
help=_('write CSV for links from prolog to PROLOG_LINK_CSV'))
parser.add_option('-e','--energy_use',
action='store_true', dest = 'energy_use', default=False,
help=_('display energy use section'))
parser.add_option('-E','--energy_use_csv',
action='store', type='string', dest = 'energy_use_csv',
metavar = 'ENERGY_CSV',
help=_('write CSV from energy use section to ENERGY_CSV'))
parser.add_option('-d','--dynamic_results',
action='store_true', dest = 'dynamic_results', default=False,
help=_('display dynamic results section'))
parser.add_option('-N','--dynamic_node_csv',
action='store', type='string', dest = 'dynamic_node_csv',
metavar = 'DYNAMIC_NODE_CSV',
help=_('write CSV for nodes from dynamic results to DYNAMIC_NODE_CSV'))
parser.add_option('-L','--dynamic_link_csv',
action='store', type='string', dest = 'dynamic_link_csv',
metavar = 'DYNAMIC_LINK_CSV',
help=_('write CSV for links from dynamic results to DYNAMIC_LINK_CSV'))
parser.add_option('-c','--coda', '--epilog',
action='store_true', dest = 'epilog', default=False,
help=_('display file epilog'))
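    # Illustrative sketch (an assumption, not part of this plugin): Init() can be
    # exercised on its own with a bare optparse parser, provided _() has been
    # installed first. The gettext domain name below is hypothetical.
    #
    #   import optparse
    #   gettext.install('EPANETOutputFile')
    #   parser = optparse.OptionParser()
    #   InternalPlugin().Init(parser)
    #   options, args = parser.parse_args(['--prolog', '--energy_use_csv', 'e.csv'])
    #   # options.prolog == True, options.energy_use_csv == 'e.csv'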
def FileInit(self, eof, options):
#print("InternalPlugin:FileInit(%s, %s)" % (eof, options))
# if user has not specified anything to do, dump everything
self.options = options
if (options.prolog == False
and options.energy_use == False
and options.dynamic_results == False
and options.epilog == False
and options.prolog_node_csv is None
and options.prolog_link_csv is None
and options.energy_use_csv is None
and options.dynamic_node_csv is None
and options.dynamic_link_csv is None):
if options.silent == False: options.all = True
# 'silent' wins over all printing except errors....
if options.silent == True: options.prolog = False
if options.silent == True: options.energy_use = False
if options.silent == True: options.dynamic_results = False
if options.silent == True: options.epilog = False
if options.silent == True: options.verbose = False
if options.verbose:
if options.prolog == True:
print(_("User requested display of file prolog section"))
if options.prolog_node_csv is not None:
print(_("User requested writing of prolog node info as CSV to: %s") % options.prolog_node_csv)
if options.prolog_link_csv is not None:
print(_("User requested writing of prolog link info as CSV to: %s") % options.prolog_link_csv)
if options.energy_use == True:
print(_("User requested display of energy use section"))
if options.energy_use_csv is not None:
print(_("User requested writing of energy use section as CSV to: %s") % options.energy_use_csv)
if options.dynamic_results == True:
print(_("User requested display of file dynamic results section"))
if options.dynamic_node_csv is not None:
print(_("User requested writing of dynamic node info as CSV to: %s") % options.dynamic_node_csv)
if options.dynamic_link_csv is not None:
print(_("User requested writing of dynamic link info as CSV to: %s") % options.dynamic_link_csv)
if options.epilog == True:
print(_("User requested display of file epilog section"))
if options.all == True:
print(_("User requested display of content from all file sections"))
def FileOpen(self, eof, progupdate):
'''File has been opened. No return value.
Args:
eof (EPANETOutputFile): output file loading coordinator
progupdate (None or function):
called as progupdate(% of task done (0-100), text description of current step)
'''
#print("InternalPlugin:FileOpen(%s)" % eof)
# read some info from end first to verify file type
if progupdate is not None: progupdate(5,_('Verifying file type...'))
# read some info from end first
eof.f.seek(-12,2)
eof.Epilog['nPeriods'], = struct.unpack('<i', eof.f.read(4))
eof.Epilog['WarningFlag'], = struct.unpack('<i', eof.f.read(4))
eof.Epilog['magic'], = magicend, = struct.unpack('<i', eof.f.read(4))
eof.f.seek(0)
eof.Prolog['magic'], = magicstart, = struct.unpack('<i', eof.f.read(4))
if magicstart != magicend:
print(_('ERROR: magic number in prolog (%(prologmagic)d) does not match magic number in epilog (%(epilogmagic)d)') % {'prologmagic': magicstart, 'epilogmagic': magicend})
raise Exception(_('ERROR: magic numbers do not match: probably not an EPANET output file'))
if progupdate is not None: progupdate(100,_('Verified file type.'))
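    # Illustrative note (an assumption, not part of this plugin): the check above
    # follows the usual binary "trailer" pattern -- seek 12 bytes back from the
    # end of the file and unpack three little-endian int32s, e.g.:
    #
    #   with open(path, 'rb') as f:
    #       f.seek(-12, 2)  # 2 == os.SEEK_END
    #       nperiods, warnflag, magic = struct.unpack('<3i', f.read(12))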
def ReadProlog(self, eof, f, d, magicend, progupdate):
'''Read prolog from EPANET output file. No return value.
Args:
f (file): file in correct position to read prolog data
d (dictionary): dictionary provided to store prolog values in
magicend (int): magic value read from epilog which should match one in prolog
progupdate (None or function):
called as progupdate(% of task done (0-100), text description of current step)
'''
if progupdate is not None: progupdate(5,_('Reading prolog info'))
d['magic'], = struct.unpack('<i', f.read(4))
if d['magic'] != magicend:
print(_('ERROR: magic number in prolog (%(prologmagic)d) does not match magic number in epilog (%(epilogmagic)d)') % {'prologmagic': d['magic'], 'epilogmagic': magicend})
raise Exception(_('ERROR: magic numbers do not match: probably not an EPANET output file'))
d['version'], = struct.unpack('<i', f.read(4))
d['nNodes'], = struct.unpack('<i', f.read(4))
d['nResTanks'], = struct.unpack('<i', f.read(4))
d['nJunctions'] = d['nNodes'] - d['nResTanks']
d['nLinks'], = struct.unpack('<i', f.read(4))
d['nPumps'], = struct.unpack('<i', f.read(4))
d['nValves'], = struct.unpack('<i', f.read(4))
d['nPipes'] = d['nLinks'] - d['nPumps'] - d['nValves']
d['WaterQualityOptNum'], = struct.unpack('<i', f.read(4))
d['WaterQualityOption'] = eof.getWaterQualityOptionText(d['WaterQualityOptNum'])
# read source node index and make it 0-based
d['source_node_index'] = struct.unpack('<i', f.read(4))[0] - 1
d['FlowUnitsOptNum'], = struct.unpack('<i', f.read(4))
d['FlowUnitsOption'] = eof.getFlowUnitsOptionText(d['FlowUnitsOptNum'])
d['PressureUnitsOptNum'], = struct.unpack('<i', f.read(4))
d['PressureUnitsOption'] = eof.getPressureUnitsOptionText(d['PressureUnitsOptNum'])
d['TimeStatsOptNum'], = struct.unpack('<i', f.read(4))
d['TimeStatsOption'] = eof.getTimeStatsOption(d['TimeStatsOptNum'])
d['StartTime'], = struct.unpack('<i', f.read(4))
d['ReportTimeStep'], = struct.unpack('<i', f.read(4))
d['SimulationDuration'], = struct.unpack('<i', f.read(4))
d['Title1'] = f.read(80).strip('\0')
d['Title2'] = f.read(80).strip('\0')
d['Title3'] = f.read(80).strip('\0')
d['InputFile'] = f.read(260).strip('\0')
d['ReportFile'] = f.read(260).strip('\0')
d['ChemicalName'] = f.read(32).strip('\0')
d['ChemicalConcentrationUnits'] = f.read(32).strip('\0')
d['NodeID'] = []
d['NodeElev'] = []
d['NodeTankResIndex'] = []
if progupdate is not None: progupdate(20,_('Reading prolog node info'))
if eof.options.verbose:
print(_('Reading Node IDs (%(nNodes)d)...') % {'nNodes': d['nNodes']})
for i in range (0, d['nNodes']):
d['NodeID'].append(f.read(32).strip('\0'))
#print(' Node %d ID: %s' % (i, NodeID[i]))
d['NodeTankResIndex'].append(-1)
d['LinkID'] = []
d['LinkStart'] = []
d['LinkEnd'] = []
d['LinkType'] = []
d['LinkLength'] = []
d['LinkDiam'] = []
if progupdate is not None: progupdate(50,_('Reading prolog link info'))
if eof.options.verbose:
print(_('Reading Link IDs (%(nLinks)d)...') % {'nLinks': d['nLinks']})
for i in range (0, d['nLinks']):
d['LinkID'].append(f.read(32).strip('\0'))
#print(' Link %d ID: %s' % (i, d['LinkID[i]))
for i in range (0, d['nLinks']):
d['LinkStart'].append(struct.unpack('<i', f.read(4))[0]-1)
#print(' LinkStart %d ID: %d (0-based)' % (i, LinkStart[i]))
for i in range (0, d['nLinks']):
d['LinkEnd'].append(struct.unpack('<i', f.read(4))[0]-1)
#print(' LinkEnd %d ID: %d (0-based)' % (i, LinkEnd[i]))
for i in range (0, d['nLinks']):
d['LinkType'].append(struct.unpack('<i', f.read(4))[0])
#print(' LinkType %d ID: %d' % (i, LinkType[i]))
d['TankResIndex'] = []
if eof.options.verbose:
print(_('Reading Tank/Reservoir indexes (%(nResTanks)d)...') % {'nResTanks': d['nResTanks']})
for i in range (0, d['nResTanks']):
# read the index of tank/res and take off 1 to make it zero-based
d['TankResIndex'].append(struct.unpack('<i', f.read(4))[0] - 1)
#print(' Node index (0-based) of tank %d: %d' % (i, d['TankResIndex[i]))
# store index of tank or res in node array
d['NodeTankResIndex'][d['TankResIndex'][i]] = i
d['TankResXSectArea'] = []
if eof.options.verbose:
print(_('Reading Cross Sectional Areas of Tanks/Reservoirs (%(nResTanks)d)...') % {'nResTanks': d['nResTanks']})
nReservoirs = 0
for i in range (0, d['nResTanks']):
val = struct.unpack('<f', f.read(4))[0]
if val == 0.0:
nReservoirs += 1
d['TankResXSectArea'].append(val)
#print(' Cross Sectional Area of tank %d: %d' % (i, TankResXSectArea[i]))
d['nReservoirs'] = nReservoirs
d['nTanks'] = d['nResTanks'] - nReservoirs
for i in range (0, d['nNodes']):
d['NodeElev'].append(struct.unpack('<f', f.read(4))[0])
#print(' Node %d elevation: %s' % (i, d['NodeElev'][i]))
if progupdate is not None: progupdate(80,_('Reading prolog extra info'))
if eof.options.verbose:
print(_('Reading Link lengths (%(nLinks)d)...') % {'nLinks': d['nLinks']})
for i in range (0, d['nLinks']):
d['LinkLength'].append(struct.unpack('<f', f.read(4))[0])
#print(' Link %d length: %s' % (i, LinkLength[i]))
if eof.options.verbose:
print(_('Reading Link diameters (%(nLinks)d)...') % {'nLinks': d['nLinks']})
for i in range (0, d['nLinks']):
d['LinkDiam'].append(struct.unpack('<f', f.read(4))[0])
#print(' Link %d diameter: %s' % (i, LinkDiam[i]))
def PrologRead(self, eof, progupdate):
#print("%s:PrologRead(%s)" % (self.__class__.__name__, eof))
# read prolog (matching magic numbers at start and end already read
# and checked)
self.ReadProlog(eof, eof.f, eof.Prolog, eof.Epilog['magic'], progupdate)
def PrintProlog(self, eof, d):
'''Print EPANET output file prolog. No return value.
Args:
d (dictionary): Prolog dictionary to print
'''
print("")
headingtext = _("Prolog")
print(headingtext)
print('='*len(headingtext))
print(_('Magic number: %d') % d['magic'])
print(_('EPANET Version: %d') % d['version'])
print(_('Number of Nodes: %d') % d['nNodes'])
print(_('Number of Reservoirs (%(nRes)d) + Tanks (%(nTank)d): %(nResTank)d (so %(nJunc)d Junctions)')
% { 'nRes': d['nReservoirs'],
'nTank': d['nTanks'],
'nResTank': d['nResTanks'],
'nJunc': d['nJunctions']})
print(_('Number of Links: %d') % d['nLinks'])
print(_('Number of Pumps: %d') % d['nPumps'])
print(_('Number of Valves: %d') % d['nValves'])
print(_(' (Number of Pipes: %d)') % d['nPipes'])
print(_('Water Quality Option: | |
    # unzip
run_shell_cmd("gunzip {}.gz".format(out_results[subsample_bed_key]))
# -----------------------------------------
# ANALYSIS 4 - plot this out
# inputs: stable groups
# outputs: heatmaps with histone marks
# -----------------------------------------
logger.info("ANALYSIS: plot out the heatmaps with the ordering in the subsamples")
plot_dir = "{}/plots".format(results_dir)
# plot ATAC and histone marks
if not os.path.isdir(plot_dir):
run_shell_cmd("mkdir -p {}".format(plot_dir))
for subsample_bed_key in subsample_bed_keys:
# plot the ATAC alongside, using summits
ordered_subsample_summits_key = "{}.summits".format(subsample_bed_key)
out_results[ordered_subsample_summits_key] = "{}.summits.bed".format(
out_results[subsample_bed_key].split(".bed")[0])
if not os.path.isfile(out_results[ordered_subsample_summits_key]):
# get atac timeseries files
timeseries_dir = args.outputs["results"]["atac"]["timepoint_region_dir"]
atac_timeseries_files = sorted(
glob.glob("{}/*narrowPeak.gz".format(timeseries_dir)))
# get best summits
get_best_summit(
out_results[subsample_bed_key],
atac_timeseries_files,
out_results[ordered_subsample_summits_key])
# plot ATAC with the summits
if True:
# adjust fake clusters file here to get proper row seps
subsample_mat_key = "{}.mat".format(subsample_bed_key.split(".bed")[0])
plot_atac_ordered_subsample_file = "{}.plot.mat".format(
out_results[subsample_mat_key].split(".mat")[0])
fake_clusters_df = pd.read_table(
out_results[subsample_mat_key])
colnames = list(fake_clusters_df.columns)
colnames[-1] = "cluster"
colnames[0] = "fake_cluster"
fake_clusters_df.columns = colnames
fake_clusters_df.to_csv(
plot_atac_ordered_subsample_file,
sep="\t", index=False)
plot_clusters(
out_results[atac_fake_clusters_key],
plot_atac_ordered_subsample_file,
out_data[atac_stable_mat_key],
plot_dir,
os.path.basename(plot_atac_ordered_subsample_file), # prefix
plot_individual=False)
# plot the histone signal profiles with deeptools
histone_colors = args.inputs["chipseq"][args.cluster]["histones"]["ordered_deeptools_colors"]
histone_r_colors = args.inputs["chipseq"][args.cluster]["histones"]["ordered_r_colors"]
for histone_idx in range(len(histones)):
histone = histones[histone_idx]
histone_color = histone_colors[histone_idx]
histone_r_color = histone_r_colors[histone_idx]
histone_bigwigs = sorted(
glob.glob("{}/{}".format(
args.inputs["chipseq"][args.cluster]["data_dir"],
args.inputs["chipseq"][args.cluster]["histones"][histone]["pooled_bigwig_glob"])))
out_prefix = "{}/{}.{}_overlap.{}".format(
plot_dir,
prefix,
histone,
subsample_bed_key.split(".")[-4])
out_file = "{}.heatmap.profile.pdf".format(out_prefix)
if not os.path.isfile(out_file):
make_deeptools_heatmap(
out_results[subsample_bed_key],
histone_bigwigs,
out_prefix,
sort=False,
referencepoint="center",
color=histone_color)
# and then make own heatmap file in R with matrix output
row_sep_file = "{}/{}.row_seps.txt".format(plot_dir, os.path.basename(plot_atac_ordered_subsample_file))
out_mat_file = "{}.point.mat.gz".format(out_prefix)
out_r_file = "{}.replot.pdf".format(out_file.split(".pdf")[0])
if not os.path.isfile(out_r_file):
replot = (
"plot.profile_heatmaps.R {} {} {} {} {} "
"1,100 101,200 201,300").format(
out_mat_file,
histone,
row_sep_file,
out_r_file,
histone_r_color)
run_shell_cmd(replot)
# -----------------------------------------
# ANALYSIS 5 - bioinformatics
# inputs: BED dirs
# outputs: HOMER/GREAT results
# -----------------------------------------
logger.info("ANALYSIS: run HOMER and GREAT")
bioinformatics_bed_dirs = [mark_bed_dir, state_bed_dir]
for bioinformatics_bed_dir in bioinformatics_bed_dirs:
if not os.path.isdir("{}/homer_HOCOMOCO".format(bioinformatics_bed_dir)):
bed_files = glob.glob("{}/*.bed.gz".format(bioinformatics_bed_dir))
# make a background bed file: stable + this info?
background_bed_file = "{}/{}.atac.background.bed.gz".format(
bioinformatics_bed_dir, prefix)
make_background = (
"zcat {0} {1} | "
"sort -k1,1 -k2,2n | "
"gzip -c > {2}").format(
" ".join(bed_files),
out_data["atac.stable.bed"],
background_bed_file)
run_shell_cmd(make_background)
# run files
for bed_file in bed_files:
run_bioinformatics_on_bed(
bed_file,
background_bed_file,
bioinformatics_bed_dir,
mknown=args.outputs["annotations"]["pwms.renamed.nonredundant.homer"],
mknown_name="HOCOMOCO")
return args
def run_chromatin_states_workflow(args, prefix):
"""aggregate information into chromatin states map
"""
# logging and folder set up
logger = logging.getLogger(__name__)
logger.info("WORKFLOW: aggregate chromatin states")
# assertions
# setup data and results
data_dir = args.outputs["data"]["dir"]
out_data = args.outputs["data"]
results_dirname = "summary"
results_dir = "{}/{}".format(
args.outputs["results"]["epigenome"]["dir"],
results_dirname)
args.outputs["results"]["epigenome"][results_dirname] = {
"dir": results_dir}
run_shell_cmd("mkdir -p {}".format(results_dir))
out_results = args.outputs["results"]["epigenome"][results_dirname]
# out file
state_summary_key = "chrom_states.mat"
state_summary_file = "{}/chrom_states.summary.txt".format(results_dir)
out_results[state_summary_key] = state_summary_file
if os.path.isfile(out_results[state_summary_key]):
return args
# load in relevant matrices with data
# ATAC mat, H3K27ac mat, H3K4me1 mat, H3K27me3 mat, overlaps
_MIN_REGION_NUM = 500
atac_rlog_mat = pd.read_csv(out_data["atac.counts.pooled.rlog.mat"], sep="\t")
H3K27ac_rlog_mat = pd.read_csv(out_data["H3K27ac.counts.pooled.rlog.mat"], sep="\t")
H3K4me1_rlog_mat = pd.read_csv(out_data["H3K4me1.counts.pooled.rlog.mat"], sep="\t")
H3K27me3_rlog_mat = pd.read_csv(out_data["H3K27me3.counts.pooled.rlog.mat"], sep="\t")
# -----------------------------------------
# ANALYSIS - pull in dynamic set
# inputs: BED dirs
# outputs: data mat of chrom states
# -----------------------------------------
atac_dpgp_dir = args.outputs["results"]["atac"]["timeseries"]["dp_gp"]["dir"]
traj_dir = "{}/reproducible/hard/reordered/".format(atac_dpgp_dir)
traj_region_id_files = sorted(glob.glob("{}/*cluster*txt.gz".format(traj_dir)))
# load in overlaps
overlaps_dir = "{}/overlap_histone".format(
args.outputs["results"]["epigenome"]["dynamic"]["dir"])
H3K27ac_overlaps = convert_overlaps_bed_to_id_mappings(
"{}/ggr.atac.ends.counts.pooled.rlog.dynamic.overlap.H3K27ac.tmp.bed.gz".format(
overlaps_dir),
extend_len=args.inputs["params"]["histones"]["H3K27ac"]["overlap_extend_len"])
H3K4me1_overlaps = convert_overlaps_bed_to_id_mappings(
"{}/ggr.atac.ends.counts.pooled.rlog.dynamic.overlap.H3K4me1.tmp.bed.gz".format(
overlaps_dir),
extend_len=args.inputs["params"]["histones"]["H3K4me1"]["overlap_extend_len"])
overlaps_dir = "{}/overlap_histone.inactive".format(
args.outputs["results"]["epigenome"]["dynamic"]["dir"])
H3K27me3_overlaps = convert_overlaps_bed_to_id_mappings(
"{}/ggr.atac.ends.counts.pooled.rlog.dynamic.overlap.H3K27me3.tmp.bed.gz".format(
overlaps_dir),
extend_len=args.inputs["params"]["histones"]["H3K27me3"]["overlap_extend_len"])
dynamic_histones = [
("H3K27ac", H3K27ac_overlaps, H3K27ac_rlog_mat),
("H3K4me1", H3K4me1_overlaps, H3K4me1_rlog_mat),
("H3K27me3", H3K27me3_overlaps, H3K27me3_rlog_mat)]
# set up chrom states/marks dirs
epigenome_dynamic_dir = args.outputs["results"]["epigenome"]["dynamic"]["dir"]
mark_region_id_dir = "{}/clusters/by_mark/ids".format(epigenome_dynamic_dir)
state_region_id_dir = "{}/clusters/by_state/ids".format(epigenome_dynamic_dir)
total_states = 0
full_summary = None # track: TRAJ (15), ATAC (10), H3K27ac (3), H3K4me1 (3), H3K27me3 (3)
trajectories = ["TRAJ.{}".format(val+1) for val in range(len(traj_region_id_files))]
for traj_region_id_file in traj_region_id_files:
# get cluster num and matching chrom mark/state files
cluster_prefix = os.path.basename(traj_region_id_file).split(".")[-3]
cluster_num = int(cluster_prefix.split("_")[-1])
mark_files = glob.glob("{}/*atac-{}.*.txt.gz".format(mark_region_id_dir, cluster_prefix))
state_files = glob.glob("{}/*atac-{}.*.txt.gz".format(state_region_id_dir, cluster_prefix))
print cluster_num
print cluster_prefix
print mark_files,
print state_files
# track regions
traj_regions = pd.read_csv(traj_region_id_file, sep="\t", header=None).iloc[:,0].values
# check chrom states first. if chrom states exist, use those
for state_file in state_files:
print state_file
# get the region ids
state_name = os.path.basename(state_file).split(".")[-3]
state_regions = pd.read_csv(state_file, sep="\t", header=None).iloc[:,0].values
# extract data
state_summary = get_aggregate_chromatin_state_summary(
state_regions, trajectories, cluster_num, atac_rlog_mat, dynamic_histones,
index=state_name)
if full_summary is None:
full_summary = state_summary.copy()
else:
full_summary = pd.concat([full_summary, state_summary], axis=0)
# keep track of whatever is left over
traj_regions = traj_regions[np.isin(traj_regions, state_regions, invert=True)]
print traj_regions.shape
total_states += 1
# if many regions still left (>500?), go to marks and use those.
print "after chrom states, num remaining:", traj_regions.shape[0]
if traj_regions.shape[0] >= _MIN_REGION_NUM:
used_mark_regions = [] # TODO add to here as going through, and then reduce out at end
for mark_file in mark_files:
# load, and only keep if > 500 in traj_regions
mark_regions = pd.read_csv(mark_file, sep="\t", header=None).iloc[:,0].values
mark_not_used_indices = np.where(np.isin(mark_regions, traj_regions))[0]
if mark_not_used_indices.shape[0] < _MIN_REGION_NUM:
continue
# if keeping, extract data
print mark_file
mark_name = os.path.basename(mark_file).split(".")[-3]
mark_not_used_regions = mark_regions[mark_not_used_indices]
mark_summary = get_aggregate_chromatin_state_summary(
mark_not_used_regions, trajectories, cluster_num, atac_rlog_mat, dynamic_histones,
index=mark_name)
if full_summary is None:
full_summary = mark_summary.copy()
else:
full_summary = pd.concat([full_summary, mark_summary], axis=0)
#print mark_not_used_regions.shape
used_mark_regions += list(mark_not_used_regions)
print len(used_mark_regions)
# keep track of whatever is left over
traj_regions = traj_regions[np.isin(traj_regions, used_mark_regions, invert=True)]
# if remainder in traj that are NOT in union (chrom states, marks)
# is greater than 500, include as null state (just ATAC, no chrom marks)
if traj_regions.shape[0] >= _MIN_REGION_NUM:
group_name = "Null"
null_summary = get_aggregate_chromatin_state_summary(
traj_regions, trajectories, cluster_num, atac_rlog_mat, dynamic_histones,
index=group_name)
if full_summary is None:
full_summary = null_summary.copy()
else:
full_summary = pd.concat([full_summary, null_summary], axis=0)
# -----------------------------------------
# ANALYSIS - pull in stable set
# inputs: BED dirs
# outputs: data mat of chrom states
# -----------------------------------------
stable_regions = pd.read_csv(
out_data["atac.counts.pooled.rlog.stable.mat"],
sep="\t", index_col=0).index.values
# load in overlaps
overlaps_dir = "{}/overlap_histone".format(
args.outputs["results"]["epigenome"]["stable"]["dir"])
H3K27ac_overlaps = convert_overlaps_bed_to_id_mappings(
"{}/ggr.atac.ends.counts.pooled.rlog.stable.overlap.H3K27ac.tmp.bed.gz".format(
overlaps_dir),
extend_len=args.inputs["params"]["histones"]["H3K27ac"]["overlap_extend_len"])
H3K4me1_overlaps = convert_overlaps_bed_to_id_mappings(
"{}/ggr.atac.ends.counts.pooled.rlog.stable.overlap.H3K4me1.tmp.bed.gz".format(
overlaps_dir),
extend_len=args.inputs["params"]["histones"]["H3K4me1"]["overlap_extend_len"])
H3K27me3_overlaps = convert_overlaps_bed_to_id_mappings(
"{}/ggr.atac.ends.counts.pooled.rlog.stable.overlap.H3K27me3.tmp.bed.gz".format(
overlaps_dir),
extend_len=args.inputs["params"]["histones"]["H3K27me3"]["overlap_extend_len"])
dynamic_histones = [
("H3K27ac", H3K27ac_overlaps, H3K27ac_rlog_mat),
("H3K4me1", H3K4me1_overlaps, H3K4me1_rlog_mat),
("H3K27me3", H3K27me3_overlaps, H3K27me3_rlog_mat)]
# set up chrom states/marks dirs
epigenome_stable_dir = args.outputs["results"]["epigenome"]["stable"]["dir"]
mark_region_id_dir = "{}/clusters/by_mark/ids".format(epigenome_stable_dir)
state_region_id_dir = "{}/clusters/by_state/ids".format(epigenome_stable_dir)
mark_files = sorted(glob.glob("{}/*.txt.gz".format(mark_region_id_dir)))
state_files = sorted(glob.glob("{}/*.txt.gz".format(state_region_id_dir)))
# look at chrom states
for state_file in state_files:
print state_file
# get the region ids
state_name = os.path.basename(state_file).split(".")[-3]
state_regions = pd.read_csv(state_file, sep="\t", header=None).iloc[:,0].values
# extract data
state_summary = get_aggregate_chromatin_state_summary(
state_regions, trajectories, None, atac_rlog_mat, dynamic_histones,
index=state_name)
if full_summary is None:
full_summary = state_summary.copy()
else:
full_summary = pd.concat([full_summary, state_summary], axis=0)
# keep track of whatever is left over
stable_regions = stable_regions[np.isin(stable_regions, state_regions, invert=True)]
print stable_regions.shape
total_states += 1
# sort and save out
sort_columns = trajectories + ["H3K27ac.max", "H3K4me1.max", "H3K27me3.max"]
full_summary = full_summary.sort_values(sort_columns, ascending=False)
full_summary.to_csv(out_results[state_summary_key], sep="\t")
return args
def _agg_deeptools_mat_data(mat_file, assay, extend_dist, bin_total):
"""summarize agg data
"""
mat = pd.read_csv(mat_file, sep="\t", header=None, comment="@")
mat = mat.iloc[:,6:]
mat = mat.mean(axis=0)
positions = np.arange(-extend_dist, extend_dist, bin_total)
positions = np.tile(positions, 3)
days = np.concatenate([
np.tile(["d0"], bin_total),
np.tile(["d3"], bin_total),
np.tile(["d6"], bin_total)])
agg_mat = pd.DataFrame({
"value": mat,
"position": positions,
"day": days})
agg_mat["assay"] = assay
return agg_mat
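# Illustrative usage sketch (the file name below is hypothetical): a deeptools
# point matrix produced by make_deeptools_heatmap can be collapsed to a mean
# profile per day like so; extend_dist and bin_total must match the values the
# matrix was generated with (100 bins per sample here).
#
# h3k27ac_profile = _agg_deeptools_mat_data(
#     "ggr.H3K27ac.point.mat.gz", assay="H3K27ac",
#     extend_dist=5000, bin_total=100)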
def plot_profile_heatmaps_workflow(args, tss_file, plot_prefix):
"""plot histone heatmaps for a specific set of tss regions (ordered
"""
# make a subsample file if necessary
atac_plot_file = "{}.ATAC.heatmap.profile.pdf".format(plot_prefix)
if not os.path.isfile(atac_plot_file):
tss_data = pd.read_csv(tss_file, sep="\t")
if tss_data.shape[0] > 1000:
step_size = int(tss_data.shape[0] / 1000)
keep_indices = np.linspace(0, tss_data.shape[0]-1, num=1000).astype(int)
#keep_indices = np.arange(0, tss_data.shape[0], step=step_size)
print tss_data.shape
tss_data = tss_data.iloc[keep_indices]
print tss_data.shape
subsample_file = "{}.subsample.tmp.bed.gz".format(plot_prefix)
tss_data.to_csv(subsample_file, sep="\t",
header=False, index=False, compression="gzip")
tss_file = subsample_file
# NOTE rowseps won't match if subsampling!!
print "NOTE rowseps not adjusted!!"
# plot ATAC-seq
#if not os.path.isfile("{}.atac.heatmap.pdf".format(plot_prefix)):
if False:
r_cmd = "~/git/ggr-project/R/plot.tss.timeseries_heatmap.R {} {} {}.atac ATAC-seq".format(
"{}.promoter_data.mat.txt.gz".format(plot_prefix),
"{}.row_seps.txt.gz".format(plot_prefix),
plot_prefix)
print r_cmd
os.system(r_cmd)
# plot ATAC seq as profile map
atac_bigwigs = sorted(
glob.glob("{}/{}".format(
args.inputs["atac"][args.cluster]["data_dir"],
args.inputs["atac"][args.cluster]["bigwig_pooled_glob"])))
atac_bigwigs = [atac_bigwigs[0], atac_bigwigs[6], atac_bigwigs[12]]
out_prefix = "{}.ATAC".format(plot_prefix)
plot_file = "{}.heatmap.profile.pdf".format(out_prefix)
if not os.path.isfile(plot_file):
make_deeptools_heatmap(
tss_file,
atac_bigwigs,
out_prefix,
sort=False,
referencepoint="center",
extend_dist=5000, #10000
bin_total=100, # note: do not adjust
color="Blues")
# make profile heatmap in R, with strand oriented TSS
row_sep_file = "{}.row_seps.txt.gz".format(plot_prefix)
out_mat_file = "{}.point.mat.gz".format(out_prefix)
out_r_file = "{}.replot.pdf".format(plot_file.split(".pdf")[0])
if not os.path.isfile(out_r_file):
# TODO - note - strands already adjusted
replot = (
"~/git/ggr-project/R/plot.profile_heatmaps.not_stranded.R {} {} | |
    area = 0
    for i in range(len(U) - 1):
        area += (U[i + 1][0] - U[i][0]) * (U[i + 1][1] + U[i][1])
part = area * 1.0 / G
cuts = []
iL = 0
iU = 0
w = U[0][1] - L[0][1]
a = 0
return cuts
def func_845fd3d315d84ec1a089aba57a7df9de(U, G, L):
    area = 0
    for i in range(len(U) - 1):
area += (U[i + 1][0] - U[i][0]) * (U[i + 1][1] + U[i][1])
part = area * 1.0 / G
cuts = []
iL = 0
iU = 0
w = U[0][1] - L[0][1]
a = 0
return a
def func_a7cf64d8b8234c7fbf2989bd2e46082f(U, G, L):
    area = 0
    for i in range(len(U) - 1):
area += (U[i + 1][0] - U[i][0]) * (U[i + 1][1] + U[i][1])
part = area * 1.0 / G
cuts = []
iL = 0
iU = 0
w = U[0][1] - L[0][1]
a = 0
return i
def func_2964406a78304ec6956b66fe162c27eb(U, G, L):
    area = 0
    for i in range(len(U) - 1):
area += (U[i + 1][0] - U[i][0]) * (U[i + 1][1] + U[i][1])
part = area * 1.0 / G
cuts = []
iL = 0
iU = 0
w = U[0][1] - L[0][1]
a = 0
return area
def func_bcf41b20339145d1b612b61b85e020b0(U, G, L):
    area = 0
    for i in range(len(U) - 1):
area += (U[i + 1][0] - U[i][0]) * (U[i + 1][1] + U[i][1])
part = area * 1.0 / G
cuts = []
iL = 0
iU = 0
w = U[0][1] - L[0][1]
a = 0
return iU
def func_a2610eccacb14075b09e71bfe1e5d869(U, G, L):
    area = 0
    for i in range(len(U) - 1):
area += (U[i + 1][0] - U[i][0]) * (U[i + 1][1] + U[i][1])
part = area * 1.0 / G
cuts = []
iL = 0
iU = 0
w = U[0][1] - L[0][1]
a = 0
return iL
def func_214216e0ace5488d9dc1402935c61518(U, G, L):
    area = 0
    for i in range(len(U) - 1):
area += (U[i + 1][0] - U[i][0]) * (U[i + 1][1] + U[i][1])
part = area * 1.0 / G
cuts = []
iL = 0
iU = 0
w = U[0][1] - L[0][1]
a = 0
return part
def func_653177098d4c4b6baffd02c2a23867d9(U, G, L):
    area = 0
    for i in range(len(U) - 1):
area += (U[i + 1][0] - U[i][0]) * (U[i + 1][1] + U[i][1])
part = area * 1.0 / G
cuts = []
iL = 0
iU = 0
w = U[0][1] - L[0][1]
a = 0
return w
def func_d2a78c9c121b436fa848672b75832eac(U, area, G, L):
part = area * 1.0 / G
cuts = []
iL = 0
iU = 0
w = U[0][1] - L[0][1]
a = 0
x = 0
return iL
def func_503e8f0c3c60418fbba7855bc7038332(U, area, G, L):
part = area * 1.0 / G
cuts = []
iL = 0
iU = 0
w = U[0][1] - L[0][1]
a = 0
x = 0
return w
def func_dca0bf793a8645e2955e229d4af7faa1(U, area, G, L):
part = area * 1.0 / G
cuts = []
iL = 0
iU = 0
w = U[0][1] - L[0][1]
a = 0
x = 0
return cuts
def func_a104e87559f845a0917034644287c9a4(U, area, G, L):
part = area * 1.0 / G
cuts = []
iL = 0
iU = 0
w = U[0][1] - L[0][1]
a = 0
x = 0
return a
def func_7d8d253922204f658368f018eecbf0ca(U, area, G, L):
part = area * 1.0 / G
cuts = []
iL = 0
iU = 0
w = U[0][1] - L[0][1]
a = 0
x = 0
return part
def func_df087a8908644067a3962322184711c7(U, area, G, L):
part = area * 1.0 / G
cuts = []
iL = 0
iU = 0
w = U[0][1] - L[0][1]
a = 0
x = 0
return x
def func_515a46e864dd4feeb19f09ab82f28dc5(U, area, G, L):
part = area * 1.0 / G
cuts = []
iL = 0
iU = 0
w = U[0][1] - L[0][1]
a = 0
x = 0
return iU
def func_eb856bd9cbb1460c973837505c11b0ff(U, part, L):
cuts = []
iL = 0
iU = 0
w = U[0][1] - L[0][1]
a = 0
x = 0
while True:
sL = (L[iL + 1][1] - L[iL][1]) * 1.0 / (L[iL + 1][0] - L[iL][0])
sU = (U[iU + 1][1] - U[iU][1]) * 1.0 / (U[iU + 1][0] - U[iU][0])
s = sU - sL
nxL = L[iL + 1][0]
nxU = U[iU + 1][0]
nx = min(nxL, nxU)
na = 2 * w * (nx - x) + s * (nx - x) * (nx - x)
if a + na >= part:
dx = (part - a) * 1.0 / (w + math.sqrt(w * w + (part - a) * s))
x += dx
a += 2 * w * dx + s * dx * dx
cuts.append(x)
w += s * dx
a = 0
else:
dx = nx - x
a += 2 * w * dx + s * dx * dx
x = nx
w += s * dx
if nx == nxL:
iL += 1
if nx == nxU:
iU += 1
if iL >= len(L) - 1:
break
if iU >= len(U) - 1:
break
return nx
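# Note on the cut-finding functions in this file (illustrative derivation,
# assuming U and L are the upper and lower boundary polylines of the region,
# listed left to right): the accumulator `a` holds TWICE the swept area, in the
# same convention as the trapezoid sums above,
# area += (x[i+1]-x[i])*(y[i+1]+y[i]). Over a stretch where the vertical gap is
# w at the current x and widens with slope s = sU - sL, twice the area gained
# over a step dx is 2*w*dx + s*dx**2. Setting that equal to the remaining
# target (part - a) and solving the quadratic for dx gives
#     dx = (-w + sqrt(w**2 + s*(part - a))) / s
#        = (part - a) / (w + sqrt(w**2 + s*(part - a)))
# and the code uses the second, rationalised form because it stays
# well-behaved as s approaches 0.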
def func_81369f0341024a4cab4d9d6598d4228d(U, part, L):
cuts = []
iL = 0
iU = 0
w = U[0][1] - L[0][1]
a = 0
x = 0
while True:
sL = (L[iL + 1][1] - L[iL][1]) * 1.0 / (L[iL + 1][0] - L[iL][0])
sU = (U[iU + 1][1] - U[iU][1]) * 1.0 / (U[iU + 1][0] - U[iU][0])
s = sU - sL
nxL = L[iL + 1][0]
nxU = U[iU + 1][0]
nx = min(nxL, nxU)
na = 2 * w * (nx - x) + s * (nx - x) * (nx - x)
if a + na >= part:
dx = (part - a) * 1.0 / (w + math.sqrt(w * w + (part - a) * s))
x += dx
a += 2 * w * dx + s * dx * dx
cuts.append(x)
w += s * dx
a = 0
else:
dx = nx - x
a += 2 * w * dx + s * dx * dx
x = nx
w += s * dx
if nx == nxL:
iL += 1
if nx == nxU:
iU += 1
if iL >= len(L) - 1:
break
if iU >= len(U) - 1:
break
return nxL
def func_9e59887caf6d4b65b8e4b64fd3854397(U, part, L):
cuts = []
iL = 0
iU = 0
w = U[0][1] - L[0][1]
a = 0
x = 0
while True:
sL = (L[iL + 1][1] - L[iL][1]) * 1.0 / (L[iL + 1][0] - L[iL][0])
sU = (U[iU + 1][1] - U[iU][1]) * 1.0 / (U[iU + 1][0] - U[iU][0])
s = sU - sL
nxL = L[iL + 1][0]
nxU = U[iU + 1][0]
nx = min(nxL, nxU)
na = 2 * w * (nx - x) + s * (nx - x) * (nx - x)
if a + na >= part:
dx = (part - a) * 1.0 / (w + math.sqrt(w * w + (part - a) * s))
x += dx
a += 2 * w * dx + s * dx * dx
cuts.append(x)
w += s * dx
a = 0
else:
dx = nx - x
a += 2 * w * dx + s * dx * dx
x = nx
w += s * dx
if nx == nxL:
iL += 1
if nx == nxU:
iU += 1
if iL >= len(L) - 1:
break
if iU >= len(U) - 1:
break
return w
def func_8c820020ff334cfc81d1a4717a86d3b3(U, part, L):
cuts = []
iL = 0
iU = 0
w = U[0][1] - L[0][1]
a = 0
x = 0
while True:
sL = (L[iL + 1][1] - L[iL][1]) * 1.0 / (L[iL + 1][0] - L[iL][0])
sU = (U[iU + 1][1] - U[iU][1]) * 1.0 / (U[iU + 1][0] - U[iU][0])
s = sU - sL
nxL = L[iL + 1][0]
nxU = U[iU + | |
= type_view_form_data(action="copy",
type_type_id="testtype",
type_entity_id="copytype"
)
u = entitydata_edit_url(
"copy", "testcoll", "testtype", entity_id="entity1", view_id="Type_view"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 401)
self.assertEqual(r.reason_phrase, "Unauthorized")
return
def test_post_copy_entity_cancel(self):
self.assertFalse(EntityData.exists(self.testdata, "copytype"))
f = type_view_form_data(action="copy",
type_type_id="testtype",
type_entity_id="copytype",
cancel="Cancel"
)
u = entitydata_edit_url("copy", "testcoll", "testtype", entity_id="entity1", view_id="Type_view")
r = self.client.post(u, f)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.reason_phrase, "Found")
self.assertEqual(r.content, b"")
self.assertEqual(r['location'], entitydata_list_type_url("testcoll", "testtype"))
# Check that target record type still does not exist
self.assertFalse(EntityData.exists(self.testdata, "copytype"))
return
def test_post_copy_entity_blank_id(self):
f = type_view_form_data(action="copy",
type_type_id="testtype",
type_entity_id="", orig_id="entity1"
)
u = entitydata_edit_url(
"copy", "testcoll", "testtype", entity_id="entity1", view_id="Type_view"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.reason_phrase, "OK")
self.assertContains(r, "<h3>Problem with entity identifier</h3>")
expect_context = type_view_context_data(
action="copy", type_type_id="testtype",
orig_id="entity1",
record_type="annal:EntityData"
)
self.assertDictionaryMatch(context_bind_fields(r.context), expect_context)
return
def test_post_copy_entity_missing_id(self):
# self.assertFalse(EntityData.exists(self.testdata, "orig_entity_id"))
f = type_view_form_data(action="copy",
type_type_id="testtype",
type_entity_id=None, orig_id="orig_entity_id",
update="updated1"
)
u = entitydata_edit_url(
"copy", "testcoll", "testtype", entity_id="entity1", view_id="Type_view"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.reason_phrase, "Found")
self.assertEqual(r.content, b"")
self.assertEqual(
r['location'], entitydata_list_type_url("testcoll", "testtype")
)
# Check that new record type exists with default id
self.assertTrue(EntityData.exists(self.testdata, "entity1"))
self._check_entity_data_values(entity_id="orig_entity_id", update="updated1")
return
def test_post_copy_entity_invalid_id(self):
f = type_view_form_data(action="copy",
type_type_id="testtype",
type_entity_id="!badentity", orig_id="entity1",
)
u = entitydata_edit_url(
"copy", "testcoll", "testtype", entity_id="entity1", view_id="Type_view"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.reason_phrase, "OK")
self.assertContains(r, "<h3>Problem with entity identifier</h3>")
expect_context = type_view_context_data(action="copy",
type_type_id="testtype",
type_entity_id="!badentity", orig_id="entity1",
record_type="annal:EntityData"
)
self.assertDictionaryMatch(context_bind_fields(r.context), expect_context)
return
def test_post_copy_entity_add_view_field(self):
self._create_entity_data("entyity1")
self._check_entity_data_values("entyity1")
f = type_view_form_data(action="copy",
type_type_id="testtype",
type_entity_id="entityaddfield",
update="Updated entity",
add_view_field="View_fields"
)
u = entitydata_edit_url("copy", "testcoll", "testtype", entity_id="entity1", view_id="Type_view")
r = self.client.post(u, f)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.reason_phrase, "Found")
self.assertEqual(r.content, b"")
v = entitydata_edit_url("edit", "testcoll", "_view", view_id="View_view", entity_id="Type_view")
w = entitydata_edit_url("edit", "testcoll", "testtype", entity_id="entityaddfield", view_id="Type_view")
c = continuation_url_param(w)
a = "add_field=View_fields"
self.assertIn(v, r['location'])
self.assertIn(c, r['location'])
self.assertIn(a, r['location'])
self._check_entity_data_values("entityaddfield", update="Updated entity")
return
def test_post_copy_entity_add_view_field_no_login(self):
self.client.logout()
f = type_view_form_data(action="copy",
type_type_id="testtype",
type_entity_id="entityaddfield",
update="Updated entity",
add_view_field="View_fields"
)
u = entitydata_edit_url("copy", "testcoll", "testtype", entity_id="entity1", view_id="Type_view")
r = self.client.post(u, f)
self.assertEqual(r.status_code, 401)
self.assertEqual(r.reason_phrase, "Unauthorized")
return
def test_post_copy_entity_edit_view(self):
self._create_entity_data("entyity1")
self._check_entity_data_values("entyity1")
f = type_view_form_data(action="copy",
type_type_id="testtype",
type_entity_id="entityeditview",
update="Updated entity",
open_view="Edit view"
)
u = entitydata_edit_url(
"copy", "testcoll", "testtype", entity_id="entity1", view_id="Type_view"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.reason_phrase, "Found")
self.assertEqual(r.content, b"")
v = entitydata_edit_url(
"edit", "testcoll", "_view", view_id="View_view", entity_id="Type_view"
)
w = entitydata_edit_url(
"edit", "testcoll", "testtype", entity_id="entityeditview", view_id="Type_view"
)
c = continuation_url_param(w)
self.assertIn(v, r['location'])
self.assertIn(c, r['location'])
self._check_entity_data_values("entityeditview", update="Updated entity")
return
def test_post_copy_entity_edit_view_no_login(self):
self.client.logout()
f = type_view_form_data(action="copy",
type_type_id="testtype",
type_entity_id="entityeditview",
update="Updated entity",
open_view="Edit view"
)
u = entitydata_edit_url(
"copy", "testcoll", "testtype", entity_id="entity1", view_id="Type_view"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 401)
self.assertEqual(r.reason_phrase, "Unauthorized")
return
def test_post_copy_entity_use_view(self):
self._create_entity_data("entityuseview")
self._check_entity_data_values("entityuseview")
f = default_view_form_data(action="copy",
entity_id="entityuseview1",
update="Updated entity",
use_view="_view/Type_view",
)
u = entitydata_edit_url("copy", "testcoll", "testtype", entity_id="entityuseview", view_id="Default_view")
r = self.client.post(u, f)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.reason_phrase, "Found")
self.assertEqual(r.content, b"")
v = entitydata_edit_url("edit", "testcoll", "testtype", entity_id="entityuseview1", view_id="Type_view")
c = continuation_url_param("/testsite/c/testcoll/d/testtype/")
self.assertIn(v, r['location'])
self.assertIn(c, r['location'])
self._check_entity_data_values("entityuseview1", update="Updated entity")
return
def test_post_copy_entity_use_view_no_login(self):
self.client.logout()
f = default_view_form_data(action="copy",
entity_id="entityuseview1",
update="Updated entity",
use_view="_view/Type_view",
)
u = entitydata_edit_url("copy", "testcoll", "testtype", entity_id="entityuseview", view_id="Default_view")
r = self.client.post(u, f)
self.assertEqual(r.status_code, 401)
self.assertEqual(r.reason_phrase, "Unauthorized")
return
def test_post_copy_entity_new_view(self):
self._create_entity_data("entitynewview")
self._check_entity_data_values("entitynewview")
f = default_view_form_data(action="copy",
entity_id="entitynewview1",
update="Updated entity",
new_view="New view"
)
u = entitydata_edit_url("copy", "testcoll", "testtype", entity_id="entitynewview", view_id="Default_view")
r = self.client.post(u, f)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.reason_phrase, "Found")
self.assertEqual(r.content, b"")
v = entitydata_edit_url("new", "testcoll", "_view", view_id="View_view")
w = entitydata_edit_url("edit", "testcoll", "testtype", entity_id="entitynewview1", view_id="Default_view")
c = continuation_url_param(w)
self.assertIn(v, r['location'])
self.assertIn(c, r['location'])
self._check_entity_data_values("entitynewview1", update="Updated entity")
return
def test_post_copy_entity_new_view_no_login(self):
self.client.logout()
f = default_view_form_data(action="copy",
entity_id="entitynewview1",
update="Updated entity",
new_view="New view"
)
u = entitydata_edit_url("copy", "testcoll", "testtype", entity_id="entitynewview", view_id="Default_view")
r = self.client.post(u, f)
self.assertEqual(r.status_code, 401)
self.assertEqual(r.reason_phrase, "Unauthorized")
return
def test_post_copy_entity_new_field(self):
self._create_entity_data("entitynewfield")
self._check_entity_data_values("entitynewfield")
f = default_view_form_data(action="copy",
entity_id="entitynewfield1",
update="Updated entity",
new_field="New field"
)
u = entitydata_edit_url("copy", "testcoll", "testtype", entity_id="entitynewfield", view_id="Default_view")
r = self.client.post(u, f)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.reason_phrase, "Found")
self.assertEqual(r.content, b"")
v = entitydata_edit_url("new", "testcoll", "_field", view_id="Field_view")
w = entitydata_edit_url("edit", "testcoll", "testtype", entity_id="entitynewfield1", view_id="Default_view")
c = continuation_url_param(w)
self.assertIn(v, r['location'])
self.assertIn(c, r['location'])
self._check_entity_data_values("entitynewfield1", update="Updated entity")
return
def test_post_copy_entity_new_field_no_login(self):
self.client.logout()
f = default_view_form_data(action="copy",
entity_id="entitynewfield1",
update="Updated entity",
new_field="New field"
)
u = entitydata_edit_url("copy", "testcoll", "testtype", entity_id="entitynewfield", view_id="Default_view")
r = self.client.post(u, f)
self.assertEqual(r.status_code, 401)
self.assertEqual(r.reason_phrase, "Unauthorized")
return
def test_post_copy_entity_new_type(self):
self._create_entity_data("entitynewtype")
self._check_entity_data_values("entitynewtype")
f = default_view_form_data(action="copy",
entity_id="entitynewtype1",
update="Updated entity",
new_type="New type"
)
u = entitydata_edit_url("copy", "testcoll", "testtype", entity_id="entitynewtype", view_id="Default_view")
r = self.client.post(u, f)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.reason_phrase, "Found")
self.assertEqual(r.content, b"")
v = entitydata_edit_url("new", "testcoll", "_type", view_id="Type_view")
w = entitydata_edit_url("edit", "testcoll", "testtype", entity_id="entitynewtype1", view_id="Default_view")
c = continuation_url_param(w)
self.assertIn(v, r['location'])
self.assertIn(c, r['location'])
self._check_entity_data_values("entitynewtype1", update="Updated entity")
return
def test_post_copy_entity_new_type_no_login(self):
self.client.logout()
f = default_view_form_data(action="copy",
entity_id="entitynewtype1",
update="Updated entity",
new_type="New type"
)
u = entitydata_edit_url("copy", "testcoll", "testtype", entity_id="entitynewtype", view_id="Default_view")
r = self.client.post(u, f)
self.assertEqual(r.status_code, 401)
self.assertEqual(r.reason_phrase, "Unauthorized")
return
# -------- edit type --------
def test_post_edit_entity(self):
self._create_entity_data("entityedit")
e1 = self._check_entity_data_values("entityedit")
f = type_view_form_data(action="edit",
type_type_id="testtype",
type_entity_id="entityedit",
update="Updated entity"
)
u = entitydata_edit_url(
"edit", "testcoll", "testtype", entity_id="entityedit", view_id="Type_view"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.reason_phrase, "Found")
self.assertEqual(r.content, b"")
self.assertEqual(r['location'], entitydata_list_type_url("testcoll", "testtype"))
self._check_entity_data_values("entityedit", update="Updated entity")
return
def test_post_edit_entity_blank_label_comment(self):
self._create_entity_data("entityedit")
e1 = self._check_entity_data_values("entityedit")
f = type_view_form_data(action="edit",
type_type_id="testtype",
type_entity_id="entityedit",
update="Updated entity"
)
f['Type_label'] = ""
f['Type_comment'] = ""
u = entitydata_edit_url(
"edit", "testcoll", "testtype", entity_id="entityedit", view_id="Type_view"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.reason_phrase, "Found")
self.assertEqual(r.content, b"")
self.assertEqual(
r['location'], entitydata_list_type_url("testcoll", "testtype")
)
self._check_entity_data_values("entityedit", update="Updated entity",
update_dict=
{ 'rdfs:label': "Entityedit"
, 'rdfs:comment': "Entityedit"
}
)
return
def test_post_edit_entity_no_login(self):
self.client.logout()
f = type_view_form_data(action="edit", type_entity_id="edittype")
u = entitydata_edit_url(
"edit", "testcoll", "testtype", entity_id="entity1", view_id="Type_view"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 401)
self.assertEqual(r.reason_phrase, "Unauthorized")
return
def test_post_edit_entity_new_id(self):
# Also tests continuation URL update when entity Id is changed
self._create_entity_data("entityeditid1")
e1 = self._check_entity_data_values("entityeditid1")
c1 = entitydata_edit_url("view", "testcoll", "testtype", entity_id="entityeditid1", view_id="Type_view")
# Now post edit form submission with different values and new id
f = type_view_form_data(action="edit",
type_type_id="testtype",
type_entity_id="entityeditid2", orig_id="entityeditid1"
)
f['continuation_url'] = c1
u = entitydata_edit_url("edit", "testcoll", "testtype", entity_id="entityeditid1", view_id="Type_view")
r = self.client.post(u, f)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.reason_phrase, "Found")
self.assertEqual(r.content, b"")
c2 = entitydata_edit_url("view", "testcoll", "testtype", entity_id="entityeditid2", view_id="Type_view")
self.assertEqual(r['location'], c2)
# self.assertEqual(r['location'], entitydata_list_type_url("testcoll", "testtype"))
# Check that new record type exists and old does not
self.assertFalse(EntityData.exists(self.testdata, "entityeditid1"))
self._check_entity_data_values("entityeditid2", update="RecordType")
return
def test_post_edit_entity_new_type(self):
# NOTE that the RecordType_view form does not include a type field, but the new-type
# logic is checked by the test_entitydefaultedit suite
self._create_entity_data("entityedittype")
self._check_entity_data_values("entityedittype")
self.assertFalse(RecordType.exists(self.testcoll, "newtype"))
newtype = RecordType.create(self.testcoll, "newtype", recordtype_create_values("newtype"))
newtypedata = RecordTypeData(self.testcoll, "newtype")
self.assertTrue(RecordType.exists(self.testcoll, "newtype"))
self.assertFalse(RecordTypeData.exists(self.testcoll, "newtype"))
# Now post edit form submission with new type id
f = type_view_form_data(action="edit",
type_entity_id="entityedittype", orig_id="entityedittype",
type_type_id="newtype", orig_type="testtype"
)
u = entitydata_edit_url("edit", "testcoll", "testtype", entity_id="entityedittype", view_id="Default_view")
r = self.client.post(u, f)
# log.info("***********\n"+r.content)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.reason_phrase, "Found")
self.assertEqual(r.content, b"")
self.assertEqual(r['location'], entitydata_list_type_url("testcoll", "testtype"))
self.assertFalse(EntityData.exists(self.testdata, "entityedittype"))
self.assertTrue(EntityData.exists(newtypedata, "entityedittype"))
return
def test_post_edit_entity_new_builtin_type(self):
# Test logic for changing type to built-in type (_field)
self._create_entity_data("entityedittype")
self._check_entity_data_values("entityedittype")
self.assertFalse(RecordType.exists(self.testcoll, "newtype"))
# Now post edit form submission with new type id set to "_field"
f = type_view_form_data(action="edit",
type_entity_id="entityedittype", orig_id="entityedittype",
type_type_id="_field", orig_type="testtype"
)
u = entitydata_edit_url("edit", "testcoll", "testtype", entity_id="entityedittype", view_id="Default_view")
r = self.client.post(u, f)
# log.info("***********\n"+r.content)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.reason_phrase, "Found")
self.assertEqual(r.content, b"")
self.assertEqual(r['location'], entitydata_list_type_url("testcoll", "_field"))
self.assertFalse(EntityData.exists(self.testdata, "entityedittype"))
self.assertTrue(RecordField.exists(self.testcoll, "entityedittype"))
return
def test_post_edit_entity_cancel(self):
self._create_entity_data("edittype")
self._check_entity_data_values("edittype")
# Post from cancelled edit form
f = type_view_form_data(action="edit",
type_type_id="testtype",
type_entity_id="edittype",
update="Updated entity",
cancel="Cancel"
)
u = entitydata_edit_url(
"edit", "testcoll", "testtype", entity_id="edittype", view_id="Type_view"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.reason_phrase, "Found")
self.assertEqual(r.content, b"")
self.assertEqual(r['location'], entitydata_list_type_url("testcoll", "testtype"))
# Check that target record type still does not exist and unchanged
self._check_entity_data_values("edittype")
return
def test_post_edit_entity_blank_id(self):
self._create_entity_data("edittype")
self._check_entity_data_values("edittype")
# Form post with ID missing
f = type_view_form_data(action="edit",
type_type_id="testtype",
type_entity_id="", orig_id="edittype",
update="Updated entity"
)
u = entitydata_edit_url(
"edit", "testcoll", "testtype", entity_id="edittype", view_id="Type_view"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.reason_phrase, "OK")
self.assertContains(r, "<h3>Problem with entity identifier</h3>")
# Test context for re-rendered form
expect_context = type_view_context_data(action="edit",
type_type_id="testtype",
orig_id="edittype",
record_type="test:testtype",
update="Updated entity"
)
self.assertDictionaryMatch(context_bind_fields(r.context), expect_context)
# Check stored entity is unchanged
self._check_entity_data_values("edittype")
return
def test_post_edit_entity_missing_id(self):
self.assertTrue(EntityData.exists(self.testdata, "entity1"))
f = type_view_form_data(action="edit",
type_type_id="testtype",
type_entity_id=None, orig_id="entity1",
update="updated1"
)
u = entitydata_edit_url(
"edit", "testcoll", "testtype", entity_id="entity1", view_id="Type_view"
)
r = self.client.post(u, f)
| |
depressivo grave sem sintomas psicóticos'),
('F32.3', 'Episódio depressivo grave com sintomas psicóticos'),
('F32.8', 'Outros episódios depressivos'),
('F32.9', 'Episódio depressivo não especificado'),
('F33.0', 'Transtorno depressivo recorrente, episódio atual leve'),
('F33.1', 'Transtorno depressivo recorrente, episódio atual moderado'),
('F33.2', 'Transtorno depressivo recorrente, episódio atual grave sem sintomas psicóticos'),
('F33.3', 'Transtorno depressivo recorrente, episódio atual grave com sintomas psicóticos'),
('F33.4', 'Transtorno depressivo recorrente, atualmente em remissão'),
('F33.8', 'Outros transtornos depressivos recorrentes'),
('F33.9', 'Transtorno depressivo recorrente sem especificação'),
('F34.0', 'Ciclotimia'),
('F34.1', 'Distimia'),
('F34.8', 'Outros transtornos do humor [afetivos] persistentes'),
('F34.9', 'Transtorno do humor [afetivo] persistente não especificado'),
('F38.0', 'Outros transtornos do humor [afetivos] isolados'),
('F38.1', 'Outros transtornos do humor [afetivos] recorrentes'),
('F38.8', 'Outros transtornos especificados do humor [afetivos]'),
('F39', ' Transtorno do humor [afetivo] não especificado'),
('F40.0', 'Agorafobia'),
('F40.1', 'Fobias sociais'),
('F40.2', 'Fobias específicas (isoladas)'),
('F40.8', 'Outros transtornos fóbico-ansiosos'),
('F40.9', 'Transtorno fóbico-ansioso não especificado'),
('F41.0', 'Transtorno de pânico [ansiedade paroxística episódica]'),
('F41.1', 'Ansiedade generalizada'),
('F41.2', 'Transtorno misto ansioso e depressivo'),
('F41.3', 'Outros transtornos ansiosos mistos'),
('F41.8', 'Outros transtornos ansiosos especificados'),
('F41.9', 'Transtorno ansioso não especificado'),
('F42.0', 'Com predominância de idéias ou de ruminações obsessivas'),
('F42.1', 'Com predominância de comportamentos compulsivos [rituais obsessivos]'),
('F42.2', 'Forma mista, com idéias obsessivas e comportamentos compulsivos'),
('F42.8', 'Outros transtornos obsessivo-compulsivos'),
('F42.9', 'Transtorno obsessivo-compulsivo não especificado'),
('F43.0', 'Reação aguda ao "stress"'),
('F43.1', 'Estado de "stress" pós-traumático'),
('F43.2', 'Transtornos de adaptação'),
('F43.8', 'Outras reações ao "stress" grave'),
('F43.9', 'Reação não especificada a um "stress" grave'),
('F44.0', 'Amnésia dissociativa'),
('F44.1', 'Fuga dissociativa'),
('F44.2', 'Estupor dissociativo'),
('F44.3', 'Estados de transe e de possessão'),
('F44.4', 'Transtornos dissociativos do movimento'),
('F44.5', 'Convulsões dissociativas'),
('F44.6', 'Anestesia e perda sensorial dissociativas'),
('F44.7', 'Transtorno dissociativo misto [de conversão]'),
('F44.8', 'Outros transtornos dissociativos [de conversão]'),
('F44.9', 'Transtorno dissociativo [de conversão] não especificado'),
('F45.0', 'Transtorno de somatização'),
('F45.1', 'Transtorno somatoforme indiferenciado'),
('F45.2', 'Transtorno hipocondríaco'),
('F45.3', 'Transtorno neurovegetativo somatoforme'),
('F45.4', 'Transtorno doloroso somatoforme persistente'),
('F45.8', 'Outros transtornos somatoformes'),
('F45.9', 'Transtorno somatoforme não especificado'),
('F48.0', 'Neurastenia'),
('F48.1', 'Síndrome de despersonalização-desrealização'),
('F48.8', 'Outros transtornos neuróticos especificados'),
('F48.9', 'Transtorno neurótico não especificado'),
('F50.0', 'Anorexia nervosa'),
('F50.1', 'Anorexia nervosa atípica'),
('F50.2', 'Bulimia nervosa'),
('F50.3', 'Bulimia nervosa atípica'),
('F50.4', 'Hiperfagia associada a outros distúrbios psicológicos'),
('F50.5', 'Vômitos associados a outros distúrbios psicológicos'),
('F50.8', 'Outros transtornos da alimentação'),
('F50.9', 'Transtorno de alimentação não especificado'),
('F51.0', 'Insônia não-orgânica'),
('F51.1', 'Hipersonia não-orgânica'),
('F51.2', 'Transtorno do ciclo vigília-sono devido a fatores não-orgânicos'),
('F51.3', 'Sonambulismo'),
('F51.4', 'Terrores noturnos'),
('F51.5', 'Pesadelos'),
('F51.8', 'Outros transtornos do sono devidos a fatores não-orgânicos'),
('F51.9', 'Transtorno do sono devido a fatores não-orgânicos não especificados'),
('F52.0', 'Ausência ou perda do desejo sexual'),
('F52.1', 'Aversão sexual e ausência de prazer sexual'),
('F52.2', 'Falha de resposta genital'),
('F52.3', 'Disfunção orgásmica'),
('F52.4', 'Ejaculação precoce'),
('F52.5', 'Vaginismo não-orgânico'),
('F52.6', 'Dispareunia não-orgânica'),
('F52.7', 'Apetite sexual excessivo'),
('F52.8', 'Outras disfunções sexuais não devidas a transtorno ou à doença orgânica'),
('F52.9', 'Disfunção sexual não devida a transtorno ou à doença orgânica, não especificada'),
('F53.0', 'Transtornos mentais e comportamentais leves associados ao puerpério não classificados em outra parte'),
('F53.1', 'Transtornos mentais e comportamentais graves associados ao puerpério não classificados em outra parte'),
('F53.8', 'Outros transtornos mentais e comportamentais associados ao puerpério não classificados em outra parte'),
('F53.9', 'Transtorno mental e comportamental associado ao puerpério, não especificado'),
('F54', ' Fatores psicológicos ou comportamentais associados a doença ou a transtornos classificados em outra parte'),
('F55', ' Abuso de substâncias que não produzem dependência'),
('F59', ' Síndromes comportamentais associados a transtornos das funções fisiológicas e a fatores físicos, não especificadas'),
('F60.0', 'Personalidade paranóica'),
('F60.1', 'Personalidade esquizóide'),
('F60.2', 'Personalidade dissocial'),
('F60.3', 'Transtorno de personalidade com instabilidade emocional'),
('F60.4', 'Personalidade histriônica'),
('F60.5', 'Personalidade anancástica'),
('F60.6', 'Personalidade ansiosa [esquiva]'),
('F60.7', 'Personalidade dependente'),
('F60.8', 'Outros transtornos específicos da personalidade'),
('F60.9', 'Transtorno não especificado da personalidade'),
('F61', ' Transtornos mistos da personalidade e outros transtornos da personalidade'),
('F62.0', 'Modificação duradoura da personalidade após uma experiência catastrófica'),
('F62.1', 'Modificação duradoura da personalidade após doença psiquiátrica'),
('F62.8', 'Outras modificações duradouras da personalidade'),
('F62.9', 'Modificação duradoura da personalidade, não especificada'),
('F63.0', 'Jogo patológico'),
('F63.1', 'Piromania'),
('F63.2', 'Roubo patológico [cleptomania]'),
('F63.3', 'Tricotilomania'),
('F63.8', 'Outros transtornos dos hábitos e dos impulsos'),
('F63.9', 'Transtorno dos hábitos e impulsos, não especificado'),
('F64.0', 'Transexualismo'),
('F64.1', 'Travestismo bivalente'),
('F64.2', 'Transtorno de identidade sexual na infância'),
('F64.8', 'Outros transtornos da identidade sexual'),
('F64.9', 'Transtorno não especificado da identidade sexual'),
('F65.0', 'Fetichismo'),
('F65.1', 'Travestismo fetichista'),
('F65.2', 'Exibicionismo'),
('F65.3', 'Voyeurismo'),
('F65.4', 'Pedofilia'),
('F65.5', 'Sadomasoquismo'),
('F65.6', 'Transtornos múltiplos da preferência sexual'),
('F65.8', 'Outros transtornos da preferência sexual'),
('F65.9', 'Transtorno da preferência sexual, não especificado'),
('F66.0', 'Transtorno da maturação sexual'),
('F66.1', 'Orientação sexual egodistônica'),
('F66.2', 'Transtorno do relacionamento sexual'),
('F66.8', 'Outros transtornos do desenvolvimento psicossexual'),
('F66.9', 'Transtorno do desenvolvimento sexual, não especificado'),
('F68.0', 'Sintomas físicos aumentados por fatores psicológicos'),
('F68.1', 'Produção deliberada ou simulação de sintomas ou de incapacidades, físicas ou psicológicas [transtorno factício]'),
('F68.8', 'Outros transtornos especificados da personalidade e do comportamento do adulto'),
('F69', ' Transtorno da personalidade e do comportamento do adulto, não especificado'),
('F70.0', 'Retardo mental leve - menção de ausência de ou de comprometimento mínimo do comportamento'),
('F70.1', 'Retardo mental leve - comprometimento significativo do comportamento, requerendo vigilância ou tratamento'),
('F70.8', 'Retardo mental leve - outros comprometimentos do comportamento'),
('F70.9', 'Retardo mental leve - sem menção de comprometimento do comportamento'),
('F71.0', 'Retardo mental moderado - menção de ausência de ou de comprometimento mínimo do comportamento'),
('F71.1', 'Retardo mental moderado - comprometimento significativo do comportamento, requerendo vigilância ou tratamento'),
('F71.8', 'Retardo mental moderado - outros comprometimentos do comportamento'),
('F71.9', 'Retardo mental moderado - sem menção de comprometimento do comportamento'),
('F72.0', 'Retardo mental grave - menção de ausência de ou de comprometimento mínimo do comportamento'),
('F72.1', 'Retardo mental grave - comprometimento significativo do comportamento, requerendo vigilância ou tratamento'),
('F72.8', 'Retardo mental grave - outros comprometimentos do comportamento'),
('F72.9', 'Retardo mental grave - sem menção de comprometimento do comportamento'),
('F73.0', 'Retardo mental profundo - menção de ausência de ou de comprometimento mínimo do comportamento'),
('F73.1', 'Retardo mental profundo - comprometimento significativo do comportamento, requerendo vigilância ou tratamento'),
('F73.8', 'Retardo mental profundo - outros comprometimentos do comportamento'),
('F73.9', 'Retardo mental profundo - sem menção de comprometimento do comportamento'),
('F78.0', 'Outro retardo mental - menção de ausência de ou de comprometimento mínimo do comportamento'),
('F78.1', 'Outro retardo mental - comprometimento significativo do comportamento, requerendo vigilância ou tratamento'),
('F78.8', 'Outro retardo mental - outros comprometimentos do comportamento'),
('F78.9', 'Outro retardo mental - sem menção de comprometimento do comportamento'),
('F79.0', 'Retardo mental não especificado - menção de ausência de ou de comprometimento mínimo do comportamento'),
('F79.1', 'Retardo mental não especificado - comprometimento significativo do comportamento, requerendo vigilância ou tratamento'),
('F79.8', 'Retardo mental não especificado - outros comprometimentos do comportamento'),
('F79.9', 'Retardo mental não especificado - sem menção de comprometimento do comportamento'),
('F80.0', 'Transtorno específico da articulação da fala'),
('F80.1', 'Transtorno expressivo de linguagem'),
('F80.2', 'Transtorno receptivo da linguagem'),
('F80.3', 'Afasia adquirida com epilepsia [síndrome de Landau-Kleffner]'),
('F80.8', 'Outros transtornos de desenvolvimento da fala ou da linguagem'),
('F80.9', 'Transtorno não especificado do desenvolvimento da fala ou da linguagem'),
('F81.0', 'Transtorno específico de leitura'),
('F81.1', 'Transtorno específico da soletração'),
('F81.2', 'Transtorno específico da habilidade em aritmética'),
('F81.3', 'Transtorno misto de habilidades escolares'),
('F81.8', 'Outros transtornos do desenvolvimento das habilidades escolares'),
('F81.9', 'Transtorno não especificado do desenvolvimento das habilidades escolares'),
('F82', ' Transtorno específico do desenvolvimento motor'),
('F83', ' Transtornos específicos misto do desenvolvimento'),
('F84.0', 'Autismo infantil'),
('F84.1', 'Autismo atípico'),
('F84.2', 'Síndrome de Rett'),
('F84.3', 'Outro transtorno desintegrativo da infância'),
('F84.4', 'Transtorno com hipercinesia associada a retardo mental e a movimentos estereotipados'),
('F84.5', 'Síndrome de Asperger'),
('F84.8', 'Outros transtornos globais do desenvolvimento'),
('F84.9', 'Transtornos globais não especificados do desenvolvimento'),
('F88', ' Outros transtornos do desenvolvimento psicológico'),
('F89', ' Transtorno do desenvolvimento psicológico não especificado'),
('F90.0', 'Distúrbios da atividade e da atenção'),
('F90.1', 'Transtorno hipercinético de conduta'),
('F90.8', 'Outros transtornos hipercinéticos'),
('F90.9', 'Transtorno hipercinético não especificado'),
('F91.0', 'Distúrbio de conduta restrito ao contexto familiar'),
('F91.1', 'Distúrbio de conduta não-socializado'),
('F91.2', 'Distúrbio de conduta do tipo socializado'),
('F91.3', 'Distúrbio desafiador e de oposição'),
('F91.8', 'Outros transtornos de conduta'),
('F91.9', 'Transtorno de conduta não especificado'),
('F92.0', 'Distúrbio depressivo de conduta'),
('F92.8', 'Outros transtornos mistos da conduta e das emoções'),
('F92.9', 'Transtorno misto da conduta e das emoções não especificado'),
('F93.0', 'Transtorno ligado à angústia de separação'),
('F93.1', 'Transtorno fóbico ansioso da infância'),
('F93.2', 'Distúrbio de ansiedade social da infância'),
('F93.3', 'Transtorno de rivalidade entre irmãos'),
('F93.8', 'Outros transtornos emocionais da infância'),
('F93.9', 'Transtorno emocional da infância não especificado'),
('F94.0', 'Mutismo eletivo'),
('F94.1', 'Distúrbio reativo de vinculação da infância'),
('F94.2', 'Transtorno de fixação da infância, com desinibição'),
('F94.8', 'Outros transtornos do funcionamento social na infância'),
('F94.9', 'Transtorno do funcionamento social da infância não especificado'),
('F95.0', 'Tique transitório'),
('F95.1', 'Tique motor ou vocal crônico'),
('F95.2', 'Tiques vocais e motores múltiplos combinados [doença de Gilles de la Tourette]'),
('F95.8', 'Outros tiques'),
('F95.9', 'Tique não especificado'),
('F98.0', 'Enurese de origem não-orgânica'),
('F98.1', 'Encoprese de origem não-orgânica'),
('F98.2', 'Transtorno de alimentação na infância'),
('F98.3', 'Pica do lactente ou da criança'),
('F98.4', 'Estereotipias motoras'),
('F98.5', 'Gagueira [tartamudez]'),
('F98.6', 'Linguagem precipitada'),
('F98.8', 'Outros transtornos comportamentais e emocionais especificados com início habitualmente na infância ou adolescência'),
('F98.9', 'Transtornos comportamentais e emocionais não especificados com início habitualmente na infância ou adolescência'),
('F99', ' Transtorno mental não especificado em outra parte'),
if self.shifted_pdb_info:
self.shifted_pdb_info.show_summary(out=out)
if self.shifted_ncs_info:
self.shifted_ncs_info.show_summary(out=out)
if self.output_ncs_au_pdb_info:
print("\nOutput PDB file with dummy atoms representing the NCS AU:", file=out)
self.output_ncs_au_pdb_info.show_summary(out=out)
if self.output_ncs_au_mask_info or self.output_ncs_au_map_info:
print("\nOutput map files showing just the NCS AU (same size", end=' ', file=out)
if self.origin_shift and self.origin_shift != (0,0,0):
print("\nand location as shifted map files:\n", file=out)
else:
print("\nand location as input map:\n", file=out)
if self.output_ncs_au_mask_info:
self.output_ncs_au_mask_info.show_summary(out=out)
if self.output_ncs_au_map_info:
self.output_ncs_au_map_info.show_summary(out=out)
if self.output_box_mask_info or self.output_box_map_info:
print("\nOutput cut-out map files trimmed to contain just "+\
"the \nNCS AU (superimposed on", end=' ', file=out)
if self.origin_shift and self.origin_shift != (0,0,0):
print("shifted map files, note origin offset):\n", file=out)
else:
print("input map, note origin offset):\n", file=out)
if self.output_box_mask_info:
self.output_box_mask_info.show_summary(out=out)
if self.output_box_map_info:
self.output_box_map_info.show_summary(out=out)
if self.output_region_pdb_info_list:
print("\nOutput PDB files representing one region of connected"+\
" density.\nThese are useful for marking where to look in cut-out map"+\
" files.", file=out)
for output_region_pdb_info in self.output_region_pdb_info_list:
output_region_pdb_info.show_summary(out=out)
if self.output_region_map_info_list:
print("\nOutput cut-out map files trimmed to contain just "+\
"one region of \nconnected density (superimposed on", end=' ', file=out)
if self.origin_shift and self.origin_shift != (0,0,0):
print("shifted map files, note origin offset):\n", file=out)
else:
print(" input map, note origin offset):\n", file=out)
for output_region_map_info in self.output_region_map_info_list:
output_region_map_info.show_summary(out=out)
print("\n"+50*"="+"\n", file=out)
class make_ccp4_map: # just a holder so map_to_structure_factors will run
def __init__(self,map=None,unit_cell=None):
self.data=map
self.unit_cell_parameters=unit_cell.parameters()
self.space_group_number=1
self.unit_cell_grid=map.all()
def crystal_symmetry(self):
return crystal.symmetry(self.unit_cell_parameters,
self.space_group_number)
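# Usage sketch for make_ccp4_map (the names map_data and crystal_symmetry are
# hypothetical, not defined here): wrap a flex map plus a unit cell so downstream
# code can read .data, .unit_cell_grid and .crystal_symmetry() as it would from a
# real CCP4 map object:
#   ccp4_like = make_ccp4_map(map=map_data, unit_cell=crystal_symmetry.unit_cell())
#   cs = ccp4_like.crystal_symmetry()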
class b_vs_region_info:
def __init__(self):
self.b_iso=0.
self.b_vs_region_dict={}
self.sa_sum_v_vs_region_dict={}
self.sa_nn_vs_region_dict={}
self.sa_ratio_b_vs_region_dict={}
class box_sharpening_info:
def __init__(self,tracking_data=None,
crystal_symmetry=None,
solvent_fraction=None,
b_iso=None,
resolution=None,
d_min_ratio=None,
scale_max=None,
lower_bounds=None,
upper_bounds=None,
wrapping=None,
n_real=None,
n_buffer=None,
map_data=None,
smoothing_radius=None,
smoothed_box_mask_data=None,
original_box_map_data=None,
):
from libtbx import adopt_init_args
adopt_init_args(self, locals())
del self.tracking_data # do not save it
if tracking_data:
self.crystal_symmetry=tracking_data.crystal_symmetry
self.solvent_fraction=tracking_data.solvent_fraction
self.wrapping=tracking_data.params.crystal_info.use_sg_symmetry
def get_gaussian_weighting(self,out=sys.stdout):
# return a gaussian function centered on center of the map, fall-off
# based on smoothing_radius
# Calculate weight map, max near location of centers_ncs_cart
# U=rmsd**2
# (b_eff=8*3.14159**2*U)
# rmsd is at least distance between centers, not too much bigger than
# unit cell size, typically 10-20 A,
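# Illustrative numbers only: smoothing_radius = 10 A gives u = rmsd**2 = 100 A**2,
# i.e. b_eff = 8 * 3.14159**2 * 100 ~ 7.9e3 A**2, a very broad Gaussian weight
# centred on the middle of the box.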
print("\nFall-off of local weight is 1/%6.1f A\n" %(
self.smoothing_radius), file=out)
u=self.smoothing_radius**2
from cctbx import xray
xrs,scatterers=set_up_xrs(crystal_symmetry=self.crystal_symmetry)
unit_cell=self.crystal_symmetry.unit_cell()
for xyz_fract in [(0.5,0.5,0.5,)]:
scatterers.append( xray.scatterer(scattering_type="H", label="H",
site=xyz_fract, u=u, occupancy=1.0))
xrs = xray.structure(xrs, scatterers=scatterers)
f_array,phases=get_f_phases_from_map(map_data=self.map_data,
crystal_symmetry=self.crystal_symmetry,
d_min=self.resolution,
scale_max=self.scale_max,
d_min_ratio=self.d_min_ratio,
get_remove_aniso_object=False,# don't need it
out=out)
weight_f_array=f_array.structure_factors_from_scatterers(
algorithm = 'direct',
xray_structure = xrs).f_calc()
weight_map=get_map_from_map_coeffs(map_coeffs=weight_f_array,
crystal_symmetry=self.crystal_symmetry,n_real=self.map_data.all())
min_value=weight_map.as_1d().min_max_mean().min
weight_map=weight_map-min_value # all positive or zero
max_value=weight_map.as_1d().min_max_mean().max
weight_map=weight_map/max(1.e-10,max_value) # normalize; max=1 now
min_value=1.e-10 # just a small value for all distances far from center
s = (weight_map <min_value ) # make extra sure every point is above this
weight_map=weight_map.set_selected(s,min_value)
return weight_map
def remove_buffer_from_bounds(self,minimum=1):
# back off by n_buffer in each direction, leave at
# least minimum grid on either side of center
adjusted_lower_bounds,adjusted_upper_bounds=[],[]
delta_lower_bounds,delta_upper_bounds=[],[]
for lb,ub in zip(self.lower_bounds,self.upper_bounds):
sum=lb+ub
if sum >=0:
mid=(1+sum)//2
else:
mid=(-1+sum)//2
alb=min(mid-minimum,lb+self.n_buffer)
aub=max(mid+minimum,ub-self.n_buffer)
adjusted_lower_bounds.append(alb)
adjusted_upper_bounds.append(aub)
delta_lower_bounds.append(alb-lb)
delta_upper_bounds.append(aub-ub)
return adjusted_lower_bounds,adjusted_upper_bounds,\
delta_lower_bounds,delta_upper_bounds
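# remove_buffer_from_bounds example (assumed values): lower_bounds=(0,0,0),
# upper_bounds=(99,99,99), n_buffer=10, minimum=1 -> mid=50 per axis, so the
# adjusted bounds are (10, 89) with deltas (+10, -10); the buffer is trimmed
# from each side but never past the centre of the box.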
def merge_into_overall_map(self,overall_map=None):
# Smoothly fill out edges of the small map with overall_map
assert self.smoothed_box_mask_data is not None
assert self.original_box_map_data is not None
self.map_data= (self.map_data * self.smoothed_box_mask_data) + \
(self.original_box_map_data * (1-self.smoothed_box_mask_data))
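# i.e. a per-voxel linear blend: where the smoothed mask is 1 the box values are
# kept, where it is 0 the surrounding original map is restored, with a soft
# transition in between.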
def remove_buffer(self,out=sys.stdout):
# remove the buffer from this box
new_lower_bounds,new_upper_bounds,delta_lower,delta_upper=\
self.remove_buffer_from_bounds()
cut_out_lower_bounds=[]
cut_out_upper_bounds=[]
for o,a,dlb,dub in zip(self.map_data.origin(),self.map_data.all(),
delta_lower,delta_upper):
cut_out_lower_bounds.append(o+dlb)
cut_out_upper_bounds.append(a+dub-1)
self.map_data,self.crystal_symmetry,\
self.smoothed_box_mask_data,self.original_box_map_data=cut_out_map(
map_data=self.map_data,
crystal_symmetry=self.crystal_symmetry,
soft_mask=False,
resolution=self.resolution,
shift_origin=True,
min_point=cut_out_lower_bounds,
max_point=cut_out_upper_bounds,out=out)
self.lower_bounds=new_lower_bounds
self.upper_bounds=new_upper_bounds
class sharpening_info:
def __init__(self,
tracking_data=None,
crystal_symmetry=None,
is_crystal=None,
sharpening_method=None,
solvent_fraction=None,
n_residues=None,
ncs_copies=None,
ncs_file=None,
seq_file=None,
sequence=None,
n_real=None,
region_weight=None,
n_bins=None,
eps=None,
d_min=None,
d_min_ratio=None,
scale_max=None,
input_d_cut=None,
b_blur_hires=None,
rmsd=None,
rmsd_resolution_factor=None,
k_sol=None,
b_sol=None,
fraction_complete=None,
wrapping=None,
sharpening_target=None,
residual_target=None,
fraction_occupied=None,
nproc=None,
multiprocessing=None,
queue_run_command=None,
resolution=None, # changed from d_cut
resolution_dependent_b=None, # linear sharpening
normalize_amplitudes_in_resdep=None, # linear sharpening
b_sharpen=None,
b_iso=None, # expected B_iso after applying b_sharpen
k_sharpen=None,
optimize_b_blur_hires=None,
iterate=None,
optimize_d_cut=None,
kurtosis=None,
adjusted_sa=None,
sa_ratio=None,
normalized_regions=None,
score=None,
input_weight_map_pickle_file=None,
output_weight_map_pickle_file=None,
read_sharpened_maps=None,
write_sharpened_maps=None,
select_sharpened_map=None,
output_directory=None,
smoothing_radius=None,
local_sharpening=None,
local_aniso_in_local_sharpening=None,
overall_before_local=None,
use_local_aniso=None,
original_aniso_obj=None,
auto_sharpen=None,
box_in_auto_sharpen=None,
density_select_in_auto_sharpen=None,
density_select_threshold_in_auto_sharpen=None,
use_weak_density=None,
discard_if_worse=None,
max_box_fraction=None,
cc_cut=None,
max_cc_for_rescale=None,
scale_using_last=None,
density_select_max_box_fraction=None,
mask_atoms=None,
mask_atoms_atom_radius=None,
value_outside_atoms=None,
soft_mask=None,
allow_box_if_b_iso_set=None,
search_b_min=None,
search_b_max=None,
search_b_n=None,
adjust_region_weight=None,
region_weight_method=None,
region_weight_factor=None,
region_weight_buffer=None,
region_weight_default=None,
target_b_iso_ratio=None,
signal_min=None,
target_b_iso_model_scale=None,
box_sharpening_info_obj=None,
chain_type=None,
target_scale_factors=None,
remove_aniso=None,
d_min_list=None,
verbose=None,
resolve_size=None,
pdb_inp=None, # XXX probably do not need this
local_solvent_fraction=None,
wang_radius=None,
buffer_radius=None,
pseudo_likelihood=None,
preliminary_sharpening_done=False,
):
from libtbx import adopt_init_args
adopt_init_args(self, locals())
del self.tracking_data # don't need it as part of the object
del self.box_sharpening_info_obj# don't need it as part of the object
del self.pdb_inp # don't need it as part of the object
if tracking_data: # use tracking data information
self.update_with_tracking_data(tracking_data=tracking_data)
if box_sharpening_info_obj: # update information
self.update_with_box_sharpening_info(
box_sharpening_info_obj=box_sharpening_info_obj)
if self.resolution_dependent_b is None:
self.resolution_dependent_b=[0,0,0]
if self.target_scale_factors and \
self.sharpening_method!='model_sharpening' \
and self.sharpening_method!='half_map_sharpening':
assert self.sharpening_method is None # XXX may want to print out error
self.sharpening_method='model_sharpening'
if self.sharpening_method=='b_iso' and self.k_sharpen is not None:
self.k_sharpen=None
if pdb_inp:
self.sharpening_method='model_sharpening'
self.box_in_auto_sharpen=True
self.density_select_in_auto_sharpen=False
self.sharpening_target='model'
def get_d_cut(self):
if self.input_d_cut is not None:
return self.input_d_cut
else:
return self.resolution
def get_target_b_iso(self):
if self.target_b_iso_ratio is None:
return None
if self.resolution is None:
return None
return self.target_b_iso_ratio*self.resolution**2
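# get_target_b_iso example (hypothetical values): target_b_iso_ratio=10 with
# resolution=3.0 A gives a target B of 10 * 3.0**2 = 90 A**2; None is returned
# when either quantity is unset.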
def set_resolution_dependent_b(self,
resolution_dependent_b=None,
sharpening_method='resolution_dependent'):
if resolution_dependent_b:
self.resolution_dependent_b=resolution_dependent_b
if sharpening_method:
self.sharpening_method=sharpening_method
def sharpening_is_defined(self):
if self.sharpening_method is None:
return False
if self.target_scale_factors:
return True
if self.sharpening_method=='target_b_iso_to_d_cut':
return True
if self.b_iso is not None or \
self.b_sharpen is not None or \
(self.resolution_dependent_b is not None and
self.resolution_dependent_b!=[0,0,0]):
return True
return False
def update_with_box_sharpening_info(self,box_sharpening_info_obj=None):
if not box_sharpening_info_obj:
return self
self.crystal_symmetry=box_sharpening_info_obj.crystal_symmetry
self.solvent_fraction=box_sharpening_info_obj.solvent_fraction
self.wrapping=box_sharpening_info_obj.wrapping
self.n_real=box_sharpening_info_obj.n_real
return self
def update_with_tracking_data(self,tracking_data=None):
self.update_with_params(params=tracking_data.params,
crystal_symmetry=tracking_data.crystal_symmetry,
solvent_fraction=tracking_data.solvent_fraction,
n_residues=tracking_data.n_residues,
ncs_copies=tracking_data.input_ncs_info.number_of_operators)
return self
def update_with_params(self,params=None,
crystal_symmetry=None,
is_crystal=None,
solvent_fraction=None,
auto_sharpen=None,
sharpening_method=None,
pdb_inp=None,
half_map_data_list=None,
n_residues=None,ncs_copies=None):
self.crystal_symmetry=crystal_symmetry
self.is_crystal=is_crystal
self.solvent_fraction=solvent_fraction
self.auto_sharpen=auto_sharpen
self.n_residues=n_residues
self.ncs_copies=ncs_copies
self.seq_file=params.input_files.seq_file
self.chain_type=params.crystal_info.chain_type
self.verbose=params.control.verbose
self.resolve_size=params.control.resolve_size
self.multiprocessing=params.control.multiprocessing
self.nproc=params.control.nproc
self.queue_run_command=params.control.queue_run_command
self.wrapping=params.crystal_info.use_sg_symmetry
self.fraction_occupied=params.map_modification.fraction_occupied
self.sa_percent=params.map_modification.sa_percent
self.region_weight=params.map_modification.region_weight
self.max_regions_to_test=params.map_modification.max_regions_to_test
self.regions_to_keep=params.map_modification.regions_to_keep
self.d_min_ratio=params.map_modification.d_min_ratio
self.scale_max=params.map_modification.scale_max
self.input_d_cut=params.map_modification.input_d_cut
self.b_blur_hires=params.map_modification.b_blur_hires
self.rmsd=params.map_modification.rmsd
self.rmsd_resolution_factor=params.map_modification.rmsd_resolution_factor
self.k_sol=params.map_modification.k_sol
self.b_sol=params.map_modification.b_sol
self.fraction_complete=params.map_modification.fraction_complete
self.resolution=params.crystal_info.resolution # changed from d_cut
# NOTE:
# resolution=X-ray resolution or nominal resolution of cryoEM map
# high-res cutoff of reflections is d_min*d_min_ratio
self.buffer_radius=params.crystal_info.buffer_radius
self.wang_radius=params.crystal_info.wang_radius
self.pseudo_likelihood=params.crystal_info.pseudo_likelihood
self.max_box_fraction=params.map_modification.max_box_fraction
self.cc_cut=params.map_modification.cc_cut
self.max_cc_for_rescale=params.map_modification.max_cc_for_rescale
self.scale_using_last=params.map_modification.scale_using_last
self.density_select_max_box_fraction=params.map_modification.density_select_max_box_fraction
self.mask_atoms=params.map_modification.mask_atoms
self.mask_atoms_atom_radius=params.map_modification.mask_atoms_atom_radius
self.value_outside_atoms=params.map_modification.value_outside_atoms
self.soft_mask=params.map_modification.soft_mask
self.allow_box_if_b_iso_set=params.map_modification.allow_box_if_b_iso_set
self.k_sharpen=params.map_modification.k_sharpen
self.optimize_b_blur_hires=params.map_modification.optimize_b_blur_hires
self.iterate=params.map_modification.iterate
self.optimize_d_cut=params.map_modification.optimize_d_cut
self.sharpening_target=params.map_modification.sharpening_target
self.residual_target=params.map_modification.residual_target
self.eps=params.map_modification.eps
self.n_bins=params.map_modification.n_bins
self.input_weight_map_pickle_file=params.input_files.input_weight_map_pickle_file
self.output_weight_map_pickle_file=params.output_files.output_weight_map_pickle_file
self.read_sharpened_maps=params.map_modification.read_sharpened_maps
self.write_sharpened_maps=params.map_modification.write_sharpened_maps
self.select_sharpened_map=params.map_modification.select_sharpened_map
self.output_directory=params.output_files.output_directory
self.smoothing_radius=params.map_modification.smoothing_radius
self.local_sharpening=params.map_modification.local_sharpening
self.local_aniso_in_local_sharpening=\
params.map_modification.local_aniso_in_local_sharpening
self.overall_before_local=\
params.map_modification.overall_before_local
self.box_in_auto_sharpen=params.map_modification.box_in_auto_sharpen
self.density_select_in_auto_sharpen=params.map_modification.density_select_in_auto_sharpen
self.density_select_threshold_in_auto_sharpen=params.map_modification.density_select_threshold_in_auto_sharpen
self.use_weak_density=params.map_modification.use_weak_density
self.discard_if_worse=params.map_modification.discard_if_worse
self.box_center=params.map_modification.box_center
self.box_size=params.map_modification.box_size
self.target_n_overlap=params.map_modification.target_n_overlap
self.restrict_map_size=params.map_modification.restrict_map_size
self.remove_aniso=params.map_modification.remove_aniso
self.min_ratio_of_ncs_copy_to_first=\
params.segmentation.min_ratio_of_ncs_copy_to_first
self.max_ratio_to_target=params.segmentation.max_ratio_to_target
self.min_ratio_to_target=params.segmentation.min_ratio_to_target
self.residues_per_region=params.segmentation.residues_per_region
self.mask_padding_fraction=\
params.segmentation.mask_padding_fraction
self.fraction_of_max_mask_threshold=\
params.segmentation.fraction_of_max_mask_threshold
self.cell_cutoff_for_solvent_from_mask=\
params.segmentation.cell_cutoff_for_solvent_from_mask
self.starting_density_threshold=\
params.segmentation.starting_density_threshold
self.density_threshold=params.segmentation.density_threshold
self.min_ratio=params.segmentation.min_ratio
self.min_volume=params.segmentation.min_volume
self.search_b_min=params.map_modification.search_b_min
self.search_b_max=params.map_modification.search_b_max
self.search_b_n=params.map_modification.search_b_n
self.adjust_region_weight=params.map_modification.adjust_region_weight
self.region_weight_method=params.map_modification.region_weight_method
self.region_weight_factor=params.map_modification.region_weight_factor
self.region_weight_buffer=params.map_modification.region_weight_buffer
self.region_weight_default=params.map_modification.region_weight_default
self.target_b_iso_ratio=params.map_modification.target_b_iso_ratio
self.signal_min=params.map_modification.signal_min
self.target_b_iso_model_scale=params.map_modification.target_b_iso_model_scale
if sharpening_method is not None:
self.sharpening_method=sharpening_method
if not self.sharpening_method and \
len(params.map_modification.auto_sharpen_methods)==1:
self.sharpening_method=params.map_modification.auto_sharpen_methods[0]
if half_map_data_list or self.sharpening_method=='half_map_sharpening':
self.sharpening_method='half_map_sharpening'
self.sharpening_target='half_map'
elif pdb_inp or self.sharpening_method=='model_sharpening':
self.sharpening_method='model_sharpening'
self.box_in_auto_sharpen=True
self.density_select_in_auto_sharpen=False
self.sharpening_target='model'
elif params.map_modification.b_iso is not None or \
params.map_modification.b_sharpen is not None:
if self.sharpening_method is None:
raise Sorry("b_iso is not set")
# if sharpening values are specified, set them
if params.map_modification.b_iso is not None:
self.b_iso=params.map_modification.b_iso # but we need b_sharpen
elif params.map_modification.b_sharpen is not None:
self.b_sharpen=params.map_modification.b_sharpen
elif (params.map_modification.resolution_dependent_b is not None
and params.map_modification.resolution_dependent_b!=[0,0,0]):
self.sharpening_method='resolution_dependent'
self.resolution_dependent_b=\
params.map_modification.resolution_dependent_b
if self.sharpening_method=='b_iso' and self.k_sharpen is not None:
self.k_sharpen=None
return self
def show_summary(self,verbose=False,out=sys.stdout):
method_summary_dict={
'b_iso':"Overall b_iso sharpening",
'b_iso_to_d_cut':"b_iso sharpening to high_resolution cutoff",
'resolution_dependent':"Resolution-dependent sharpening",
'model_sharpening':"Model sharpening",
'half_map_sharpening':"Half-map sharpening",
'no_sharpening':"No sharpening",
None:"No sharpening",
}
target_summary_dict={
'adjusted_sa':"Adjusted surface area",
'kurtosis':"Map kurtosis",
'model':"Map-model CC",
}
print("\nSummary of sharpening:\n", file=out)
print("Sharpening method used: %s\n" %(
method_summary_dict.get(self.sharpening_method)), file=out)
if self.sharpening_method=="b_iso":
if self.b_sharpen is not None:
print("Overall b_sharpen applied: %7.2f A**2" %(
self.b_sharpen), file=out)
if self.b_iso is not None:
print("Final b_iso obtained: %7.2f A**2" %(self.b_iso), file=out)
elif self.sharpening_method=="b_iso_to_d_cut":
if self.b_sharpen is not None:
print("Overall b_sharpen applied: %7.2f A**2" %(
self.b_sharpen), file=out)
if self.b_iso is not None:
print("Final b_iso obtained: %7.2f A**2" %(self.b_iso), file=out)
if self.input_d_cut:
print("High-resolution cutoff: %7.2f A" %(self.input_d_cut), file=out)
else:
print("High-resolution cutoff: %7.2f A" %(self.resolution), file=out)
elif self.sharpening_method=="resolution_dependent":
print("Resolution-dependent b values (%7.2f,%7.2f,%7.2f)\n" %(
tuple(self.resolution_dependent_b)), file=out)
print("Effective b_iso vs resolution obtained:", file=out)
from cctbx.maptbx.refine_sharpening import get_effective_b_values
d_min_values,b_values=get_effective_b_values(
d_min_ratio=self.d_min_ratio,
resolution_dependent_b=self.resolution_dependent_b,
resolution=self.resolution)
print(" Resolution Effective B-iso", file=out)
print(" (A) (A**2)", file=out)
for dd,b in zip(d_min_values,b_values):
print(" %7.1f %7.2f " %(
dd,b), file=out)
elif self.sharpening_method=="model_sharpening":
print("Resolution-dependent model sharpening", file=out)
if self.d_min_list and self.target_scale_factors:
print("Scale vs resolution:", file=out)
for d_min,sc in zip(
self.d_min_list,
self.target_scale_factors):
print("Dmin: %7.2f Scale: %9.6f" %(d_min,sc), file=out)
elif self.sharpening_method=="half_map_sharpening":
print("Resolution-dependent half-map sharpening", file=out)
if self.d_min_list and self.target_scale_factors:
print("Scale vs resolution:", file=out)
for d_min,sc in zip(
self.d_min_list,
self.target_scale_factors):
print("Dmin: %7.2f Scale: %9.6f" %(d_min,sc), file=out)
if self.sharpening_method in ["b_iso_to_d_cut"] and \
self.k_sharpen and self.resolution:
print("Transition from sharpening"+\
" to not sharpening (k_sharpen):%7.2f " %(self.k_sharpen), file=out)
print("\nSharpening target used: %s" %(
target_summary_dict.get(self.sharpening_target)), file=out)
if self.adjusted_sa is not None:
print("Final | |
vari_dark_G3, -- Array index 25
aver_dark_B4,
vari_dark_B4, -- Array index 27
bias
FROM image_t
WHERE state >= :state
AND type = :type
AND session = :session
AND night = :night
ORDER BY tstamp ASC
''', row)
return cursor
# we are not using the image_v VIEW for the time being
# We display the RAW data without dark and bias subtraction
def export_all_iterable(connection):
row = {'state': STATS_COMPUTED, 'type': LIGHT_FRAME}
cursor = connection.cursor()
cursor.execute(
'''
SELECT session,
observer,
organization,
location,
type,
tstamp,
name,
model,
iso,
roi,
dark_roi,
exptime,
aver_signal_R1,
vari_signal_R1, -- Array index 13
aver_signal_G2,
vari_signal_G2, -- Array index 15
aver_signal_G3,
vari_signal_G3, -- Array index 17
aver_signal_B4,
vari_signal_B4, -- Array index 19
aver_dark_R1,
vari_dark_R1, -- Array index 21
aver_dark_G2,
vari_dark_G2, -- Array index 23
aver_dark_G3,
vari_dark_G3, -- Array index 25
aver_dark_B4,
vari_dark_B4, -- Array index 27
bias
FROM image_t
WHERE state >= :state
AND type = :type
ORDER BY observer ASC, tstamp ASC
''', row)
return cursor
def var2std(item):
'''From Variance to StdDev in several columns'''
index, value = item
# Calculate stddev from variance and round to one decimal place
if index in [13, 15, 17, 19, 21, 23, 25, 27]:
value = round(math.sqrt(value),1)
# Round the aver_signal channels too
elif index in [12, 14, 16, 18, 20, 22, 24, 26]:
value = round(value, 1)
return value
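# var2std usage sketch (mirrors do_export_work_dir/do_export_all below): each exported
# row is post-processed column by column, so the variance columns (indexes 13, 15, ... 27)
# become rounded standard deviations and the average columns are rounded to one decimal:
#   row = map(var2std, enumerate(row))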
def get_file_path(connection, night, work_dir, options):
# This is for automatic reductions mainly
key, ext = os.path.splitext(options.config)
key = os.path.basename(key)
#wdtag = os.path.basename(work_dir)
filename = "-".join([key, night + '.csv'])
if options.multiuser:
subdir = os.path.join(options.csv_dir, key)
os.makedirs(subdir, exist_ok=True)
file_path = os.path.join(subdir, filename)
else:
file_path = os.path.join(options.csv_dir, filename)
return file_path
def do_export_work_dir(connection, session, work_dir, options):
'''Export a working directory of image reductions to a single file'''
fieldnames = ["session","observer","organization","location","type"]
fieldnames.extend(EXPORT_HEADERS)
if not session_processed(connection, session):
log.info("No new CSV file generation")
return
for (night,) in night_iterable(connection, session):
# Write a session CSV file
session_csv_file = get_file_path(connection, night, work_dir, options)
with myopen(session_csv_file, 'w') as csvfile:
writer = csv.writer(csvfile, delimiter=';')
writer.writerow(fieldnames)
for row in export_session_iterable(connection, session, night):
row = map(var2std, enumerate(row))
writer.writerow(row)
log.info("Saved data to session CSV file {0}".format(session_csv_file))
def do_export_all(connection, options):
'''Exports all the database to a single file'''
fieldnames = ["session","observer","organization","location","type"]
fieldnames.extend(EXPORT_HEADERS)
with myopen(options.csv_file, 'w') as csvfile:
writer = csv.writer(csvfile, delimiter=';')
writer.writerow(fieldnames)
for row in export_all_iterable(connection):
row = map(var2std, enumerate(row))
writer.writerow(row)
log.info("Saved data to global CSV file {0}".format(options.csv_file))
# ==================================
# Image List subcommands and options
# ==================================
EXIF_HEADERS = [
'Name',
'Session',
'Timestamp',
'Model',
'Exposure',
'ISO',
'Focal',
'f/'
]
GLOBAL_HEADERS = [
'Name',
'Type',
'Session',
'Observer',
'Organization',
'Location',
'ROI',
]
STATE_HEADERS = [
"Name",
"Session",
"Type",
"State",
]
DATA_HEADERS = [
"Name", "ROI", "Bias",
"\u03BC R1", "\u03C3^2 R1",
"\u03BC G2", "\u03C3^2 G2",
"\u03BC G3", "\u03C3^2 G3",
"\u03BC B4", "\u03C3^2 B4",
]
RAW_DATA_HEADERS = [
"Name", "ROI" , "Bias",
"Raw \u03BC R1", "Raw \u03C3^2 R1",
"Raw \u03BC G2", "Raw \u03C3^2 G2",
"Raw \u03BC G3", "Raw \u03C3^2 G3",
"Raw \u03BC B4", "Raw \u03C3^2 B4",
]
DARK_DATA_HEADERS = [
"Name", "ROI" , "Bias",
"Dark \u03BC R1", "Dark \u03C3^2 R1",
"Dark \u03BC G2", "Dark \u03C3^2 G2",
"Dark \u03BC G3", "Dark \u03C3^2 G3",
"Dark \u03BC B4", "Dark \u03C3^2 B4",
]
def view_session_count(cursor, session):
row = {'session': session}
cursor.execute(
'''
SELECT COUNT(*)
FROM image_t
WHERE session = :session
''', row)
return cursor.fetchone()[0]
def view_all_count(cursor):
cursor.execute(
'''
SELECT COUNT(*)
FROM image_t
''')
return cursor.fetchone()[0]
# --------------
# Image metadata
# --------------
def view_meta_exif_all_iterable(connection, session):
cursor = connection.cursor()
count = view_all_count(cursor)
cursor.execute(
'''
SELECT name, session, tstamp, model, exptime, iso, focal_length, f_number
FROM image_t
ORDER BY session DESC, name ASC
''')
return cursor, count
def view_meta_exif_session_iterable(connection, session):
'''session may be None for NULL'''
row = {'session': session}
cursor = connection.cursor()
count = view_session_count(cursor, session)
cursor.execute(
'''
SELECT name, session, tstamp, model, exptime, iso, focal_length, f_number
FROM image_t
WHERE session = :session
ORDER BY name DESC
''', row)
return cursor, count
# ------------
# Image General
# -------------
def view_meta_global_all_iterable(connection, session):
cursor = connection.cursor()
count = view_all_count(cursor)
cursor.execute(
'''
SELECT name, type, session, observer, organization, email, location, roi
FROM image_t
ORDER BY session DESC
''')
return cursor, count
def view_meta_global_session_iterable(connection, session):
'''session may be None for NULL'''
row = {'session': session}
cursor = connection.cursor()
count = view_session_count(cursor, session)
cursor.execute(
'''
SELECT name, type, session, observer, organization, email, location, roi
FROM image_t
WHERE session = :session
ORDER BY name ASC
''', row)
return cursor, count
# -----------
# Image State
# -----------
def view_state_session_iterable(connection, session):
row = {'session': session}
cursor = connection.cursor()
count = view_session_count(cursor, session)
cursor.execute(
'''
SELECT name, session, type, s.label
FROM image_t
JOIN state_t AS s USING(state)
WHERE session = :session
ORDER BY session DESC, name ASC
''', row)
return cursor, count
def view_state_all_iterable(connection, session):
row = {'session': session}
cursor = connection.cursor()
count = view_all_count(cursor)
cursor.execute(
'''
SELECT name, session, type, s.label
FROM image_t
JOIN state_t AS s USING(state)
ORDER BY session DESC, name ASC
''', row)
return cursor, count
# -----------
# Image Data
# -----------
def view_data_session_iterable(connection, session):
row = {'session': session}
cursor = connection.cursor()
count = view_session_count(cursor, session)
cursor.execute(
'''
SELECT
name, roi, bias,
aver_signal_R1, vari_signal_R1,
aver_signal_G2, vari_signal_G2,
aver_signal_G3, vari_signal_G3,
aver_signal_B4, vari_signal_B4
FROM image_v
WHERE session = :session
ORDER BY name ASC
''', row)
return cursor, count
def view_data_all_iterable(connection, session):
cursor = connection.cursor()
count = view_all_count(cursor)
cursor.execute(
'''
SELECT
name, roi, bias,
aver_signal_R1, vari_signal_R1,
aver_signal_G2, vari_signal_G2,
aver_signal_G3, vari_signal_G3,
aver_signal_B4, vari_signal_B4
FROM image_v
ORDER BY session DESC, name ASC
''')
return cursor, count
# -------------
# Raw Image Data
# --------------
def view_raw_data_session_iterable(connection, session):
row = {'session': session, 'light': LIGHT_FRAME, 'unknown': UNKNOWN}
cursor = connection.cursor()
count = view_session_count(cursor, session)
cursor.execute(
'''
SELECT
name, roi, bias,
aver_signal_R1, vari_signal_R1,
aver_signal_G2, vari_signal_G2,
aver_signal_G3, vari_signal_G3,
aver_signal_B4, vari_signal_B4
FROM image_t
WHERE session = :session
AND ((type = :light) OR (type = :unknown))
ORDER BY name ASC
''', row)
return cursor, count
def view_raw_data_all_iterable(connection, session):
row = {'light': LIGHT_FRAME, 'unknown': UNKNOWN}
cursor = connection.cursor()
count = view_all_count(cursor)
cursor.execute(
'''
SELECT
name, roi, bias,
aver_signal_R1, vari_signal_R1,
aver_signal_G2, vari_signal_G2,
aver_signal_G3, vari_signal_G3,
aver_signal_B4, vari_signal_B4
FROM image_t
WHERE ((type = :light) OR (type = :unknown))
ORDER BY session DESC, name ASC
''', row)
return cursor, count
# --------------
# Dark Image Data
# ---------------
def view_dark_data_session_iterable(connection, session):
row = {'session': session}
cursor = connection.cursor()
count = view_session_count(cursor, session)
cursor.execute(
'''
SELECT
name, roi, bias,
aver_dark_R1, vari_dark_R1,
aver_dark_G2, vari_dark_G2,
aver_dark_G3, vari_dark_G3,
aver_dark_B4, vari_dark_B4
FROM image_t
WHERE session = :session
ORDER BY name ASC
''', row)
return cursor, count
def view_dark_data_all_iterable(connection, session):
cursor = connection.cursor()
count = view_all_count(cursor)
cursor.execute(
'''
SELECT
name, roi, bias,
aver_dark_R1, vari_dark_R1,
aver_dark_G2, vari_dark_G2,
aver_dark_G3, vari_dark_G3,
aver_dark_B4, vari_dark_B4
FROM image_t
ORDER BY session DESC, name ASC
''')
return cursor, count
# ----------------
# View Master Dark
# -----------------
def view_master_dark_all_iterable(connection, session):
row = {'tolerance': 0.2}
cursor = connection.cursor()
cursor.execute("SELECT COUNT(*) FROM master_dark_t")
count = cursor.fetchone()[0]
cursor.execute(
'''
SELECT
session, N, roi,
(max_exptime - min_exptime) <= :tolerance as good_flag,
aver_R1, vari_R1,
aver_G2, vari_G2,
aver_G3, vari_G3,
aver_B4, vari_B4
FROM master_dark_t
ORDER BY session DESC
''', row)
return cursor, count
def view_master_dark_session_iterable(connection, session):
row = {'tolerance': 0.2, 'session': session}
cursor = connection.cursor()
cursor.execute("SELECT COUNT(*) FROM master_dark_t WHERE session = :session", row)
count = cursor.fetchone()[0]
cursor.execute(
'''
SELECT
session, N, roi,
(max_exptime - min_exptime) <= :tolerance as good_flag,
aver_R1, vari_R1,
aver_G2, vari_G2,
aver_G3, vari_G3,
aver_B4, vari_B4
FROM master_dark_t
WHERE session = :session
''', row)
return cursor, count
MASTER_DARK_HEADERS = [
"Session",
"# Darks",
"ROI",
"Good?",
"\u03BC R1", "\u03C3^2 R1",
"\u03BC G2", "\u03C3^2 G2",
"\u03BC G3", "\u03C3^2 G3",
"\u03BC B4", "\u03C3^2 B4",
]
# ---------
# View Dark
# ----------
def view_dark_session_iterable(connection, session):
row = {'session': session, 'type': DARK_FRAME}
cursor = connection.cursor()
count = view_session_count(cursor, session)
cursor.execute(
'''
SELECT
name, roi, bias,
aver_signal_R1, vari_signal_R1,
aver_signal_G2, vari_signal_G2,
aver_signal_G3, vari_signal_G3,
aver_signal_B4, vari_signal_B4
FROM image_t
WHERE session = :session
AND type = :type
ORDER BY name ASC
''', row)
return cursor, count
def view_dark_all_iterable(connection, session):
row = {'session': session, 'type': DARK_FRAME}
cursor = connection.cursor()
count = view_all_count(cursor)
cursor.execute(
'''
SELECT
name, roi, bias,
aver_signal_R1, | |
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from decimal import Decimal
from django.contrib.auth.models import User
from django.core.cache import cache
from django.core.exceptions import ValidationError
from django.test import TestCase
from projects.models import Project
from challenges.models import (Challenge, Submission, Phase, Category,
ExclusionFlag, Judgement, JudgingCriterion,
PhaseCriterion, PhaseRound, SubmissionParent,
SubmissionVersion, SubmissionHelp)
from challenges.tests.fixtures import (challenge_setup, create_submissions,
create_users, challenge_teardown)
from challenges.tests.fixtures.ignite_fixtures import (setup_ignite_challenge,
teardown_ignite_challenge,
setup_development_rounds_phase,
create_submission,
create_user)
from ignite.tests.decorators import ignite_skip
from nose.tools import ok_, eq_
def _create_project_and_challenge():
"""Create and return a sample project with a sample challenge."""
project = Project.objects.create(name='Project', slug='project',
allow_participation=True)
end_date = datetime.utcnow() + timedelta(days=365)
challenge = Challenge.objects.create(title='Challenge',
slug='challenge',
end_date=end_date,
project=project)
return project, challenge
class PermalinkTest(TestCase):
def setUp(self):
self.project, self.challenge = _create_project_and_challenge()
@ignite_skip
def test_permalink(self):
self.assertEqual(self.challenge.get_absolute_url(),
'/project/challenges/challenge/')
def tearDown(self):
for model in [Challenge, Project]:
model.objects.all().delete()
class SingleChallengePermalinkTest(TestCase):
urls = 'challenges.tests.single_challenge_urls'
def setUp(self):
self.project, self.challenge = _create_project_and_challenge()
def test_single_challenge_permalink(self):
"""Test permalink generation on an Ignite-style one-challenge site."""
self.assertEqual(self.challenge.get_absolute_url(), '/')
def tearDown(self):
for model in [Challenge, Project]:
model.objects.all().delete()
class EntriesToLive(TestCase):
def setUp(self):
self.project = Project.objects.create(
name=u'A project for a test',
allow_participation=True
)
self.challenge = Challenge.objects.create(
title=u'Testing my submissions',
end_date=u'2020-11-30 12:23:28',
project=self.project,
moderate=True
)
self.phase = Phase.objects.create(
name=u'Phase 1', challenge=self.challenge, order=1
)
self.user = User.objects.create_user('bob', '<EMAIL>', 'bob')
self.category = Category.objects.create(name='Misc', slug='misc')
self.submission = Submission.objects.create(
title=u'A submission to test',
description=u'<h3>Testing bleach</h3>',
phase=self.phase,
created_by=self.user.get_profile(),
category=self.category
)
self.submission_marked = Submission.objects.create(
title=u'A submission with markdown',
description=u'I **really** like cake',
phase=self.phase,
created_by=self.user.get_profile(),
category=self.category
)
def test_phase_unicode(self):
"""Test the string representation of a phase."""
self.assertEqual(unicode(self.phase),
u'Phase 1 (Testing my submissions)')
def test_bleach_cleaning(self):
"""
Check that we're stripping out bad content - <h3> isn't in our
allowed list
"""
self.assertEqual(self.submission.description_html,
'&lt;h3&gt;Testing bleach&lt;/h3&gt;')
def test_markdown_conversion(self):
"""
Check that we're converting markdown before outputting
"""
self.assertEqual(self.submission_marked.description_html,
'<p>I <strong>really</strong> like cake</p>')
class CategoryManager(TestCase):
def setUp(self):
self.project = Project.objects.create(
name=u'Test Project',
allow_participation=True
)
self.challenge = Challenge.objects.create(
title=u'Testing categories',
end_date=u'2020-11-30 12:23:28',
description=u'Blah',
project=self.project,
moderate=False
)
self.phase = Phase.objects.create(
name=u'Phase 1',
order=1,
challenge=self.challenge
)
self.user = User.objects.create_user('bob', '<EMAIL>', 'bob')
self.c1 = Category.objects.create(
name=u'Testing',
slug=u'testing'
)
self.c2 = Category.objects.create(
name=u'Ross',
slug=u'ross'
)
def test_initial_return(self):
"""
Test that with no categories containing submissions returns nothing
"""
self.assertEqual(Category.objects.get_active_categories(), False)
def test_results_after_submission(self):
"""
Test that we return only categories with submissions in
"""
Submission.objects.create(
title=u'Category',
brief_description=u'Blah',
description=u'Foot',
phase=self.phase,
created_by=self.user.get_profile(),
category=self.c1
)
self.cats = Category.objects.get_active_categories()
self.assertEqual(len(self.cats), 1)
def tearDown(self):
for model in [Challenge, Project, Phase, User, Category, Submission]:
model.objects.all().delete()
class Phases(TestCase):
def setUp(self):
self.project, self.challenge = _create_project_and_challenge()
def tearDown(self):
for model in [Challenge, Project, Phase]:
model.objects.all().delete()
def create_open_phase(self):
now = datetime.utcnow()
data = {
'challenge': self.challenge,
'name': 'Ideation',
'start_date': now - relativedelta(days=2),
'end_date': now + relativedelta(days=30),
'order': 1,
}
phase = Phase.objects.create(**data)
eq_(phase.days_remaining, 29)
eq_(len(phase.phase_rounds), 0)
eq_(phase.current_round, None)
eq_(phase.judiging_phase_round, None)
ok_(phase.is_open)
def create_closed_phase(self):
now = datetime.utcnow()
data = {
'challenge': self.challenge,
'name': 'Ideation',
'start_date': now - relativedelta(days=30),
'end_date': now - relativedelta(days=32),
'order': 1,
}
phase = Phase.objects.create(**data)
eq_(phase.days_remaining, 0)
eq_(len(phase.phase_rounds), 0)
eq_(phase.current_round, None)
eq_(phase.judiging_phase_round, None)
eq_(phase.is_open, False)
class Criteria(TestCase):
def test_value_range(self):
c = JudgingCriterion(question='How awesome is this idea?', max_value=5)
self.assertEqual(list(c.range), [1, 2, 3, 4, 5])
def test_good_range(self):
c = JudgingCriterion(question='How awesome is this idea?', max_value=5)
c.clean()
def test_bad_range(self):
c = JudgingCriterion(question='How awesome is this idea?',
max_value=-5)
self.assertRaises(ValidationError, c.clean)
def test_single_unit_range(self):
c = JudgingCriterion(question='How awesome is this idea?', max_value=0)
# A range of 0 to 0 is theoretically valid, but you can't weight it
self.assertRaises(ValidationError, c.clean)
class JudgementScoring(TestCase):
def setUp(self):
challenge_setup()
user = User.objects.create_user('bob', '<EMAIL>', 'bob')
create_submissions(1)
self.phase = Phase.objects.get()
self.submission = Submission.objects.get()
self.judge = user.get_profile()
def test_equal_weighting(self):
for i in range(4):
c = JudgingCriterion.objects.create(question='Question %d' % i,
max_value=10)
PhaseCriterion.objects.create(phase=self.phase, criterion=c,
weight=25)
judgement = Judgement.objects.create(submission=self.submission,
judge=self.judge)
ratings = [3, 5, 7, 8]
for criterion, rating in zip(JudgingCriterion.objects.all(), ratings):
judgement.answers.create(criterion=criterion, rating=rating)
self.assertTrue(judgement.complete)
self.assertEqual(judgement.get_score(), Decimal('57.5'))
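# Worked arithmetic for the expected score, assuming score = sum(weight * rating / max_value):
# 25*(3/10) + 25*(5/10) + 25*(7/10) + 25*(8/10) = 7.5 + 12.5 + 17.5 + 20.0 = 57.5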
def test_unequal_weighting(self):
for i, weight in zip(range(4), [15, 25, 25, 35]): # Total: 100
c = JudgingCriterion.objects.create(question='Question %d' % i,
max_value=10)
PhaseCriterion.objects.create(phase=self.phase, criterion=c,
weight=weight)
judgement = Judgement.objects.create(submission=self.submission,
judge=self.judge)
ratings = [3, 5, 7, 8]
criteria = JudgingCriterion.objects.all().order_by('question')
for criterion, rating in zip(criteria, ratings):
judgement.answers.create(criterion=criterion, rating=rating)
self.assertTrue(judgement.complete)
# 3 * 1.5 + 5 * 2.5 + 7 * 2.5 + 8 * 3.5 = 62.5
self.assertEqual(judgement.get_score(), Decimal('62.5'))
def test_incomplete_judgement(self):
"""Test that scoring an incomplete judgement raises an error."""
for i in range(4):
c = JudgingCriterion.objects.create(question='Question %d' % i,
max_value=10)
PhaseCriterion.objects.create(phase=self.phase, criterion=c,
weight=25)
judgement = Judgement.objects.create(submission=self.submission,
judge=self.judge)
ratings = [3, 5, 7]
# Only three ratings, so zip will ignore the last criterion
for criterion, rating in zip(JudgingCriterion.objects.all(), ratings):
judgement.answers.create(criterion=criterion, rating=rating)
self.assertFalse(judgement.complete)
self.assertRaises(Judgement.Incomplete, judgement.get_score)
def test_no_criteria(self):
"""Test behaviour when there are no criteria."""
judgement = Judgement.objects.create(submission=self.submission,
judge=self.judge)
self.assertTrue(judgement.complete)
self.assertEqual(judgement.get_score(), Decimal('0.00'))
class TestSubmissions(TestCase):
def setUp(self):
challenge_setup()
create_submissions(3)
self.phase = Phase.objects.get()
cache.clear()
def test_no_exclusions(self):
self.assertEqual(Submission.objects.eligible(self.phase).count(), 3)
def test_exclusion(self):
excluded = Submission.objects.all()[0]
ExclusionFlag.objects.create(submission=excluded, notes='Blah blah')
self.assertEqual(Submission.objects.eligible(self.phase).count(), 2)
class TestSubmissionsMultiplePhases(TestCase):
def setUp(self):
self.initial_data = setup_development_rounds_phase(**setup_ignite_challenge())
self.ideation = self.initial_data['ideation_phase']
self.development = self.initial_data['dev_phase']
self.user = create_user('bob')
self.round_a = self.initial_data['round_a']
self.round_b = self.initial_data['round_b']
def tearDown(self):
teardown_ignite_challenge()
def test_exclude_submission_phases(self):
create_submission(created_by=self.user, phase=self.ideation)
self.assertEqual(Submission.objects.eligible(self.ideation).count(), 1)
self.assertEqual(Submission.objects.eligible(self.development).count(), 0)
self.assertEqual(Submission.objects.count(), 1)
def test_exclude_submission_rounds(self):
create_submission(created_by=self.user, phase=self.development,
phase_round=self.round_a)
self.assertEqual(Submission.objects.eligible(self.development,
self.round_a).count(), 1)
self.assertEqual((Submission.objects.eligible(self.development,
self.round_b).count()), 0)
self.assertEqual(Submission.objects.eligible(self.ideation).count(), 0)
def test_exclude_submission_version(self):
submission_ideation = create_submission(created_by=self.user,
phase=self.ideation)
new_sub = create_submission(title='Replacement', created_by=self.user,
phase=self.development, with_parent=False)
submission_ideation.parent.update_version(new_sub)
self.assertEqual(Submission.objects.eligible(self.ideation).count(), 0)
self.assertEqual(Submission.objects.eligible(self.development).count(), 1)
def test_exclude_drafts(self):
create_submission(created_by=self.user, phase=self.ideation, is_draft=True)
self.assertEqual(Submission.objects.eligible(self.ideation).count(), 0)
class DraftSubmissionTest(TestCase):
def setUp(self):
challenge_setup()
create_users()
alex_profile = User.objects.get(username='alex').get_profile()
create_submissions(5, creator=alex_profile)
self.draft_submission = Submission.objects.all()[0]
self.draft_submission.is_draft = True
self.draft_submission.save()
cache.clear()
def test_draft_not_public(self):
assert self.draft_submission not in Submission.objects.visible()
def test_non_draft_visible(self):
"""Test live submissions are visible to anyone and everyone."""
alex, bob = [User.objects.get(username=u) for u in ['alex', 'bob']]
s = Submission.objects.all()[3]
assert s in Submission.objects.visible()
for user in [alex, bob]:
assert s in Submission.objects.visible(user=user)
assert user.has_perm('challenges.view_submission', obj=s)
def test_draft_not_visible_to_user(self):
bob = User.objects.get(username='bob')
assert self.draft_submission not in Submission.objects.visible(user=bob)
assert not bob.has_perm('challenges.view_submission',
obj=self.draft_submission)
def test_draft_visible_to_author(self):
alex = User.objects.get(username='alex')
assert self.draft_submission in Submission.objects.visible(user=alex)
assert alex.has_perm('challenges.view_submission',
obj=self.draft_submission)
class PhaseRoundTest(TestCase):
def setUp(self):
challenge_setup()
def tearDown(self):
for model in [Challenge, Project, Phase, User, Category, Submission,
PhaseRound]:
model.objects.all().delete()
def _create_phase_round(self, phase, **kwargs):
delta = relativedelta(hours=1)
now = datetime.utcnow()
defaults = {
'name': 'Round A',
'phase': phase,
'start_date': now - delta,
'end_date': now + delta,
}
if kwargs:
defaults.update(kwargs)
return PhaseRound.objects.create(**defaults)
def test_create_phase(self):
data = {
'name': 'Round A',
'phase': Phase.objects.all()[0],
'start_date': datetime.utcnow(),
'end_date': datetime.utcnow() + relativedelta(hours=1),
}
phase = PhaseRound.objects.create(**data)
assert phase.slug, 'Slug missing on: %s' % phase
self.assertTrue(phase.is_active)
def test_open_phase_round(self):
phase = Phase.objects.all()[0]
phase_round = self._create_phase_round(phase)
# reload
phase = Phase.objects.all()[0]
eq_(phase.current_round, phase_round)
ok_(phase.is_open)
def test_close_phase_round(self):
phase = Phase.objects.all()[0]
now = datetime.utcnow()
delta = relativedelta(hours=1)
close_data = {
'start_date': now + delta,
'end_date': now + delta + delta,
}
phase_round = self._create_phase_round(phase, **close_data)
# reload
phase = Phase.objects.all()[0]
eq_(phase.current_round, None)
eq_(phase.is_open, False)
class SubmissionParentTest(TestCase):
def setUp(self):
challenge_setup()
profile_list = create_users()
self.phase = Phase.objects.all()[0]
self.created_by = profile_list[0]
self.category = Category.objects.all()[0]
def create_submission(self, **kwargs):
defaults = {
'title': 'Title',
'brief_description': 'A submission',
'description': 'A really good submission',
'phase': self.phase,
'created_by': self.created_by,
'category': self.category,
}
if kwargs:
defaults.update(kwargs)
return Submission.objects.create(**defaults)
def tearDown(self):
for model in [Challenge, Project, Phase, User, Category, Submission,
SubmissionParent]:
model.objects.all().delete()
def test_parent_creation(self):
"""Create a ``SubmissionParent`` with the less possible data"""
submission = self.create_submission(title='a')
parent = SubmissionParent.objects.create(submission=submission)
assert parent.id, "SubmissionParent creation failure"
self.assertEqual(parent.status, SubmissionParent.ACTIVE)
self.assertEqual(parent.slug, submission.id)
self.assertEqual(parent.name, submission.title)
def test_parent_visibility(self):
submission = self.create_submission(title='a')
parent = SubmissionParent.objects.create(submission=submission)
self.assertEqual(Submission.objects.visible().count(), 1)
parent.status = SubmissionParent.INACTIVE
parent.save()
self.assertEqual(Submission.objects.visible().count(), 0)
def test_submission_without_parent(self):
submission = self.create_submission(title='a')
self.assertEqual(Submission.objects.visible().count(), 0)
class SubmissionParentVersioningTest(TestCase):
def setUp(self):
challenge_setup()
profile_list = create_users()
self.phase = Phase.objects.all()[0]
self.created_by = profile_list[0]
self.category = Category.objects.all()[0]
self.submission_a = self.create_submission(title='a')
self.submission_b = self.create_submission(title='b')
self.parent = SubmissionParent.objects.create(submission=self.submission_a)
def create_submission(self, **kwargs):
defaults = {
'title': 'Title',
'brief_description': 'A submission',
'description': 'A really good submission',
'phase': self.phase,
'created_by': self.created_by,
'category': self.category,
}
if kwargs:
defaults.update(kwargs)
return Submission.objects.create(**defaults)
def test_update_parent_history(self):
self.parent.update_version(self.submission_b)
submission_versions = SubmissionVersion.objects.all()
self.assertEqual(len(submission_versions), 1)
submission_version = submission_versions[0]
self.assertEqual(submission_version.submission, self.submission_a)
self.assertEqual(self.parent.submission, self.submission_b)
def test_update_parent_values(self):
self.parent.update_version(self.submission_b)
self.assertEqual(self.parent.submission, self.submission_b)
self.assertEqual(self.parent.slug, self.submission_a.id)
self.assertEqual(self.parent.name, self.submission_b.title)
def test_visible_submission(self):
"""Test a versioned Submission is not visible on all listing"""
self.parent.update_version(self.submission_b)
assert self.submission_a not in Submission.objects.visible()
assert self.submission_a in Submission.objects.all()
class SubmissionHelpTest(TestCase):
def setUp(self):
challenge_setup()
profile_list = create_users()
self.phase = Phase.objects.all()[0]
self.created_by = profile_list[0]
self.category = Category.objects.all()[0]
create_submissions(1, self.phase, self.created_by)
self.submission_a | |
x_axis_type(self, value: int):
"""
:param int value:
"""
self.axis_system.XAxisType = value
@property
def y_axis_direction(self) -> Reference:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property YAxisDirection() As Reference
|
| Reads or sets the geometric point, line or plane which defines the
| direction of the Y axis.
| AxisDirection is and must be a reference on a 3D point or 3D line or
| plane.
|
| Example:
| The following example sets the point Point.1 of the Geometrical Set
| Geometrical Set.1 as the direction of the Y axis of the axis system
| AxisSystem0:
|
| Dim HybridBody4 As AnyObject
| Set HybridBody4 = Body1.HybridBodies.Item ( "Geometrical Set.1" )
| Dim HybridShapePointCoord5 As AnyObject
| Set HybridShapePointCoord5 = HybridBody4.HybridShapes.Item ( "Point.1" )
| Dim Reference6 As Reference
| Set Reference6 = CATIA.ActiveDocument.Part.
| CreateReferenceFromGeometry(HybridShapePointCoord5 )
| AxisSystem0.YAxisDirection = Reference6
:return: Reference
:rtype: Reference
"""
return Reference(self.axis_system.YAxisDirection)
@y_axis_direction.setter
def y_axis_direction(self, value: Reference):
"""
:param Reference value:
"""
self.axis_system.YAxisDirection = value
@property
def y_axis_type(self) -> int:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property YAxisType() As CATAxisSystemAxisType
|
| Reads or sets the way the Y axis is specified.
| An axis X,Y, or Z of the axis system can be defined by a geometric point,
| line or plane, or by coordinates.
| AxisType = 0 : The axis is defined by a geometric point, line or plane and with the same
| direction.
| AxisType = 1 : The axis direction is defined by the three coordinates x,y,z, of a vector, to
| which the axis will always stay parallel.
| AxisType = 2 : the axis is defined by a geometric point, line or plane and with the opposite
| direction. Notice : If the Y axis is neither defined by coordinates nor by a
| point,line or plane, the axis will be automatically computed in order to build an
| orthogonal axis system with the other specified axes.
|
| Example:
| The following example prints the Y axis type :
|
| Catia.SystemService.Print " YAxisType = " & axisSystem.YAxisType
|
| The following example sets the Y axis type to 1 :
|
| axisSystem.YAxisType = 1
:return: int
:rtype: int
"""
return self.axis_system.YAxisType
@y_axis_type.setter
def y_axis_type(self, value: int):
"""
:param int value:
"""
self.axis_system.YAxisType = value
@property
def z_axis_direction(self) -> Reference:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property ZAxisDirection() As Reference
|
| Reads or sets the geometric point, line or plane which defines the
| direction of the Z axis.
| AxisDirection is and must be a reference on a 3D point or 3D line or
| plane.
|
| Example:
| The following example sets the point Point.1 of the Geometrical Set
| Geometrical Set.1 as the direction of the Z axis of the axis system
| AxisSystem0:
|
| Dim HybridBody4 As AnyObject
| Set HybridBody4 = Body1.HybridBodies.Item ( "Geometrical Set.1" )
| Dim HybridShapePointCoord5 As AnyObject
| Set HybridShapePointCoord5 = HybridBody4.HybridShapes.Item ( "Point.1" )
| Dim Reference6 As Reference
| Set Reference6 = CATIA.ActiveDocument.Part.
| CreateReferenceFromGeometry(HybridShapePointCoord5)
| AxisSystem0.ZAxisDirection = Reference6
:return: Reference
:rtype: Reference
"""
return Reference(self.axis_system.ZAxisDirection)
@z_axis_direction.setter
def z_axis_direction(self, value: Reference):
"""
:param Reference value:
"""
self.axis_system.ZAxisDirection = value
@property
def z_axis_type(self) -> int:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property ZAxisType() As CATAxisSystemAxisType
|
| Reads or sets the way the Z axis is specified.
| An axis X,Y, or Z of the axis system can be defined by a geometric point,
| line or plane, or by coordinates.
| AxisType = 0 : The axis is defined by a geometric point, line or plane and with the same
| direction.
| AxisType = 1 : The axis direction is defined by the three coordinates x,y,z, of a vector, to
| which the axis will always stay parallel.
| AxisType = 2 : the axis is defined by a geometric point, line or plane and with the opposite
| direction. Notice : If the Z axis is neither defined by coordinates nor by a
| point,line or plane, the axis will be automatically computed in order to build an
| orthogonal axis system with the other specified axes.
|
| Example:
| The following example prints the Z axis type :
|
| Catia.SystemService.Print " ZAxisType = " & axisSystem.ZAxisType
|
| The following example sets the Z axis type to 1 :
|
| axisSystem.ZAxisType = 1
:return: int
:rtype: int
"""
return self.axis_system.ZAxisType
@z_axis_type.setter
def z_axis_type(self, value: int):
"""
:param int value:
"""
self.axis_system.ZAxisType = value
def get_euler_angles(self) -> tuple:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub GetEulerAngles(Angle oFirstAngle,
| Angle oSecondAngle,
| Angle ThirdAngle)
|
| Returns the Euler Angles of an axis system. Succeeds only if the axis
| system is defined by Euler angles, which means its type is
| catAxisSystemEulerAngles.
:param Angle o_first_angle:
:param Angle o_second_angle:
:param Angle third_angle:
:return: None
:rtype: None
"""
vba_function_name = 'get_euler_angles'
vba_code = """
Public Function get_euler_angles(axis_system)
Dim oFirstAngle (2)
axis_system.GetEulerAngles oFirstAngle
get_euler_angles = oFirstAngle
End Function
"""
system_service = self.application.system_service
return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])
def get_origin(self) -> tuple:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub GetOrigin(CATSafeArrayVariant oOrigin)
|
| Returns the coordinates X,Y,Z of the origin point of the axis
| system.
|
| Parameters:
|
| oOrigin
| A Safe Array made up of 3 doubles: X, Y, Z, representing the
| coordinates in model space of the origin point of the axis system.
|
|
| Example:
| The following example retrieves in originCoord the coordinates of the
| origin point of the axisSystem axis system:
|
| Dim originCoord(2)
| axisSystem.GetOrigin originCoord
:param tuple o_origin:
:return: None
:rtype: None
"""
vba_function_name = 'get_origin'
vba_code = """
Public Function get_origin(axis_system)
Dim oOrigin (2)
axis_system.GetOrigin oOrigin
get_origin = oOrigin
End Function
"""
system_service = self.application.system_service
return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])
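# Usage sketch (instance name assumed for illustration): the evaluated VBA hands the
# origin back as a 3-element sequence, e.g.
#   x, y, z = axis_system.get_origin()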
def get_vectors(self) -> tuple:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub GetVectors(CATSafeArrayVariant oVectorX,
| CATSafeArrayVariant oVectorY)
|
| Returns the coordinates X,Y,Z of the axes X and Y of the axis
| system.
|
| Parameters:
|
| oVectorX
| A Safe Array made up of 3 doubles: X, Y, Z, representing the
| coordinates in model space of the X axis vector of the axis system.
|
| oVectorY
| A Safe Array made up of 3 doubles: X, Y, Z, representing the
| coordinates in model space of the Y axis vector of the axis system.
|
|
| Example:
| The following example retrieves in vectorXCoord and vectorYCoord the
| coordinates of the vectors of the axisSystem axis
| system:
|
| Dim vectorXCoord(2)
| Dim vectorYCoord(2)
| axisSystem.GetVectors vectorXCoord, vectorYCoord
:param tuple o_vector_x:
:param tuple o_vector_y:
:return: None
:rtype: None
"""
vba_function_name = 'get_vectors'
vba_code = """
Public Function get_vectors(axis_system)
Dim oVectorX (2)
axis_system.GetVectors oVectorX
get_vectors = oVectorX
End Function
"""
system_service = self.application.system_service
return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])
def get_x_axis(self) -> tuple:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub GetXAxis(CATSafeArrayVariant oXAxis)
|
| Returns the coordinates X,Y,Z of the X axis of the axis
| system.
|
| Parameters:
|
| oXAxis
| A Safe Array made up of | |
if len(sea_dev.additional_veas) > 0:
for vea in sea_dev.additional_veas:
if vea.pvid == port_vlan_id:
return sea_dev
return
def _add_vlan_to_existing_vea(self, host, vios, vlan_id, vea_dev, sea_dev):
"""
Add a VLAN id to an existing VeaDevice object.
:param host: The Host DOM object that IVM runs on
:param vios: The VioServer DOM object that IVM runs on
:param vlan_id: VLAN id to add.
:param vea_dev: VeaDevice object
:param sea_dev: SeaDevice object
"""
ras.function_tracepoint(LOG, __name__, ras.TRACE_DEBUG, "Enter")
if not vea_dev or not isinstance(vea_dev, dom.VirtualEthernetAdapter):
raise excp.IBMPowerVMInvalidVETHConfig(attr='not_vea_type')
if not sea_dev or not isinstance(sea_dev, dom.SharedEthernetAdapter):
raise excp.IBMPowerVMInvalidSEAConfig()
# We're about to use vlan_id, so remove it from the list of
# available vlan ids
vswitch_name = vea_dev.vswitch_name
if host.available_vid_pool[vswitch_name].count(vlan_id) > 0:
host.available_vid_pool[vswitch_name].remove(vlan_id)
else:
# vlan_id is not in the available list, which means it's
# used already. Throw an exception.
ras.trace(LOG, __name__, ras.TRACE_ERROR,
ras.msg('error', 'VLAN_NOT_AVAIL') %
{'vlan_id': str(vlan_id)})
raise excp.IBMPowerVMVlanNotAvailable()
vea_dev.add_vea_vlan_tag(vlan_id)
self._update_virt_adapters_with_vea(host=host,
vios=vios,
vea_dev=vea_dev,
sea_dev=sea_dev,
isupdate=True)
ras.function_tracepoint(LOG, __name__, ras.TRACE_DEBUG, "Exit")
def _rm_vlan_from_existing_vea(self, host, vios, vlan_id, vea_dev,
sea_dev):
"""
Remove a VLAN ID from an existing VEA configured on SEA.
:param host: The Host DOM object that IVM is running on
:param vlan_id: VLAN id to remove from VEA
:param ved_dev: VeaDevice object
:param sea_dev: SeaDevice object
"""
ras.function_tracepoint(LOG, __name__, ras.TRACE_DEBUG, "Enter")
if not vea_dev or not isinstance(vea_dev, dom.VirtualEthernetAdapter):
raise excp.IBMPowerVMInvalidVETHConfig(attr='not_vea_type')
if not sea_dev or not isinstance(sea_dev, dom.SharedEthernetAdapter):
raise excp.IBMPowerVMInvalidSEAConfig()
# remove vlan tag from add_vlan_ids list for VEA.
vea_dev.addl_vlan_ids.remove(vlan_id)
self._update_virt_adapters_with_vea(host=host,
vios=vios,
vea_dev=vea_dev,
sea_dev=sea_dev,
isupdate=True)
ras.function_tracepoint(LOG, __name__, ras.TRACE_DEBUG, "Exit")
def _add_vlan_to_new_vea(self, host, vios, vlan_id, sea_dev,
dom_factory=dom.DOM_Factory()):
"""
Add a VLAN id to a new VEA and configure it on the SEA
:param host: The Host DOM object that VIOS runs on
:param vios: The VioServer DOM object that VIOS runs on
:param vlan_id: vlan id to configure
:param sea_dev: DOM SharedEthernetAdapter object
:param dom_factory: Used to create DOM objects.
"""
ras.function_tracepoint(LOG, __name__, ras.TRACE_DEBUG, "Enter")
if (not sea_dev or
not isinstance(sea_dev, dom.SharedEthernetAdapter) or
not utils.is_valid_vlan_id(vlan_id) or
vios.lpar_id < 1):
ras.trace(LOG, __name__, ras.TRACE_ERROR,
ras.msg('error', 'SEA_INVALIDSTATE'))
raise excp.IBMPowerVMInvalidSEAConfig()
# Be sure there's an available slot
slotnum = self._find_first_avail_slot(vios)
if not slotnum:
raise excp.IBMPowerVMMaximumVEAsPerSEA()
# We're about to use vlan_id, so remove it from the list of available
# vlan ids
vswitch_name = sea_dev.get_primary_vea().vswitch_name
if host.available_vid_pool[vswitch_name].count(vlan_id) > 0:
host.available_vid_pool[vswitch_name].remove(vlan_id)
else:
# vlan_id is not in the available list, which means it's
# used already. Throw an exception.
ras.trace(LOG, __name__, ras.TRACE_ERROR,
ras.msg('error', 'VLAN_NOT_AVAIL') %
{'vlan_id': str(vlan_id)})
raise excp.IBMPowerVMVlanNotAvailable()
try:
arbitrary_pvid = host.available_vid_pool[vswitch_name].pop()
except IndexError:
ras.trace(LOG, __name__, ras.TRACE_ERROR,
ras.msg('error', 'SEA_OUTOFVIDPOOL'))
raise excp.IBMPowerVMOutOfAvailableVIDPool()
# The arbitrary pvid will be used as pvid VEA's port
# vlan id during VEA creation.
port_vlan_id = arbitrary_pvid
# Create the VEA on the system. Note, this could raise an exception
# if something goes wrong. We'll just let that bubble up.
vea_devname, snum = self._create_vea_on_vios(vios, sea_dev, slotnum,
port_vlan_id, [vlan_id])
# If this was HMC, they returned the newly created slot
if snum:
slotnum = snum
# Create the VirtualEthernetAdapter DOM object
vea_dev = dom_factory.create_vea(
vea_devname,
vios,
slotnum,
port_vlan_id,
True,
1,
'Available',
True,
sea_dev.get_primary_vea().vswitch_name,
[vlan_id])
# now insert the new VEA to the SEA's virt_adapters list on VIOS
# and update SeaDevice object if successful
self._update_virt_adapters_with_vea(host=host,
vios=vios,
vea_dev=vea_dev,
sea_dev=sea_dev,
isupdate=False)
ras.trace(LOG, __name__, ras.TRACE_DEBUG,
'Successfully added vlan %(vlanid)d to VEA %(devname)s' %
{'vlanid': vlan_id, 'devname': vea_dev.name})
def _update_virt_adapters_with_vea(self, host, vios, vea_dev, sea_dev,
isupdate, new_pvid=None):
"""
This function handles updating an existing VEA already
configured on the SEA or adding a new VEA to SEA.
It requires that VEA has been updated with the required VLAN
before this function is called. This function updates the
PowerVM SEA device.
:param host: The Host DOM object that VIOS runs on
:param vios: The Vios DOM object that VIOS runs on
:param vea_dev: VeaDevice object
:param sea_dev: SeaDevice object
:param isupdate: Whether this is a VEA update request. This is mostly
interesting on IVM since you can't dynamically update
VEAs on IVM. HMC allows dynamic updating, so this
param will likely be ignored for HMC.
:param new_pvid: Only required if changing the PVID of the VEA.
Represents the VLAN ID of the VEA after the operation.
The vea_dev passed in should have the original VLAN.
"""
ras.function_tracepoint(LOG, __name__, ras.TRACE_DEBUG, "Enter")
if not vea_dev or not isinstance(vea_dev, dom.VirtualEthernetAdapter):
raise excp.IBMPowerVMInvalidVETHConfig(attr='not_vea_type')
if not sea_dev or not isinstance(sea_dev, dom.SharedEthernetAdapter):
raise excp.IBMPowerVMInvalidSEAConfig()
virt_adpts_list = []
if len(sea_dev.additional_veas) > 0:
for dev in sea_dev.additional_veas:
virt_adpts_list.append(dev.name.strip())
# Are we updating an existing VEA already on a SEA?
if isupdate:
# If the VIOS supports dynamically updating VEAs (only HMC can do
# this) AND there will be addl_vlan_ids left on the VEA, do the
# update dynamically. If there are no addl_vlan_ids left, then
# we don't need this VEA any more, so we want to fall into the else
# leg where the VEA will just be removed. Since we don't operate
# on primary VEAs, we don't need to worry about this code removing
# a primary VEA, which would have no addl_vlan_ids either.
if self._supports_dynamic_update(vios) and\
len(vea_dev.addl_vlan_ids) > 0 and new_pvid is None:
self._dynamic_update_vea(vios, sea_dev, vea_dev)
else:
# IVM doesn't support dynamic adding VID to the addl_vlan_ids
# list, nor do older firmware versions of HMC, so it has to be
# done in two steps: remove the original addl_vlan_ids VEA and
# add it back with additional vid added to addl_vlan_ids list.
ras.trace(LOG, __name__, ras.TRACE_DEBUG,
'update virt_adapters')
sea_dev.remove_vea_from_sea(vea_dev)
# Remove VEA from local adapter list
if virt_adpts_list.count(vea_dev.name) > 0:
virt_adpts_list.remove(vea_dev.name)
else:
ras.trace(LOG, __name__, ras.TRACE_ERROR,
ras.msg('error', 'SEA_VEANOTFOUND') %
{'veaname': vea_dev.name,
'seaname': sea_dev.name})
raise excp.IBMPowerVMInvalidSEAConfig()
# Remove the VEA from the system
self._delete_vea_on_vios(vios, vea_dev, sea_dev,
virt_adpts_list)
# If there were no addl_vlan_ids on the VEA, then the vlan
# we're unplugging is the pvid of the VEA. Thus, we're just
# removing the old VEA and not adding a new.
if len(vea_dev.addl_vlan_ids) == 0:
# addl_vlan_ids is empty, this gets VEA removed and
# need to return port_vlan_id back to the available pool
vswitch_name = vea_dev.vswitch_name
host.available_vid_pool[vswitch_name].append(vea_dev.pvid)
ras.trace(LOG, __name__, ras.TRACE_DEBUG,
'virt_adapter %(veaname)s removed from SEA '
'%(seaname)s' %
{'veaname': vea_dev.name,
'seaname': sea_dev.name})
return
# If we are to update the PVID, set it on the vea_dev prior to
# the creation of the device.
if new_pvid is not None:
vea_dev.pvid = new_pvid
new_veadevname, snum = self._create_vea_on_vios(vios, sea_dev,
vea_dev.slot,
vea_dev.pvid,
vea_dev.addl_vlan_ids)
# update existing VEA's devname in case it was changed
sea_dev.remove_vea_from_sea(vea_dev)
vea_dev.name = new_veadevname
# If we got a slot number back (ie, HMC), set it in the dev.
if snum:
vea_dev.slot = snum
sea_dev.add_vea_to_sea(vea_dev)
# Add the VEA to the local adapter list
virt_adpts_list.append(vea_dev.name)
# Attach VEA to SEA on the system
self._update_sea(sea_dev, virt_adpts_list)
ras.trace(LOG, __name__, ras.TRACE_DEBUG,
'Successfully updated VEA %(veaname)s on SEA '
'%(seaname)s' %
{'veaname': vea_dev.name, 'seaname': sea_dev.name})
else:
# Just adding a brand new VEA to a SEA. The VEA is actually
# already created, it just needs to be added to the SEA.
# Add the VEA to the local adapter list
virt_adpts_list.append(vea_dev.name)
# Attach VEA to SEA on the system
self._update_sea(sea_dev, virt_adpts_list)
# Update the DOM with the new setup
sea_dev.add_vea_to_sea(vea=vea_dev)
vios.add_adapter(sea_dev)
host.vio_servers = [vios]
ras.trace(LOG, __name__, ras.TRACE_DEBUG,
'Successfully added new VEA %(veaname)s to SEA '
'%(seaname)s' %
{'veaname': vea_dev.name, 'seaname': sea_dev.name})
"""
ALL METHODS BELOW MUST BE IMPLEMENTED BY SUBCLASSES
"""
def _dynamic_update_vea(self, vios, sea, vea):
"""
This method will only work if _supports_dynamic_update returns True.
Will dynamically update VLANs on an existing VEA.
:param vios: VioServer DOM object representing VIOS this is being
created on.
:param sea: SharedEthernetAdapter DOM object that owns the VEA
:param vea: VirtualEthernetAdapter DOM object that represents the
element on the system to update. Should have the updated
information about the VLANs on it already.
"""
raise NotImplementedError('_dynamic_update_vea not implemented')
def _create_vea_on_vios(self, vios, sea, slotnum, port_vlan_id,
addl_vlan_ids):
"""
This method will create the 802.1Q VirtualEthernetAdapter on the
| |
<filename>tests/fairseq_layers.py
"""
Copyright 2021 The LightSeq Team
Copyright Facebook Fairseq
We use layers from Facebook Fairseq as our baseline for unit test
"""
from typing import Dict, List, Optional, Callable
import math
from copy import deepcopy
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.modules import LayerNorm, MultiheadAttention
from fairseq.modules.fairseq_dropout import FairseqDropout
from fairseq.modules.quant_noise import quant_noise
from torch import Tensor
class TransformerEncoderLayer(nn.Module):
"""Encoder layer implemented by fairseq.
This version only removes the "args" parameter, no other changes
In the original paper each operation (multi-head attention or FFN) is
postprocessed with: `dropout -> add residual -> layernorm`.
In the tensor2tensor code they suggest that learning is more robust when
preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
normalize_before to True.
"""
def __init__(
self,
embed_dim,
ffn_embed_dim,
nhead,
dropout,
attn_dropout,
activation_dropout,
normalize_before=True,
activation_fn="relu",
quant_noise=0,
quant_noise_block_size=8,
):
super().__init__()
self.embed_dim = embed_dim
self.quant_noise = quant_noise
self.quant_noise_block_size = quant_noise_block_size
self.self_attn = self.build_self_attention(self.embed_dim, nhead, attn_dropout)
self.self_attn_layer_norm = LayerNorm(self.embed_dim)
self.dropout_module = FairseqDropout(
dropout, module_name=self.__class__.__name__
)
self.activation_fn = utils.get_activation_fn(activation=activation_fn)
activation_dropout_p = activation_dropout
self.activation_dropout_module = FairseqDropout(
float(activation_dropout_p), module_name=self.__class__.__name__
)
self.normalize_before = normalize_before
self.fc1 = self.build_fc1(
self.embed_dim,
ffn_embed_dim,
self.quant_noise,
self.quant_noise_block_size,
)
self.fc2 = self.build_fc2(
ffn_embed_dim,
self.embed_dim,
self.quant_noise,
self.quant_noise_block_size,
)
self.final_layer_norm = LayerNorm(self.embed_dim)
def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(
nn.Linear(input_dim, output_dim), p=q_noise, block_size=qn_block_size
)
def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(
nn.Linear(input_dim, output_dim), p=q_noise, block_size=qn_block_size
)
def build_self_attention(self, embed_dim, nhead, attn_dropout):
return MultiheadAttention(
embed_dim,
nhead,
dropout=attn_dropout,
self_attention=True,
q_noise=self.quant_noise,
qn_block_size=self.quant_noise_block_size,
)
def residual_connection(self, x, residual):
return residual + x
def upgrade_state_dict_named(self, state_dict, name):
"""
Rename layer norm states from `...layer_norms.0.weight` to
`...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to
`...final_layer_norm.weight`
"""
layer_norm_map = {"0": "self_attn_layer_norm", "1": "final_layer_norm"}
for old, new in layer_norm_map.items():
for m in ("weight", "bias"):
k = "{}.layer_norms.{}.{}".format(name, old, m)
if k in state_dict:
state_dict["{}.{}.{}".format(name, new, m)] = state_dict[k]
del state_dict[k]
def forward(self, x, encoder_padding_mask, attn_mask: Optional[Tensor] = None):
"""
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor): binary ByteTensor of shape
`(batch, seq_len)` where padding elements are indicated by ``1``.
attn_mask (ByteTensor): binary tensor of shape `(tgt_len, src_len)`,
where `tgt_len` is the length of output and `src_len` is the
length of input, though here both are equal to `seq_len`.
`attn_mask[tgt_i, src_j] = 1` means that when calculating the
embedding for `tgt_i`, we exclude (mask out) `src_j`. This is
useful for strided self-attention.
Returns:
encoded output of shape `(seq_len, batch, embed_dim)`
"""
# anything in original attn_mask = 1, becomes -1e8
# anything in original attn_mask = 0, becomes 0
# Note that we cannot use -inf here, because at some edge cases,
# the attention weight (before softmax) for some padded element in query
# will become -inf, which results in NaN in model parameters
if attn_mask is not None:
attn_mask = attn_mask.masked_fill(attn_mask.to(torch.bool), -1e8)
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
x, _ = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=encoder_padding_mask,
attn_mask=attn_mask,
)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = self.activation_dropout_module(x)
x = self.fc2(x)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.final_layer_norm(x)
return x
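# Usage sketch (hyper-parameters are illustrative, not taken from the original tests):
#   layer = TransformerEncoderLayer(embed_dim=768, ffn_embed_dim=3072, nhead=12,
#                                   dropout=0.1, attn_dropout=0.1, activation_dropout=0.1)
#   x = torch.rand(16, 4, 768)                     # (seq_len, batch, embed_dim)
#   pad = torch.zeros(4, 16, dtype=torch.bool)     # (batch, seq_len), no padding
#   y = layer(x, encoder_padding_mask=pad)         # -> (16, 4, 768)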
class TransformerSentenceEncoderLayer(nn.Module):
"""
Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained
models.
"""
def __init__(
self,
embedding_dim: int = 768,
ffn_embedding_dim: int = 3072,
num_attention_heads: int = 8,
dropout: float = 0.1,
attention_dropout: float = 0.1,
activation_dropout: float = 0.1,
activation_fn: str = "relu",
export: bool = False,
q_noise: float = 0.0,
qn_block_size: int = 8,
init_fn: Callable = None,
) -> None:
super().__init__()
if init_fn is not None:
init_fn()
# Initialize parameters
self.embedding_dim = embedding_dim
self.dropout_module = FairseqDropout(
dropout, module_name=self.__class__.__name__
)
self.activation_dropout_module = FairseqDropout(
activation_dropout, module_name=self.__class__.__name__
)
# Initialize blocks
self.activation_fn = utils.get_activation_fn(activation_fn)
self.self_attn = self.build_self_attention(
self.embedding_dim,
num_attention_heads,
dropout=attention_dropout,
self_attention=True,
q_noise=q_noise,
qn_block_size=qn_block_size,
)
# layer norm associated with the self attention layer
self.self_attn_layer_norm = LayerNorm(self.embedding_dim, export=export)
self.fc1 = self.build_fc1(
self.embedding_dim,
ffn_embedding_dim,
q_noise=q_noise,
qn_block_size=qn_block_size,
)
self.fc2 = self.build_fc2(
ffn_embedding_dim,
self.embedding_dim,
q_noise=q_noise,
qn_block_size=qn_block_size,
)
# layer norm associated with the position wise feed-forward NN
self.final_layer_norm = LayerNorm(self.embedding_dim, export=export)
def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)
def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)
def build_self_attention(
self,
embed_dim,
num_attention_heads,
dropout,
self_attention,
q_noise,
qn_block_size,
):
return MultiheadAttention(
embed_dim,
num_attention_heads,
dropout=dropout,
self_attention=True,
q_noise=q_noise,
qn_block_size=qn_block_size,
)
def forward(
self,
x: torch.Tensor,
self_attn_mask: Optional[torch.Tensor] = None,
self_attn_padding_mask: Optional[torch.Tensor] = None,
):
"""
LayerNorm is applied either before or after the self-attention/ffn
modules similar to the original Transformer implementation.
"""
residual = x
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
need_weights=False,
attn_mask=self_attn_mask,
)
x = self.dropout_module(x)
x = residual + x
x = self.self_attn_layer_norm(x)
residual = x
x = self.activation_fn(self.fc1(x))
x = self.activation_dropout_module(x)
x = self.fc2(x)
x = self.dropout_module(x)
x = residual + x
x = self.final_layer_norm(x)
return x, attn
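# Usage sketch: with the defaults above (768 dim, 8 heads) the layer takes a
# (seq_len, batch, 768) tensor and returns (x, attn); attn is None because the
# self-attention call passes need_weights=False.
#   layer = TransformerSentenceEncoderLayer()
#   out, attn = layer(torch.rand(16, 4, 768))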
class TransformerDecoderLayer(nn.Module):
"""Decoder layer implemented by fairseq.
This version only removes the "args" parameter, no other changes
"""
def __init__(
self,
embed_dim,
ffn_embed_dim,
nhead,
encoder_embed_dim,
dropout,
attn_dropout,
activation_dropout,
normalize_before=True,
activation_fn="relu",
quant_noise=0,
quant_noise_block_size=8,
cross_self_attention=False,
no_encoder_attn=False,
add_bias_kv=False,
add_zero_attn=False,
):
super().__init__()
self.embed_dim = embed_dim
self.dropout_module = FairseqDropout(
dropout, module_name=self.__class__.__name__
)
self.quant_noise = quant_noise
self.quant_noise_block_size = quant_noise_block_size
self.cross_self_attention = cross_self_attention
self.self_attn = self.build_self_attention(
self.embed_dim,
nhead,
attn_dropout,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
)
self.activation_fn = utils.get_activation_fn(activation=activation_fn)
activation_dropout_p = activation_dropout
self.activation_dropout_module = FairseqDropout(
float(activation_dropout_p), module_name=self.__class__.__name__
)
self.normalize_before = normalize_before
export = False
self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
if no_encoder_attn:
self.encodec_attn = None
self.encodec_attn_layer_norm = None
else:
self.encodec_attn = self.build_encoder_attention(
self.embed_dim, encoder_embed_dim, attn_dropout, nhead
)
self.encodec_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
self.fc1 = self.build_fc1(
self.embed_dim,
ffn_embed_dim,
self.quant_noise,
self.quant_noise_block_size,
)
self.fc2 = self.build_fc2(
ffn_embed_dim,
self.embed_dim,
self.quant_noise,
self.quant_noise_block_size,
)
self.final_layer_norm = LayerNorm(self.embed_dim, export=export)
self.need_attn = True
self.onnx_trace = False
def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)
def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)
def build_self_attention(
self, embed_dim, nhead, attn_dropout, add_bias_kv=False, add_zero_attn=False
):
return MultiheadAttention(
embed_dim,
nhead,
dropout=attn_dropout,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
self_attention=not self.cross_self_attention,
q_noise=self.quant_noise,
qn_block_size=self.quant_noise_block_size,
)
def build_encoder_attention(
self, embed_dim, encoder_embed_dim, attn_dropout, nhead
):
return MultiheadAttention(
embed_dim,
nhead,
kdim=encoder_embed_dim,
vdim=encoder_embed_dim,
dropout=attn_dropout,
encoder_decoder_attention=True,
q_noise=self.quant_noise,
qn_block_size=self.quant_noise_block_size,
)
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def residual_connection(self, x, residual):
return residual + x
def forward(
self,
x,
encoder_out: Optional[torch.Tensor] = None,
encoder_padding_mask: Optional[torch.Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
prev_self_attn_state: Optional[List[torch.Tensor]] = None,
prev_attn_state: Optional[List[torch.Tensor]] = None,
self_attn_mask: Optional[torch.Tensor] = None,
self_attn_padding_mask: Optional[torch.Tensor] = None,
need_attn: bool = False,
need_head_weights: bool = False,
):
"""
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor, optional): binary
ByteTensor of shape `(batch, src_len)` where padding
elements are indicated by ``1``.
need_attn (bool, optional): return attention weights
need_head_weights (bool, optional): return attention weights
for each head (default: return average over heads).
Returns:
encoded output of shape `(seq_len, batch, embed_dim)`
"""
if need_head_weights:
need_attn = True
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
if prev_self_attn_state is not None:
prev_key, prev_value = prev_self_attn_state[:2]
saved_state: Dict[str, Optional[Tensor]] = {
"prev_key": prev_key,
"prev_value": prev_value,
}
if len(prev_self_attn_state) >= 3:
saved_state["prev_key_padding_mask"] = prev_self_attn_state[2]
assert incremental_state is not None
self.self_attn._set_input_buffer(incremental_state, saved_state)
_self_attn_input_buffer = self.self_attn._get_input_buffer(incremental_state)
if self.cross_self_attention and not (
incremental_state is not None
and _self_attn_input_buffer is not None
and "prev_key" in _self_attn_input_buffer
):
if self_attn_mask is not None:
assert encoder_out is not None
self_attn_mask = torch.cat(
(x.new_zeros(x.size(0), encoder_out.size(0)), self_attn_mask), dim=1
)
if self_attn_padding_mask is not None:
if encoder_padding_mask is None:
assert encoder_out is not None
encoder_padding_mask = self_attn_padding_mask.new_zeros(
encoder_out.size(1), encoder_out.size(0)
)
self_attn_padding_mask = torch.cat(
(encoder_padding_mask, self_attn_padding_mask), dim=1
)
assert encoder_out is not None
y = torch.cat((encoder_out, x), dim=0)
else:
y = x
x, attn = self.self_attn(
query=x,
key=y,
value=y,
key_padding_mask=self_attn_padding_mask,
incremental_state=incremental_state,
need_weights=False,
attn_mask=self_attn_mask,
)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
if self.encodec_attn is not None and encoder_out is not None:
residual = x
if self.normalize_before:
x = self.encodec_attn_layer_norm(x)
if prev_attn_state is not None:
| |
#%%
import os
import xml
import heapq
import warnings
import numpy as np
import pandas as pd
from tqdm import tqdm
from shapely import wkt
import geopandas as gpd
from xml.dom import minidom
from collections import deque
import matplotlib.pyplot as plt
from haversine import haversine, Unit
from shapely.geometry import Point, LineString, box
from utils.classes import Digraph
from utils.pickle_helper import PickleSaver
from utils.log_helper import LogHelper, logbook
from utils.interval_helper import merge_intervals
from coords.coordTransfrom_shp import coord_transfer
from utils.geo_helper import gdf_to_geojson, gdf_to_postgis, edge_parallel_offset
from setting import filters as way_filters
from setting import SZ_BBOX, GBA_BBOX, PCL_BBOX, FT_BBOX
warnings.filterwarnings('ignore')
#%%
class Digraph_OSM(Digraph):
def __init__(self,
bbox=None,
xml_fn='../input/futian.xml',
road_info_fn='../input/osm_road_speed.xlsx',
combine_link=True,
reverse_edge=True,
two_way_offeset=True,
logger=None,
upload_to_db='shenzhen',
*args, **kwargs):
assert not(bbox is None and xml_fn is None), "Please define one of the bbox or the xml path."
if bbox is not None:
xml_fn = f"../cache/osm_{'_'.join(map(str, bbox))}.xml"
self.download_map(xml_fn, bbox, True)
self.df_nodes, self.df_edges = self.get_road_network(xml_fn, road_info_fn)
self.node_dis_memo = {}
self.route_planning_memo = {}
self.logger = logger
super().__init__(self.df_edges[['s', 'e', 'dist']].values, self.df_nodes.to_dict(orient='index'), *args, **kwargs)
self.df_edges.set_crs('EPSG:4326', inplace=True)
self.df_nodes.set_crs('EPSG:4326', inplace=True)
if combine_link:
self.df_edges = self.combine_rids()
self.df_edges.reset_index(drop=True, inplace=True)
if reverse_edge:
self.df_edges = self.add_reverse_edge(self.df_edges)
self.df_edges.reset_index(drop=True, inplace=True)
self.df_edges.loc[:, 'eid'] = self.df_edges.index
if combine_link or reverse_edge:
# self.df_nodes = self.df_nodes.loc[ np.unique(np.hstack((self.df_edges.s.values, self.df_edges.e.values))),:]
super().__init__(self.df_edges[['s', 'e', 'dist']].values, self.df_nodes.to_dict(orient='index'), *args, **kwargs)
if two_way_offeset:
self.df_edges = self.edge_offset()
order_atts = ['eid', 'rid', 'name', 's', 'e', 'order', 'road_type', 'dir', 'lanes', 'dist', 'oneway', 'is_ring', 'geometry', 'geom_origin']
self.df_edges = self.df_edges[order_atts]
def download_map(self, fn, bbox, verbose=False):
"""Download OSM map of bbox from Internet.
Args:
fn (function): [description]
bbox ([type]): [description]
verbose (bool, optional): [description]. Defaults to False.
"""
if os.path.exists(fn):
return
if verbose:
print("Downloading {}".format(fn))
if isinstance(bbox, (list, np.ndarray)):
bbox = ",".join(map(str, bbox))
import requests
url = f'http://overpass-api.de/api/map?bbox={bbox}'
r = requests.get(url, stream=True)
with open(fn, 'wb') as ofile:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
ofile.write(chunk)
if verbose:
print("Downloaded success.\n")
return True
def get_road_network(self,
fn,
fn_road,
in_sys='wgs',
out_sys='wgs',
signals=True,
road_type_filter=way_filters['auto']['highway'],
keep_cols=['name', 'rid', 'order', 'road_type', 'lanes', 's', 'e', 'dist', 'oneway', 'maxspeed', 'geometry']
):
dom = xml.dom.minidom.parse(fn)
root = dom.documentElement
nodelist = root.getElementsByTagName('node')
waylist = root.getElementsByTagName('way')
# nodes
nodes = []
for node in tqdm(nodelist, 'Parse nodes: \t'):
pid = node.getAttribute('id')
taglist = node.getElementsByTagName('tag')
info = {'pid': int(pid),
'y':float(node.getAttribute('lat')),
'x':float(node.getAttribute('lon'))}
for tag in taglist:
if tag.getAttribute('k') == 'traffic_signals':
info['traffic_signals'] = tag.getAttribute('v')
nodes.append(info)
nodes = gpd.GeoDataFrame(nodes)
nodes.loc[:, 'geometry'] = nodes.apply(lambda i: Point(i.x, i.y), axis=1)
# FIXME "None of ['pid'] are in the columns"
nodes.set_index('pid', inplace=True)
if in_sys != out_sys:
nodes = coord_transfer(nodes, in_sys, out_sys)
nodes.loc[:,['x']], nodes.loc[:,['y']] = nodes.geometry.x, nodes.geometry.y
# traffic_signals
self.traffic_signals = nodes[~nodes.traffic_signals.isna()].index.unique()
# edges
edges = []
for way in tqdm(waylist, 'Parse ways: \t'):
taglist = way.getElementsByTagName('tag')
info = { tag.getAttribute('k'): tag.getAttribute('v') for tag in taglist }
if 'highway' not in info or info['highway'] in road_type_filter:
continue
info['rid'] = int(way.getAttribute('id'))
ndlist = way.getElementsByTagName('nd')
nds = []
for nd in ndlist:
nd_id = nd.getAttribute('ref')
nds.append( nd_id )
for i in range( len(nds)-1 ):
edges.append( { 'order': i, 's':nds[i], 'e':nds[i+1], 'road_type': info['highway'], **info} )
edges = pd.DataFrame( edges )
edges.loc[:, ['s','e']] = pd.concat((edges.s.astype(np.int64), edges.e.astype(np.int64)), axis=1)
edges = edges.merge( nodes[['x','y']], left_on='s', right_index=True ).rename(columns={'x':'x0', 'y':'y0'}) \
.merge( nodes[['x','y']], left_on='e', right_index=True ).rename(columns={'x':'x1', 'y':'y1'})
edges = gpd.GeoDataFrame( edges, geometry = edges.apply( lambda i: LineString( [[i.x0, i.y0], [i.x1, i.y1]] ), axis=1 ) )
edges.loc[:, 'dist'] = edges.apply(lambda i: haversine((i.y0, i.x0), (i.y1, i.x1), unit=Unit.METERS), axis=1)
edges.sort_values(['rid', 'order'], inplace=True)
# nodes filter
ls = np.unique(np.hstack((edges.s.values, edges.e.values)))
nodes = nodes.loc[ls,:]
if fn_road and os.path.exists(fn_road):
road_speed = pd.read_excel(fn_road)[['road_type', 'v']]
edges = edges.merge( road_speed, on ='road_type' )
keep_cols = [i for i in keep_cols if i in edges.columns]
return nodes, edges[keep_cols]
def add_reverse_edge(self, df_edges):
"""Add reverse edge.
Args:
df_edges (gpd.GeoDataFrame): The edge file parsed from OSM.
Check:
rid = 34900355
net.df_edges.query( f"rid == {rid} or rid == -{rid}" ).sort_values(['order','rid'])
"""
def _judge_oneway(oneway_flag):
# https://wiki.openstreetmap.org/wiki/Key:oneway
# reversible, alternating: https://wiki.openstreetmap.org/wiki/Tag:oneway%3Dreversible
if oneway_flag == 'yes' or oneway_flag == '1' or oneway_flag == True:
flag = True
elif oneway_flag == '-1':
flag = True
# way.is_reversed = True
elif oneway_flag == 'no' or oneway_flag == '0' or oneway_flag == False:
flag = False
elif oneway_flag in ['reversible', 'alternating']:
flag = False
else:
flag = False
if self.logger is not None:
self.logger.warning(f'Unrecognized oneway value: {oneway_flag}')
return flag
df_edges.oneway = df_edges.oneway.fillna('no').apply(_judge_oneway)
df_edges.loc[:, 'is_ring'] = df_edges.geometry.apply( lambda x: x.is_ring)
df_edge_rev = df_edges.query('oneway == False and not is_ring')
df_edge_rev.loc[:, 'order'] = -df_edge_rev.order - 1
df_edge_rev.loc[:, 'geometry'] = df_edge_rev.geometry.apply( lambda x: LineString(x.coords[::-1]) )
df_edge_rev.rename(columns={'s':'e', 'e':'s'}, inplace=True)
df_edge_rev.loc[:, 'dir'] = -1
df_edges.loc[:, 'dir'] = 1
return df_edges.append(df_edge_rev).reset_index(drop=True)
def get_intermediate_point(self):
"""Identify the road segment with nodes of 1 indegree and 1 outdegree.
Returns:
[list]: Road segement list.
"""
return self.degree.query( "indegree == 1 and outdegree == 1" ).index.unique().tolist()
def combine_links_of_rid(self, rid, omit_rids, df_edges, plot=False, save_folder=None):
"""Combine OSM link.
Args:
rid (int): The id of link in OSM.
omit_rids (df): Subset of df_edges whose start point should meet: 1) indegree 1 and outdegree 1; 2) not a traffic_signals node.
df_edges (df, optional): [description]. Defaults to net.df_edges.
Returns:
pd.DataFrame: The links after combination.
Example:
`new_roads = combine_links_of_rid(rid=25421053, omit_rids=omit_rids, plot=True, save_folder='../cache')`
"""
new_roads = df_edges.query(f"rid == @rid").set_index('order')
combine_orders = omit_rids.query(f"rid == @rid").order.values
combine_seg_indxs = merge_intervals([[x-1, x] for x in combine_orders if x > 0])
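# Added note: merge_intervals (from utils.interval_helper) is assumed to
# collapse the overlapping [order-1, order] pairs into contiguous
# (start, end, ...) spans; each merged span is then rebuilt below as a single
# LineString whose length is the sum of the original segment distances.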
drop_index = []
for start, end, _ in combine_seg_indxs:
segs = new_roads.query(f"{start} <= order <= {end}")
pids = np.append(segs.s.values, segs.iloc[-1]['e'])
new_roads.loc[start, 'geometry'] = LineString([[self.node[p]['x'], self.node[p]['y']] for p in pids])
new_roads.loc[start, 'dist'] = segs.dist.sum()
new_roads.loc[start, 'e'] = segs.iloc[-1]['e']
drop_index += [ i for i in range(start+1, end+1) ]
new_roads.drop(index=drop_index, inplace=True)
new_roads.reset_index(inplace=True)
if save_folder is not None:
gdf_to_geojson(new_roads, os.path.join(save_folder, f"road_{rid}_after_combination.geojson"))
if plot:
new_roads.plot()
return new_roads
def combine_rids(self, ):
omit_pids = [ x for x in self.get_intermediate_point() if x not in self.traffic_signals ]
omit_records = self.df_edges.query( f"s in @omit_pids" )
omit_rids = omit_records.rid.unique().tolist()
keep_records = self.df_edges.query( f"rid not in @omit_rids" )
res = []
for rid in tqdm(omit_rids, 'Combine links: \t'):
res.append(self.combine_links_of_rid(rid, omit_records, self.df_edges))
comb_rids = gpd.GeoDataFrame(pd.concat(res))
comb_rids = keep_records.append(comb_rids).reset_index(drop=True)
return comb_rids
def edge_offset(self,):
df_edge = self.df_edges.copy()
df_edge.loc[:, 'geom_origin'] = df_edge.geometry.apply(lambda x: x.to_wkt())
geom_offset = df_edge[~df_edge.oneway].apply( lambda x: edge_parallel_offset(x, logger=self.logger), axis=1 )
df_edge.loc[geom_offset.index, 'geometry'] = geom_offset
return df_edge
def cal_nodes_dis(self, o, d):
assert o in self.node and d in self.node, "Check the input o and d."
if (o, d) in self.node_dis_memo:
return self.node_dis_memo[(o, d)]
return haversine((self.node[o]['y'], self.node[o]['x']), (self.node[d]['y'], self.node[d]['x']), unit=Unit.METERS)
def a_star(self, origin, dest, max_layer=500, max_dist=10**4, verbose=False, plot=False):
"""Route planning by A star algm
Args:
origin ([type]): [description]
dest ([type]): [description]
verbose (bool, optional): [description]. Defaults to False.
plot (bool, optional): [description]. Defaults to False.
Returns:
dict: The route planning result with path, cost and status.
status_dict = {-1: 'unreachable'}
"""
if (origin, dest) in self.route_planning_memo:
res = self.route_planning_memo[(origin, dest)]
return res
if origin not in self.graph or dest not in self.graph:
# print(f"Edge({origin}, {dest})",
# f"{', origin not in graph' if origin not in self.graph else ', '}",
# f"{', dest not in graph' if dest not in self.graph else ''}")
return None
frontier = [(0, origin)]
came_from, distance = {}, {}
came_from[origin] = None
distance[origin] = 0
layer = 0
while frontier:
_, cur = heapq.heappop(frontier)
if cur == dest or layer > max_layer:
break
for nxt in self.graph[cur]:
if nxt not in self.graph:
continue
new_cost = distance[cur] + self.edge[(cur, nxt)]
if nxt not in distance or new_cost < distance[nxt]:
distance[nxt] = new_cost
if distance[nxt] > max_dist:
continue
heapq.heappush(frontier, (new_cost+self.cal_nodes_dis(dest, nxt), nxt) )
came_from[nxt] = cur
layer += 1
if cur != dest:
res = {'path': None, 'cost': np.inf, "status": -1}
self.route_planning_memo[(origin, dest)] = res
return res
# reconstruct the route
route, queue = [dest], deque([dest])
while queue:
node = queue.popleft()
# assert node in came_from, f"({origin}, {dest}), way to {node}"
if came_from[node] is None:
continue
route.append(came_from[node])
queue.append(came_from[node])
route = route[::-1]
res = {'path':route, 'cost': distance[dest], 'status':1}
self.route_planning_memo[(origin, dest)] = res
if plot:
path_lst = gpd.GeoDataFrame([ { 's': route[i], 'e': route[i+1]} for i in range(len(route)-1) ])
ax = path_lst.merge(self.df_edges, on=['s', 'e']).plot()
return res
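# Hedged usage sketch (added for illustration; the node ids below are
# placeholders, not real OSM ids, and PCL_BBOX comes from the settings import
# at the top of this file):
# net = Digraph_OSM(bbox=PCL_BBOX, combine_link=True, reverse_edge=True)
# res = net.a_star(origin=1234567890, dest=987654321)
# if res is not None and res['status'] == 1:
#     print(res['cost'], len(res['path']))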
| |
'T':7, 'F':11, 'H':10, 'K':9,
'D':8, 'C':6, 'R':11, 'P':7, 'Q':9, 'N':8, 'W':14, '-':0}
parse = GPCRDBParsingPDB()
ref_length = 0
conserved_count = 0
non_cons_count = 0
trimmed_res_num = 0
switched_count = 0
non_cons_res_templates, conserved_residues = OrderedDict(), OrderedDict()
trimmed_residues = []
inconsistencies = []
if self.revise_xtal==True:
ref_prot = self.reference_protein.parent
else:
ref_prot = self.reference_protein
for incons in self.statistics.info_dict['pdb_db_inconsistencies']:
inconsistencies.append(list(incons.keys())[0])
for ref_seg, temp_seg, aligned_seg in zip(reference_dict, template_dict, alignment_dict):
if len(ref_seg)>4:
segment = ref_seg[:4]
else:
segment = ref_seg
for ref_res, temp_res, aligned_res in zip(reference_dict[ref_seg], template_dict[temp_seg],
alignment_dict[aligned_seg]):
if self.revise_xtal==True and reference_dict[ref_seg][ref_res]!=template_dict[temp_seg][temp_res]:
alignment_dict[aligned_seg][aligned_res]='.'
if template_dict[temp_seg][temp_res]=='/':
continue
if reference_dict[ref_seg][ref_res]!='-':
ref_length+=1
else:
trimmed_residues.append(ref_res.replace('x','.'))
if '?' in temp_res:
trimmed_residues.append(ref_res.replace('x','.'))
trimmed_res_num+=1
non_cons_count+=1
continue
if '-term' in ref_seg and (template_dict[temp_seg][temp_res]=='-' or
reference_dict[ref_seg][ref_res]!=template_dict[temp_seg][temp_res] or
len(main_pdb_array[ref_seg][ref_res])<atom_num_dict[template_dict[temp_seg][temp_res]]):
trimmed_residues.append(ref_res.replace('x','.'))
trimmed_res_num+=1
non_cons_count+=1
continue
if (ref_res not in inconsistencies and
alignment_dict[aligned_seg][aligned_res]!='.' and
alignment_dict[aligned_seg][aligned_res]!='x' and
alignment_dict[aligned_seg][aligned_res]!='-' and
alignment_dict[aligned_seg][aligned_res]!='/' and
len(main_pdb_array[ref_seg][ref_res.replace('x','.')])>=atom_num_dict[template_dict[temp_seg][temp_res]]):
try:
rot_test = Rotamer.objects.filter(structure=self.main_structure,
residue__display_generic_number__label=dgn(ref_res,
self.main_structure.protein_conformation))
rot_test = self.right_rotamer_select(rot_test)
if rot_test.missing_atoms==True:
alignment_dict[aligned_seg][aligned_res]='.'
template_dict[temp_seg][temp_res]='G'
else:
raise Exception()
except:
conserved_residues[ref_res] = alignment_dict[aligned_seg][aligned_res]
conserved_count+=1
if 'x' not in ref_res:
num_in_loop = parse.gn_num_extract(ref_res,'|')[1]
try:
this_res = list(Residue.objects.filter(protein_conformation=self.prot_conf,
protein_segment__slug=segment))[num_in_loop-1]
except:
trimmed_residues.append(ref_res.replace('x','.'))
continue
seq_num = str(this_res.sequence_number)
try:
self.template_source = update_template_source(self.template_source,[seq_num],self.template_source[segment][seq_num][0],segment,
just_rot=True)
key_in_template_source = seq_num
except:
self.template_source = update_template_source(self.template_source,[ggn(this_res.display_generic_number.label)],
self.template_source[segment][ggn(this_res.display_generic_number.label)][0],
segment,just_rot=True)
key_in_template_source = ggn(this_res.display_generic_number.label)
else:
try:
self.template_source = update_template_source(self.template_source,[ref_res],self.template_source[segment][ref_res][0],segment,
just_rot=True)
key_in_template_source = ref_res
except:
missing_i = list(reference_dict[ref_seg].keys()).index(ref_res)
gaps_before = [x for x in list(reference_dict[ref_seg].keys())[:missing_i] if reference_dict[ref_seg][x]=='-']
this_loop = Residue.objects.filter(protein_conformation__protein=self.reference_protein, protein_segment__slug=ref_seg[:4])
right_res = str(this_loop[missing_i-len(gaps_before)].sequence_number)
self.template_source = update_template_source(self.template_source,[right_res],self.template_source[segment][right_res][0],segment,
just_rot=True)
key_in_template_source = right_res
if '_dis' in ref_seg or (ref_seg=='ECL2' and self.template_source['ECL2'][key_in_template_source][0]!=self.main_structure
and '|' in ref_res):
trimmed_residues.append(ref_res.replace('x','.'))
gn = ref_res
if ((gn in inconsistencies or alignment_dict[aligned_seg][aligned_res]=='.' and
reference_dict[ref_seg][gn]!=template_dict[temp_seg][gn]) or (template_dict[temp_seg][temp_res]!='x' and
len(main_pdb_array[ref_seg][ref_res.replace('x','.')])<atom_num_dict[template_dict[temp_seg][temp_res]])):
###########
# if (gn in inconsistencies) or (template_dict[temp_seg][temp_res]!='x' and
# len(main_pdb_array[ref_seg][ref_res.replace('x','.')])<atom_num_dict[template_dict[temp_seg][temp_res]]) or (reference_dict[ref_seg][gn]==template_dict[temp_seg][gn] or
# gn in inconsistencies or alignment_dict[aligned_seg][aligned_res]=='.'):
###########
non_cons_count+=1
gn_ = str(ref_res).replace('x','.')
no_match = True
if '|' in gn_:
try:
list_num = int(gn.split('|')[1])-1
gn = ggn(list(Residue.objects.filter(protein_conformation__protein=ref_prot,
protein_segment__slug=ref_seg.split('_')[0]))[list_num].display_generic_number.label)
gn_ = gn.replace('x','.')
except:
pass
this_state_structs_with_resi, other_state_structs_with_resi = [],[]
main_pdb_array, template_dict, non_cons_res_templates, switched_count, no_match = self.find_and_switch_rotamer(self.similarity_table, gn, gn_,
reference_dict, ref_seg, ref_res, main_pdb_array, atom_num_dict, template_dict, temp_seg, temp_res, non_cons_res_templates, switched_count, no_match, segment)
if no_match==True:
main_pdb_array, template_dict, non_cons_res_templates, switched_count, no_match = self.find_and_switch_rotamer(self.similarity_table_other_states, gn, gn_,
reference_dict, ref_seg, ref_res, main_pdb_array, atom_num_dict, template_dict, temp_seg, temp_res, non_cons_res_templates, switched_count, no_match, segment)
if no_match==True:
try:
if 'free' not in ref_seg:
residue = main_pdb_array[ref_seg][str(ref_res).replace('x','.')]
main_pdb_array[ref_seg][str(ref_res).replace('x','.')] = residue[0:5]
trimmed_residues.append(gn_)
trimmed_res_num+=1
if '_' in ref_seg:
orig_ref_seg = ref_seg.split('_')[0]
else:
orig_ref_seg = ref_seg
if ref_res in self.template_source[orig_ref_seg]:
self.template_source = update_template_source(self.template_source, [ref_res], None, orig_ref_seg, just_rot=True)
else:
fetched_key = list(self.template_source[orig_ref_seg].keys())[int(ref_res.split('|')[1])-1]
self.template_source = update_template_source(self.template_source,[fetched_key], None, orig_ref_seg, just_rot=True)
elif 'free' in ref_seg:
trimmed_residues.append(gn_)
trimmed_res_num+=1
except:
print("Missing atoms in {} at {}".format(self.main_structure,gn))
logging.warning("Missing atoms in {} at {}".format(self.main_structure,gn))
elif alignment_dict[aligned_seg][aligned_res]=='x':
trimmed_residues.append(gn.replace('x','.'))
trimmed_res_num+=1
self.statistics.add_info('ref_seq_length', ref_length)
self.statistics.add_info('conserved_num', conserved_count)
self.statistics.add_info('non_conserved_num', non_cons_count)
self.statistics.add_info('trimmed_residues_num', trimmed_res_num)
self.statistics.add_info('non_conserved_switched_num', switched_count)
self.statistics.add_info('conserved_residues', conserved_residues)
self.statistics.add_info('non_conserved_residue_templates', non_cons_res_templates)
return [main_pdb_array, reference_dict, template_dict, alignment_dict, trimmed_residues]
def find_and_switch_rotamer(self, similarity_table, gn, gn_, reference_dict, ref_seg, ref_res, main_pdb_array, atom_num_dict,
template_dict, temp_seg, temp_res, non_cons_res_templates, switched_count, no_match, segment):
parse = GPCRDBParsingPDB()
######################
# if self.revise_xtal and self.force_main_temp:
# s = Structure.objects.get(pdb_code__index='6CM4')
# temp_st = OrderedDict([(s, 100)])
# for st, sim in similarity_table.items():
# temp_st[st] = sim
# similarity_table = temp_st
######################
for struct in similarity_table:
try:
alt_temp = parse.fetch_residues_from_pdb(struct, [gn])
if reference_dict[ref_seg][ref_res]==PDB.Polypeptide.three_to_one(
alt_temp[gn_][0].get_parent().get_resname()):
orig_res = main_pdb_array[ref_seg][str(ref_res).replace('x','.')]
alt_res = alt_temp[gn_]
if len(alt_res)!=atom_num_dict[reference_dict[ref_seg][ref_res]]:
continue
superpose = sp.RotamerSuperpose(orig_res, alt_res)
new_atoms = superpose.run()
if self.debug:
print(struct, gn_, superpose.backbone_rmsd)
if superpose.backbone_rmsd>0.45:
continue
main_pdb_array[ref_seg][str(ref_res).replace('x','.')] = new_atoms
template_dict[temp_seg][temp_res] = reference_dict[ref_seg][ref_res]
non_cons_res_templates[gn] = struct
switched_count+=1
no_match = False
if 'x' not in ref_res:
num_in_loop = parse.gn_num_extract(ref_res,'|')[1]
seq_num = str(list(Residue.objects.filter(protein_conformation=self.prot_conf,
protein_segment__slug=segment))[num_in_loop-1].sequence_number)
self.template_source = update_template_source(self.template_source,[seq_num],struct,segment,just_rot=True)
else:
self.template_source = update_template_source(self.template_source,[ref_res],struct,segment,just_rot=True)
break
except:
pass
return main_pdb_array, template_dict, non_cons_res_templates, switched_count, no_match
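# Added summary comment: the first structure in the similarity table whose
# residue at this generic number matches the reference amino acid, carries the
# full expected heavy-atom count, and superposes onto the original rotamer with
# backbone RMSD <= 0.45 is accepted; its atoms replace the ones in
# main_pdb_array and template_source is updated accordingly.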
def ECL3_disulfide(self, reference_dict):
c61, c62 = False, False
try:
if reference_dict['TM6']['6x61']=='C':
c61 = True
except:
pass
try:
if reference_dict['TM6']['6x62']=='C':
c62 = True
except:
pass
ecl3_lab = [i for i in reference_dict if i.startswith('ECL3')][0]
ecl3_c = []
for gn, res in reference_dict[ecl3_lab].items():
if res=='C':
ecl3_c.append(gn)
if c61==True and len(ecl3_c)>0:
return ['6x61', ecl3_c[0]]
elif c62==True and len(ecl3_c)>0:
return ['6x62', ecl3_c[0]]
elif len(ecl3_c)>=2:
return [ecl3_c[0], ecl3_c[1]]
else:
return [0,0]
def write_homology_model_pdb(self, filename, main_pdb_array, alignment, trimmed_residues=[], disulfide_pairs=[], complex=False):
''' Write PDB file from pdb array to file.
@param filename: str, filename of output file \n
@param main_pdb_array: OrderedDict(), of atoms of pdb, where keys are generic numbers/residue numbers and
values are list of atoms. Output of GPCRDBParsingPDB.pdb_array_creator().
@param alignment: AlignedReferenceTemplate class, alignment of reference and template.
@trimmed_residues: list, list of generic numbers that are trimmed/to be modeled by MODELLER.
'''
key = ''
res_num = 0
counter_num = 0
atom_num = 0
trimmed_resi_nums = OrderedDict()
helix_restraints = []
prev_seg = '0'
icl3_mid = None
disulfide_nums = [[0,0],[0,0]]
complex_start, beta_start, gamma_start = None, None, None
with open(filename,'w+') as f:
for seg_id, segment in main_pdb_array.items():
if seg_id!='TM1' and prev_seg!='0' and seg_id.startswith('T') and prev_seg.startswith('T'):
atom_num+=1
# f.write("\nTER{} {} {}{}".format(str(atom_num).rjust(8),atom.get_parent().get_resname(),str(self.main_template_preferred_chain)[0],str(res_num).rjust(4)))
trimmed_segment = OrderedDict()
for key in segment:
res_num+=1
counter_num+=1
for i, d_p in enumerate(disulfide_pairs):
for j, d in enumerate(d_p):
try:
if key==d.replace('x','.'):
disulfide_nums[i][j] = res_num
break
except:
pass
try:
if alignment.reference_dict[seg_id][key.replace('.','x')] in ['-','x']:
counter_num-=1
res_num-=1
continue
except:
try:
if alignment.reference_dict[seg_id][key] in ['-','x']:
counter_num-=1
res_num-=1
continue
except:
pass
if seg_id=='HN':
if complex_start==None:
complex_start = counter_num
if seg_id=='Beta':
if beta_start==None:
beta_start = counter_num
if seg_id=='Gamma':
if gamma_start==None:
gamma_start = counter_num
if segment[key]=='/':
atom_num+=1
icl3_mid = counter_num
res_num-=1
counter_num-=1
continue
if key in trimmed_residues:
trimmed_segment[key] = counter_num
if 'x' in segment[key]:
if '?' in key:
atom_num+=1
continue
else:
helix_restraints.append(counter_num)
continue
if 'x' in segment[key]:
atom_num+=1
continue
if '?' in key and '-' in segment[key]:
atom_num+=1
continue
if '-term' in seg_id and segment[key]=='-':
continue
for atom in main_pdb_array[seg_id][key]:
atom_num+=1
coord = list(atom.get_coord())
coord1 = "%8.3f"% (coord[0])
coord2 = "%8.3f"% (coord[1])
coord3 = "%8.3f"% (coord[2])
if str(atom.get_id())=='CA':
if len(key)==4:
bfact = "%6.2f"% (float(key))
elif '.' not in key:
bfact = "%6.2f"% (float(atom.get_bfactor()))
else:
key_split = key.split('.')
if '.' in key and len(key_split[1])==3:
bfact = " -%4.2f"% (float(key))
elif len(key_split)==3:
bfact = "%6.2f"% (float(atom.get_bfactor()))
else:
bfact = " %5.2f"% (float(key))
else:
bfact = "%6.2f"% (float(atom.get_bfactor()))
occupancy = "%6.2f"% (atom.get_occupancy())
template="""
ATOM{atom_num} {atom}{res} {chain}{res_num}{coord1}{coord2}{coord3}{occupancy}{bfactor}{atom_s} """
context={"atom_num":str(atom_num).rjust(7), "atom":str(atom.get_id()).ljust(4),
"res":atom.get_parent().get_resname(),
"chain":str(self.main_template_preferred_chain)[0],
"res_num":str(res_num).rjust(4), "coord1":coord1.rjust(12),
"coord2":coord2.rjust(8), "coord3":coord3.rjust(8),
"occupancy":str(occupancy).rjust(3),
"bfactor":str(bfact).rjust(4), "atom_s":str(str(atom.get_id())[0]).rjust(12)}
f.write(template.format(**context))
trimmed_resi_nums[seg_id] = trimmed_segment
prev_seg = seg_id[:4]
f.write("\nTER\n")
if self.reference_entry_name!=self.main_structure.protein_conformation.protein.parent.entry_name:
atom_num+=1
# f.write("\nTER{} {} {}{}".format(str(atom_num).rjust(8),atom.get_parent().get_resname(),str(self.main_template_preferred_chain)[0],str(res_num).rjust(4)))
return trimmed_resi_nums, helix_restraints, icl3_mid, disulfide_nums, complex_start, beta_start, gamma_start
def create_PIR_file(self, reference_dict, template_dict, template_file, hetatm_count, water_count):
''' Create PIR file from reference and template alignment (AlignedReferenceAndTemplate).
@param reference_dict: AlignedReferenceAndTemplate.reference_dict
@param template_dict: AlignedReferenceAndTempalte.template_dict
@template_file: str, name of template file with path
@param hetatm_count: int, number of hetero atoms
@param water_count: int, number of water atoms
'''
ref_sequence, temp_sequence = '',''
res_num = 1
with open(template_file,'r') as f:
lines = f.readlines()
for line in lines:
try:
pdb_re = re.search('(ATOM[A-Z\s\d]{13}\S{3}\s\S\s+)(\d+)([A-Z\s\d.-]{49,53})',line)
start_num = pdb_re.group(2)
break
except:
try:
pdb_re = re.search('(ATOM[A-Z\s\d]{13}\S{3}\s+)(\d+)([A-Z\s\d.-]{49,53})',line)
start_num = pdb_re.group(2)
break
except:
pass
for ref_seg, temp_seg in zip(reference_dict, template_dict):
if ref_seg in ['HN','Beta','Gamma']:
ref_sequence+='/'
temp_sequence+='/'
for ref_res, temp_res in zip(reference_dict[ref_seg], template_dict[temp_seg]):
if reference_dict[ref_seg][ref_res] in ['-','x']:
continue
else:
ref_sequence+=reference_dict[ref_seg][ref_res]
if template_dict[temp_seg][temp_res] in ['-','x']:
temp_sequence+='-'
else:
temp_sequence+=template_dict[temp_seg][temp_res]
res_num+=1
for i in range(hetatm_count):
ref_sequence+='.'
temp_sequence+='.'
for i in range(water_count):
ref_sequence+='w'
temp_sequence+='w'
self.model_sequence = temp_sequence
if self.complex:
pir_file = "./structure/PIR/{}_{}.pir".format(self.uniprot_id, self.target_signprot.entry_name)
else:
pir_file = "./structure/PIR/"+self.uniprot_id+"_"+self.state+".pir"
with open(pir_file, 'w+') as output_file:
template="""
>P1;{temp_file}
structure:{temp_file}:{start}:{chain}:{res_num}:{chain}::::
{temp_sequence}*
>P1;{uniprot}
sequence:{uniprot}::::::::
{ref_sequence}*
"""
context={"temp_file":template_file,
"start":start_num,
"chain":self.main_template_preferred_chain,
"res_num":res_num,
"temp_sequence":temp_sequence,
"uniprot":self.uniprot_id,
"ref_sequence":ref_sequence}
output_file.write(template.format(**context))
def run_MODELLER(self, pir_file, template, reference, number_of_models, output_file_name, atom_dict=None,
helix_restraints=[], icl3_mid=None, disulfide_nums=[], complex_start=None, beta_start=None, gamma_start=None):
''' Build homology model with MODELLER.
@param pir_file: str, file name of PIR file with path \n
@param template: str, file name of template with path \n
@param reference: str, Uniprot code of reference sequence \n
@param number_of_models: int, number of models to be built \n
@param output_file_name: str, name of output file
@param atom_dict: nested OrderedDict(), atoms to model with MODELLER organized by segments and generic
numbers, default=None
@param helix_restraints: list, list of generic numbers that should be modelled as helical regions by
MODELLER, default=[]
@param icl3_mid: int, position of the break in the middle of ICL3, default=None
'''
log.none()
env = environ(rand_seed=1028) #!!random number generator
if self.revise_xtal==True:
ref_prot = self.reference_protein.parent
else:
ref_prot = self.reference_protein
if ref_prot==self.main_structure.protein_conformation.protein.parent or self.keep_hetatoms:
env.io.hetatm = True
env.io.water = | |
input = """mask = 01111X0011X11110XX11X110111001X00001
mem[26252] = 2785
mem[5529] = 156
mem[43194] = 29224
mem[64799] = 11208
mem[1727] = 138064713
mem[51786] = 67480
mask = 00010000011011101X0000X001X01001X0X0
mem[8820] = 143936540
mem[33783] = 33161
mem[60979] = 17936311
mem[19136] = 48558314
mem[55023] = 718791450
mem[1315] = 258018313
mem[1093] = 104780852
mask = 10111X1000X0001XX11011X11X0011100X00
mem[31605] = 115835374
mem[50005] = 5
mask = 1X011000XX101X101X0011100010X0100001
mem[42546] = 58538740
mem[42808] = 3851323
mem[54043] = 1022
mem[45712] = 43197369
mem[10795] = 2548035
mem[57363] = 1159
mem[54202] = 412819
mask = 10X100100000X1111X001101X111011XXX00
mem[16201] = 117522292
mem[31496] = 66092617
mem[50341] = 428
mask = 101X1X00100010101110X01111101111X0X1
mem[20313] = 147
mem[61510] = 51976
mem[21178] = 411
mem[9107] = 760422
mem[38572] = 283
mem[60979] = 140105821
mask = 110X000100001X11101001100X00100X1001
mem[56936] = 6185
mem[8457] = 28530693
mem[13561] = 3096
mem[53450] = 15673093
mem[60581] = 1214
mem[5903] = 2000318
mem[60351] = 211747206
mask = 00X1X0010X10X1111X001X00001000011X11
mem[62073] = 220574384
mem[27246] = 6492
mem[18869] = 1906219
mask = 101101X0X11010101X100X101X1X01100101
mem[37005] = 513427066
mem[45638] = 1726
mem[14004] = 201617
mem[42296] = 153436
mask = 10X11X0X00XX01X0X10X111X111001000111
mem[24455] = 217264
mem[57413] = 6770857
mem[14692] = 45473217
mem[3769] = 4757
mask = 11X1110X0110XXX0101011X000011X01X000
mem[305] = 70009
mem[26973] = 289
mask = X0X1X00010000X010110110X011011001101
mem[37863] = 1455937
mem[49769] = 20835743
mask = 00X10XX01100010101X0100100111X1011XX
mem[3341] = 459515075
mem[36300] = 6327157
mem[20441] = 10899
mem[16517] = 710704945
mem[51274] = 1493811
mask = X0110XXX011011101000100000X000X1X11X
mem[20041] = 22125189
mem[36402] = 33516
mem[22847] = 4075
mem[10171] = 1203
mem[12044] = 202447973
mem[1315] = 246
mask = 10X1X000101001101X10X000X011X0111100
mem[51796] = 29992104
mem[55005] = 402525408
mem[13871] = 10257878
mask = X001X0001110X110101X01110X0010X0000X
mem[36402] = 60926
mem[45811] = 691
mem[49840] = 86896
mem[2590] = 342
mem[46418] = 7319
mem[54758] = 965
mask = 001X00XX011X11101000010110111XXX1001
mem[19224] = 374258
mem[61699] = 264
mem[19638] = 15742629
mem[57494] = 26490
mem[49865] = 23438
mask = X00100X01X0X01X1X1101X010110101XX101
mem[25484] = 42806608
mem[33783] = 145323384
mem[40315] = 594
mem[13333] = 254578
mem[39809] = 7252815
mem[52735] = 116218816
mem[6129] = 20907
mask = 0001000X0110111X10XX11X0000000XX1X11
mem[5622] = 21277
mem[45634] = 1011054
mask = 1X01000X000010X11X10X0X00000XX00100X
mem[23412] = 54574
mem[7920] = 54188515
mem[21517] = 25879915
mem[57363] = 431157108
mem[49601] = 17444
mem[15096] = 2554701
mem[23477] = 13459467
mask = X001X000X11X100011X01001011110001010
mem[36987] = 568
mem[36660] = 246893
mem[57035] = 4038555
mask = 10111000X0X0XX1011X00XX00010X01111X1
mem[8554] = 380367
mem[5403] = 7418
mem[51786] = 65666977
mem[26546] = 12245509
mem[40806] = 311238844
mem[21066] = 11206516
mask = 10110000011011X010X0X1X01011X010XX1X
mem[49946] = 1458
mem[4373] = 280
mem[46610] = 376919
mask = 101100000X10100010001000X100011X011X
mem[35241] = 51240
mem[60111] = 134251890
mem[801] = 6325093
mem[63511] = 27310
mem[18392] = 74381
mem[17308] = 156337
mask = X011000001X110001001X01X1001X1111X01
mem[36574] = 12877161
mem[62337] = 10873578
mem[11275] = 6282
mem[8472] = 48322182
mem[47950] = 101617693
mem[43611] = 178631
mem[60979] = 5012394
mask = 10X1X10X0000001X1110101X1110001X111X
mem[16908] = 166150535
mem[21306] = 42743118
mem[36720] = 6104
mem[44038] = 11620193
mem[14668] = 9418931
mem[44777] = 163129610
mem[35702] = 37603
mask = 11111X01111XX10XX0110100010010000111
mem[43990] = 39315
mem[13335] = 730899404
mem[11921] = 37123
mem[42166] = 854850
mem[9345] = 9837
mem[49342] = 8178
mask = X001X0001000011011X00110111111XX1111
mem[18711] = 2221
mem[61699] = 3796
mem[55230] = 15319
mem[10562] = 800723991
mask = 1001X00000001XX11X10010000001XX010X1
mem[15283] = 6355
mem[27385] = 14166
mem[19691] = 1799151
mem[33024] = 26655
mem[21178] = 74692
mem[20014] = 72056
mask = 10110X0000X101101001110X11X010X011X0
mem[25478] = 80407
mem[24903] = 351387
mask = 001X1000101XX0X0X100001110X110X0X000
mem[12774] = 660172
mem[34604] = 5164578
mem[16522] = 34157571
mem[3105] = 121121
mask = 1X1111010110001010X0111001XX01110101
mem[34774] = 5535
mem[27256] = 759
mem[18452] = 5363
mem[4157] = 13424661
mask = X0111000000X01XX110X001X010X0X010X01
mem[7861] = 458333
mem[11882] = 575762
mem[60268] = 487130831
mem[54836] = 44610883
mask = X011100010XXX01011X00X11X111101X1X01
mem[10849] = 66137755
mem[60399] = 1014
mem[13849] = 1174900
mask = X011100010X1101010000X100X0XX01X0X00
mem[672] = 1156
mem[9106] = 1684
mask = 1011010XX1101X001X1000001XX0XXX11100
mem[10432] = 60137761
mem[43319] = 207
mask = 0001001X1000X1XX111010010111100X11X1
mem[64234] = 54143
mem[16908] = 459845976
mem[1018] = 13137550
mask = 1011XXX000000X10X1X01X0111110010X00X
mem[49893] = 532
mem[22512] = 670461
mem[63851] = 512
mem[8898] = 163198
mem[55005] = 186443
mem[63054] = 56429
mask = 11111101XX1X110010XX110X01000X1111XX
mem[15322] = 383518
mem[42546] = 1079125
mem[1163] = 34518
mask = 001101X00110101X10001000100000111X0X
mem[53977] = 6180
mem[28254] = 951979767
mem[49063] = 154022824
mem[18890] = 246364
mem[39131] = 917
mem[41779] = 100495
mem[65066] = 19510941
mask = 10111XX000010X011100111001X1010X0000
mem[59185] = 55549841
mem[37281] = 285118
mask = 1111110XX111X1X0101XX10X0X00001100X1
mem[62254] = 49558752
mem[32998] = 46668
mem[34162] = 1253623
mem[49419] = 85881740
mem[16573] = 26588931
mem[32165] = 1947339
mask = 100110001X1X10X0110010000010X0100001
mem[45229] = 384427
mem[38234] = 464797
mem[4762] = 218891416
mem[35007] = 28537580
mem[58929] = 7239342
mem[43990] = 19245
mask = 10X100001000110101100X110X00011X0001
mem[21908] = 411908340
mem[58137] = 13722534
mask = 1011100001101X1010X01XX1X01011110011
mem[36713] = 16064432
mem[31727] = 438484
mem[17413] = 278018
mem[35768] = 7599
mem[42661] = 25977
mask = 10X1100011101X1010001001100X1110110X
mem[39277] = 2216
mem[35504] = 6296182
mem[56628] = 19239933
mem[40710] = 60793
mem[44970] = 1758
mem[32245] = 1505
mask = 1X01110X001X0X001X01111X0X11100X0110
mem[10951] = 436
mem[35680] = 256295979
mem[44237] = 65038
mem[55023] = 11041
mem[39396] = 66
mask = 1111X11X00001010010011X01XX100010011
mem[49448] = 7397923
mem[39459] = 1101
mem[27280] = 3265569
mem[64594] = 27252
mask = 10X1X0001000011X11100X0XX1X110001XX1
mem[40696] = 7902220
mem[26058] = 99398102
mask = X01X00000XX0100110001001001001000110
mem[36439] = 102588728
mem[52916] = 2157364
mem[22195] = 70028
mask = 101X1010000X0X10X11010X1111010100100
mem[38732] = 1165
mem[53917] = 830
mem[50041] = 3286062
mem[56385] = 143054
mem[31727] = 20014
mem[53816] = 21421460
mask = 101101XXXX1011001X00X0X010100100001X
mem[25322] = 1199118
mem[42107] = 286897269
mem[16908] = 472712299
mem[50391] = 402436
mem[32810] = 1927
mem[39002] = 21799
mask = 000100X0101XXX101000000X00101X010001
mem[27962] = 8649
mem[10849] = 215568951
mem[8987] = 204760128
mem[26672] = 583
mem[20360] = 16749005
mem[43804] = 53939
mask = 10111X0000000010X100001X0001001X0010
mem[51228] = 181554669
mem[65073] = 49475
mem[17951] = 677777
mem[64944] = 18570433
mem[28254] = 6932
mem[27199] = 683
mask = 0011000001101110100X1X00X110X0100110
mem[27292] = 5688
mem[3126] = 9820
mem[48169] = 13170629
mask = XX1110X01X11100X00X001000X0110010111
mem[4635] = 44951228
mem[42419] = 5452
mem[15942] = 411631231
mem[46076] = 7066434
mask = 001X1X00X110101010X011X1110100X0X001
mem[53450] = 2352
mem[38234] = 251
mem[36660] = 368602019
mem[31727] = 3801
mem[61573] = 29290313
mask = 10111000X110X01010001001101011X011X1
mem[54931] = 641804
mem[38064] = 107414
mem[6286] = 95588
mem[26186] = 19167809
mask = 10X1100000X0001X110X0X110X0001110101
mem[63607] = 196293012
mem[37397] = 74862
mem[19292] = 318924812
mem[29261] = 13665
mem[25250] = 264997
mem[30191] = 3641
mem[57494] = 63004509
mask = 1001X00011101110101X0100X01001100X0X
mem[1481] = 82418090
mem[25265] = 1815
mem[33881] = 172103
mem[32784] = 22874
mem[8554] = 217
mem[54202] = 444131
mask = X01110000X0X0110110X1X0X0111XX0100X0
mem[7552] = 7475
mem[57906] = 82302467
mem[31613] = 113299
mem[58678] = 26311134
mem[62253] = 65990543
mem[4065] = 611064
mask = 1X11X110X000X010X10010111100X000XX1X
mem[11119] = 320765
mem[3769] = 42837319
mem[18392] = 10173
mask = 00010XX110000XX01X1X11X10011100XX100
mem[33575] = 8598574
mem[53173] = 1808
mem[19850] = 16935469
mem[31392] = 313339239
mem[38549] = 116034
mem[25962] = 444898
mask = 10111000X110101010X01X0X1X000XXX10X0
mem[19150] = 6547
mem[8554] = 72222
mem[56029] = 14274328
mem[35745] = 3739
mask = X1X111010110XX101010000X01X001X011X1
mem[566] = 907385
mem[51443] = 10041
mem[9645] = 5338013
mem[57457] = 85973
mask = 001110X0X1X010X0X0001X11X00100X1X011
mem[49601] = 966206419
mem[16391] = 14774065
mem[10765] = 3943
mem[11385] = 1053441
mem[9645] = 3674
mem[33267] = 5557039
mask = X0111X001XXX0010110000000111X001000X
mem[1854] = 18498
mem[33158] = 41774920
mem[62418] = 7153
mem[16528] = 7205
mask = X01X11X11110111010XXX000100100111000
mem[56899] = 82703
mem[34774] = 10579573
mem[56593] = 5775
mem[25931] = 745674
mem[52011] = 9761367
mem[20639] = 104027
mask = 10X11000101X10101X0X001001100X1X00XX
mem[5566] = 1099
mem[10795] = 956222
mem[16528] = 21229
mem[57906] = 49042881
mem[17670] = 181567
mem[40960] = 3605052
mask = 0011100X101000000100010XX0X0100X1000
mem[33906] = 90511
mem[60990] = 145571142
mem[27402] = 47129
mem[42809] = 1037812
mem[26424] = 897
mask = 101110X011X1X1101X001101011X00101011
mem[29570] = 3354
mem[40337] = 44880
mem[28402] = 497858
mem[26650] = 476674190
mem[62418] = 34025
mem[24168] = 699
mask = 10111000000X00101X01001100X01X11X011
mem[9001] = 533135094
mem[3225] = 1428927
mem[24172] = 147342
mem[45846] = 81574
mem[31727] = 302
mem[12998] = 488031
mask = 1X11110XX11X111010X010XXX0X000110001
mem[1093] = 361839
mem[49946] = 1789883
mem[51600] = 41971870
mem[59005] = 31231040
mem[36667] = 82411211
mem[36908] = 186875212
mask = 0011100011XXXXX00000111XX0000X110011
mem[59622] = 13776
mem[53783] = 86556420
mem[19116] = 192038
mem[46435] = 15488
mem[28402] = 456865626
mem[24036] = 192281
mask = X111X10X11X111101011010100XX10101001
mem[12408] = 891354
mem[5182] = 45755
mem[60784] = 339
mem[16899] = 2232
mem[65066] = 11310211
mask = 101X11X0111X1110X0001010X0XX00010X10
mem[31850] = 36538
mem[65266] = 16393094
mem[20521] = 12820
mask = 10011000101XX01011010X0X001X10010110
mem[14008] = 5237
mem[25962] = 13943751
mem[19638] = 209720371
mem[3799] = 191312955
mask = 10110000X11X10X0100X1X0X1010X1010X01
mem[20810] = 2332135
mem[58823] = 26503
mem[63041] = 5806
mem[60395] = 3480418
mem[44633] = 200491
mem[33377] = 695773
mask = 100110XX10101X1010001X10001001110000
mem[52884] = 1743
mem[9778] = 119536
mem[21499] = 356474913
mem[3126] = 2292214
mem[6910] = 175610
mem[55643] = 224868
mask = 101X110000110X10110X0111111001X00011
mem[55193] = 1563727
mem[62254] = 130412
mem[20399] = 3220120
mem[57494] = 49142855
mem[39459] = 14038947
mem[34539] = 17820
mem[51526] = 231931008
mask = 10X1100XX11X111X1X000111000100X10011
mem[49851] = 7594
mem[25848] = 55481
mem[65066] = 460629725
mem[47532] = 377061
mask = 10XX00010000101111100X111011101110X0
mem[61616] = 32889296
mem[11200] = 7530
mem[15283] = 18655
mem[25349] = 781
mem[45657] = 76012
mask = 10011000101X101011XX0X1001X0000X010X
mem[17670] = 1299588
mem[13821] = 186350
mem[16908] = 1062419043
mask = 1X11X000X111011010000011X110111X0X01
mem[28453] = 5700
mem[60954] = 2308171
mem[53802] = 335
mem[25187] = 8053
mem[53364] = 284593257
mem[65434] = 539563
mask = X0010000XX1011X010X00XX0XX1001000000
mem[54811] = 272
mem[36316] = 15229
mem[16561] = 203000701
mem[19638] = 568787
mask = X0110100X0X0110011X00000X01001X01100
mem[49992] = 15226
mem[51782] = 76988
mask = 1011X100X11011X010X0101X100X00111X11
mem[48976] = 243767847
mem[11299] = 901
mem[30490] = 1762085
mask = 1011XX00000101101XX111X011X0010X0101
mem[18890] = 10
mem[52697] = 9594326
mem[24790] = 13695
mem[12968] = 52010
mem[24579] = 112903
mem[54271] = 1070949
mask = 101X0X0000000X1011X010111X0001100000
mem[28459] = 684240
mem[56112] = 246013
mem[83] = 25183036
mem[31733] = 1094402
mem[31496] = 16701642
mask = X0011000X1101110X0X0100X0000X0010000
mem[17413] = 249
mem[17008] = 3978
mem[25265] = 516
mask = 10110000111010X0100111X11110010X00X1
mem[15233] = 178
mem[8781] = 266520286
mem[53730] = 407022173
mem[16980] = 12836825
mem[3698] = 63442
mem[3412] = 221236
mask = 11X11100011101001X110X000101XX1000X1
mem[49264] = 64836
mem[34162] = 16026
mem[42056] = 10587492
mem[44797] = 5732530
mem[13630] = 6578
mask = 100X00X01X00XXXXX110011100X111110101
mem[6004] = 283
mem[64151] = 121946
mask = 100XX0100X0011111100111001100X100100
mem[62107] = 30785
mem[13120] = 9822
mem[53973] = 24321
mem[36620] = 202312409
mask = 0X0100111000X10010101101X11X11100X10
mem[33202] = 99747
mem[16528] = 29834536
mem[404] = 72130
mem[41644] = 65330
mem[1031] = 507451055
mask = 10X1X0101X0X01XX111000000X10X1110101
mem[7276] = 7501209
mem[47302] = 295292983
mem[8554] = 16542
mem[41279] = 59129190
mem[58373] = 54870
mask = 0001000011X0010X01X01X0001100110X101
mem[8871] = 65653734
mem[3105] = 477
mem[60373] = 3247
mem[28472] = 154970
mem[40070] = 29940
mem[7630] = 223
mem[21911] = 909386168
mask = 10110X00011010XX10X0000XX010X111X101
mem[34303] = 21055620
mem[64944] = 33910
mem[25566] = 2387
mem[25265] = 26329844
mem[54527] = 960
mem[65089] = 56573
mem[46242] = 163
mask = X011XX0001101X1010001XXX1011X0X11001
mem[15942] = 1701
mem[33933] = 5999624
mem[37519] = 121764815
mem[65291] = 3664
mem[53344] = 717
mem[8554] = 41226
mask = 1001X0X0X000X11111X0X10X011X01X00101
mem[62160] = 452
mem[38885] = 365458
mem[3306] = 51496"""
# input = """mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X
# mem[8] = 11
# mem[7] = 101
# mem[8] = 0"""
# input = """mask = 000000000000000000000000000000X1001X
# mem[42] = 100
# mask = 00000000000000000000000000000000X0XX
# mem[26] = 1"""
input = input.split("\n")
cur_mask = None
mem = {}
def apply_mask(value, mask):
binary = bin(value)[2:]
binary = binary.rjust(len(mask), "0")
binary = list(binary)
for i, m in enumerate(mask):
if m == "X":
continue
if m == "1":
binary[i] = 1
else:
binary[i] = 0
return int("".join([str(i) for i in binary]), 2)
def get_possible_addresses(value, mask):
binary = bin(value)[2:]
binary = binary.rjust(len(mask), "0")
binary = list(binary)
results = []
for i, m in enumerate(mask):
new_results = []
if m == "0":
if not results:
new_results = [binary[i]]
else:
for result in results:
new_results.append(result + binary[i])
if m == "1":
if not results:
new_results = ["1"]
else:
for result in results:
new_results.append(result + "1")
| |
cubic polynomial cannot compute
result_vector=None
segment_1_recalculate = True
realroots=[]
#Check for zero values in segment 1
for i in realroots:
if 0 < i < tList[2]: segment_1_recalculate=True #If a zero occurs in this segment, we need to split it into two segments, 1.1 and 1.2
if segment_1_recalculate: #go with binomial and linear segments
segment1_1_coeffs = (qList[1] / (tList[1] ** 2), 0, 0) # segment 1.1 binomial with zero intersect
self.uhCurveData.append((tList[1], segment1_1_coeffs))
segment1_2_slope = (qList[2] - qList[1]) / (
tList[2] - tList[1]) # segment 1.2, if used, is linear between t(1) and t(2)
segment1_2_intercept = qList[1] - segment1_2_slope * tList[1]
segment1_2_coeffs = (segment1_2_slope, segment1_2_intercept)
self.uhCurveData.append((tList[2], segment1_2_coeffs))
else:
self.uhCurveData.append((tList[2], coeffs))
# ------Compute Segment 2 t(2) to t(4)------
#reinitialize input matrix and ordinate vector for cubic polynomial
input_matrix = np.zeros((4, 4))
ordinate_vector = np.zeros(4)
#segment 2 has three known points (t(2-4), q(2-4)) and a known slope (0 at t(3)/Qpeak)
#the derivative of y=a*x^3+b*x^2+c*x+d is dy/dx=3a*x^2+2b*x+c, which is represented as the third linear equation
#The equations used are:
# T(2)^3 * A + T(2)^2 * B + T(2) * C + D = Q(2) (point T(2),Q(2))
# T(3)^3 * A + T(3)^2 * B + T(3) * C + D = Q(3) (point T(3),Q(3))
# T(3)^2 * 3A + T(3) * 2B + C = 0 (Derivative at peak is 0)
# T(4)^3 * A + T(4)^2 * B + T(4) * C + D = Q(4) (point T(4),Q(4))
input_matrix[0, 0] = 1
input_matrix[0, 1] = tList[2]
input_matrix[0, 2] = tList[2] ** 2
input_matrix[0, 3] = tList[2] ** 3
input_matrix[1, 0] = 1
input_matrix[1, 1] = tList[3]
input_matrix[1, 2] = tList[3] ** 2
input_matrix[1, 3] = tList[3] ** 3
input_matrix[2, 0] = 0
input_matrix[2, 1] = 1
input_matrix[2, 2] = 2 * tList[3]
input_matrix[2, 3] = 3 * tList[3] ** 2
input_matrix[3, 0] = 1
input_matrix[3, 1] = tList[4]
input_matrix[3, 2] = tList[4] ** 2
input_matrix[3, 3] = tList[4] ** 3
ordinate_vector[0] = qList[2]
ordinate_vector[1] = qList[3]
ordinate_vector[2] = 0
ordinate_vector[3] = qList[4]
try: #compute cubic polynomial
result_vector = np.linalg.solve(input_matrix, ordinate_vector)
coeffs = np.flipud(result_vector)
dual_binomial_used = False
except: #if the cubic polynomial cannot compute
dual_binomial_used = True
result_vector = None
# I'll just copy the great explanation from the excel version:
# Now we need to check for inflections. To accomplish this daring feat of mathematics we
# will be taking the second derivative and solving for the zero. Since it's
# possible for there to be a 0 as the third order coefficient, we have to make sure to verify that it's not a
# constant before we start. (The leading coefficient being a zero is a good thing, this means a binomial fit,
# and thusly no inflections at all.)
if not dual_binomial_used and coeffs[0] != 0: # A leading coefficient of zero indicates a binomial
# Check to see if A is zero. Since zero is good we'll only operate if A is non - zero.
# The second derivative of a cubic polynomial AX^3 + BX^2 + CX + D is 6AX + 2B, so the zero is set at
# -2B / 6A which reduces to B / (A * (-3)).
zero_point=coeffs[1]/(coeffs[0]*(-3))
# We're looking for the inflection point landing inside the range we're working in so we want zero point
# to be less than start or more than end. But numerical error in the zero point calculation could
# artificially place the point inside, so we allow for 1% of the total width excursion of the zero point
# into the invalid range. This should have little to no impact on valid systems and doesn't rule out
# systems where the inflection point is right at the intersection point (actually, we WANT that to be true
# since the ACTUAL curve has an inflection point around there.) If an inflection point is found within the
# segment, it will switch to the dual binomial method
if tList[2] + .01*self.w75 < zero_point < tList[4] - .01*self.w75: dual_binomial_used = True
if dual_binomial_used: #Split segment 2 into segment 2.1 and 2.2
# Setup Solver for the segment 2.1 binomial
input_matrix = np.zeros((3, 3)) # Matrix for 3-dimensional linear equation coefficient matrix
ordinate_vector = np.zeros(3) # Vector for dependent variables
# The equations are:
# T(2)^2 * A + T(2) * B + C = Q(2) (point T(2),Q(2))
# T(3)^2 * A + T(3) * B + C = Q(3) (point T(3),Q(3))
# T(3)^2 * 2A + B = 0 (Derivative at peak is 0)
input_matrix[0, 0] = 1
input_matrix[0, 1] = tList[2]
input_matrix[0, 2] = tList[2] ** 2
input_matrix[1, 0] = 1
input_matrix[1, 1] = tList[3]
input_matrix[1, 2] = tList[3] ** 2
input_matrix[2, 0] = 0
input_matrix[2, 1] = 1
input_matrix[2, 2] = tList[3] * 2
ordinate_vector[0] = qList[2]
ordinate_vector[1] = qList[3]
ordinate_vector[2] = 0
try: #compute binomial
result_vector = np.linalg.solve(input_matrix, ordinate_vector)
coeffs = np.flipud(result_vector)
except: #Give up and go linear
segment2_1_slope=(qList[3]-qList[2])/(tList[3]-tList[2])
segment2_1_intercept=qList[2]-segment2_1_slope*tList[2]
coeffs =(segment2_1_slope,segment2_1_intercept)
result_vector = None
self.uhCurveData.append((tList[3],coeffs))
# Update Solver for the segment 2.2 binomial
# Swap Out the first equation from the previous setup with point 4, the other two can remain
# The equations are:
# T(4)^2 * A + T(4) * B + C = Q(4) (point T(4),Q(4))
input_matrix[0, 0] = 1
input_matrix[0, 1] = tList[4]
input_matrix[0, 2] = tList[4] ** 2
ordinate_vector[0] = qList[4]
try: #compute binomial
result_vector = np.linalg.solve(input_matrix, ordinate_vector)
coeffs = np.flipud(result_vector)
except: # Give up and go linear
segment2_2_slope = (qList[4] - qList[3]) / (tList[4] - tList[3])
segment2_2_intercept = qList[3] - segment2_2_slope * tList[3]
coeffs = (segment2_2_slope,segment2_2_intercept)
result_vector = None
self.uhCurveData.append((tList[4], coeffs))
else:
self.uhCurveData.append((tList[4],coeffs))
# ------Compute Segment 3 t(4) to t(5) (Linear Fit)------
segment3_slope=(qList[5]-qList[4]) / (tList[5]-tList[4])
segment3_intercept=qList[4]-segment3_slope * tList[4]
coeffs = (segment3_slope,segment3_intercept)
self.uhCurveData.append((tList[5],coeffs))
#Compute area under the curve
end_time=0
uhCumulativeArea=0
for segment in self.uhCurveData:
begin_time=end_time
end_time=segment[0]
expanded_coeffs=np.append(np.zeros(4-len(segment[1])),segment[1]) #pads leading zeros for the missing higher-order coefficients
thisarea = (expanded_coeffs[0] * (end_time ** 4) / 4 + expanded_coeffs[1] * (end_time ** 3) / 3 +
expanded_coeffs[2] * (end_time ** 2) / 2 + expanded_coeffs[3] * end_time -
(expanded_coeffs[0] * (begin_time ** 4) / 4 + expanded_coeffs[1] * (begin_time ** 3) / 3 +
expanded_coeffs[2] * (begin_time ** 2) / 2 + expanded_coeffs[3] * begin_time))
uhCumulativeArea += thisarea
uhCumulativeVolume=uhCumulativeArea*60.0
VolRemaining=self.uhVolume-uhCumulativeVolume
# Compute T6 and T7
tList.append(None) #Hold for T6
tList.append(tList[5]+2*VolRemaining/(60*0.3667*qList[3])) #T7
tList[6]=tList[5]+0.333333333*(tList[7]-tList[5])
# ------Compute Segment 4 t5 to t6 (Linear Fit)------
segment4_slope=(qList[6]-qList[5]) / (tList[6]-tList[5])
segment4_intercept=qList[5]-segment4_slope * tList[5]
coeffs = (segment4_slope,segment4_intercept)
self.uhCurveData.append((tList[6],coeffs))
uhCumulativeArea += segment4_slope * tList[6] ** 2 / 2 + segment4_intercept * tList[6] - (
segment4_slope * tList[5] ** 2 / 2 + segment4_intercept * tList[5])
# ------Compute Segment 5 t6 to t7 (Linear Fit)------
segment5_slope=(qList[7]-qList[6]) / (tList[7]-tList[6])
segment5_intercept=qList[6]-segment5_slope * tList[6]
coeffs = (segment5_slope,segment5_intercept)
self.uhCurveData.append((tList[7],coeffs))
uhCumulativeArea += segment5_slope * tList[7] ** 2 / 2 + segment5_intercept * tList[7] - (
segment5_slope * tList[6] ** 2 / 2 + segment5_intercept * tList[6])
uhCumulativeVolume = uhCumulativeArea * 60.0
uhVolError = (self.uhVolume - uhCumulativeVolume) / self.uhVolume
# ----Compute Unit Hydrograph----
numSteps = int(math.ceil(tList[7] / self.timeStep))
self.unitHydrograph = []
thisCurveData=None
for step in range(1,numSteps+1):
thisTime=step*self.timeStep
if thisCurveData is None or thisCurveData[0] < thisTime: #Test if we need to find new uh coefficients.
lastvalue=0
for i in range(0,len(self.uhCurveData)):
if lastvalue < thisTime <= self.uhCurveData[i][0]:
thisCurveData=self.uhCurveData[i]
lastvalue=self.uhCurveData[i][0]
if thisTime > tList[7] or thisTime <= 0: thisCurveData=[0,[0]] #Flows are 0 beyond the end of the unit hydrograph
thisCoeffs=np.append(np.zeros(4-len(thisCurveData[1])),thisCurveData[1])
thisQ = (thisCoeffs[0]*thisTime ** 3 + thisCoeffs[1]*thisTime ** 2 + thisCoeffs[2]*thisTime + thisCoeffs[3])
self.unitHydrograph.append(thisQ)
def plot(self):
import matplotlib.pyplot as plt
timeData=np.array(range(len(self.unitHydrograph))) * self.timeStep
plt.plot(timeData,self.unitHydrograph)
plt.show()
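# --- Hedged sketch (added for illustration, not part of the original module) ---
# Each uhCurveData entry is (segment_end_time, coeffs) with coeffs ordered from
# the highest power down; the area loop above integrates the (at most cubic)
# polynomial analytically over [begin_time, end_time]. The helper below redoes
# that single step on its own. The name `segment_area` is illustrative, and
# numpy is assumed to be imported as np, as elsewhere in this module.
def segment_area(coeffs, begin_time, end_time):
    # Pad to the full cubic form a*t**3 + b*t**2 + c*t + d.
    a, b, c, d = np.append(np.zeros(4 - len(coeffs)), coeffs)
    antiderivative = lambda t: a*t**4/4 + b*t**3/3 + c*t**2/2 + d*t
    return antiderivative(end_time) - antiderivative(begin_time)

# Example: a linear segment q(t) = 2*t + 1 over [0, 3] integrates to 12.
assert abs(segment_area((2, 1), 0.0, 3.0) - 12.0) < 1e-9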
class Runoff():
# Runoff computes and holds the flow information
# excess precipitation information
# Subcatchment, RainGage, unitHydrograph objects as arguments
def __init__(self,subcatchment,rainGage,timeStep,unitHydrograph = None):
self.subcatchment=subcatchment # Subcatchment object
self.rainGage=rainGage # RainGage object
self.timeStep = timeStep
if unitHydrograph is not None:
self.unitHydrograph=unitHydrograph
if self.unitHydrograph.timeStep != self.timeStep: errorMessages.append("Runoff timeStep != | |
None:
unit = self.na_helper.safe_get(self.parameters, ['san_application_template', 'total_size_unit'])
if unit is None:
unit = self.parameters['size_unit']
self.parameters['san_application_template']['total_size'] *= netapp_utils.POW2_BYTE_MAP[unit]
self.debug = dict()
# self.debug['got'] = 'empty' # uncomment to enable collecting data
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
# REST API for application/applications if needed
self.rest_api, self.rest_app = self.setup_rest_application()
def setup_rest_application(self):
use_application_template = self.na_helper.safe_get(self.parameters, ['san_application_template', 'use_san_application'])
rest_api, rest_app = None, None
if use_application_template:
if self.parameters.get('flexvol_name') is not None:
self.module.fail_json(msg="'flexvol_name' option is not supported when san_application_template is present")
rest_api = netapp_utils.OntapRestAPI(self.module)
use_rest = rest_api.is_rest()
ontap_97_options = ['san_application_template']
if not rest_api.meets_rest_minimum_version(use_rest, 9, 7) and any(x in self.parameters for x in ontap_97_options):
self.module.fail_json(msg='Error: %s' % rest_api.options_require_ontap_version(ontap_97_options, version='9.7'))
name = self.na_helper.safe_get(self.parameters, ['san_application_template', 'name'], allow_sparse_dict=False)
rest_app = RestApplication(rest_api, self.parameters['vserver'], name)
elif self.parameters.get('flexvol_name') is None:
self.module.fail_json(msg="flexvol_name option is required when san_application_template is not present")
return rest_api, rest_app
def get_luns(self, lun_path=None):
"""
Return list of LUNs matching vserver and volume names.
:return: list of LUNs in XML format.
:rtype: list
"""
luns = []
tag = None
if lun_path is None and self.parameters.get('flexvol_name') is None:
return luns
query_details = netapp_utils.zapi.NaElement('lun-info')
query_details.add_new_child('vserver', self.parameters['vserver'])
if lun_path is not None:
query_details.add_new_child('lun_path', lun_path)
else:
query_details.add_new_child('volume', self.parameters['flexvol_name'])
query = netapp_utils.zapi.NaElement('query')
query.add_child_elem(query_details)
while True:
lun_info = netapp_utils.zapi.NaElement('lun-get-iter')
lun_info.add_child_elem(query)
if tag:
lun_info.add_new_child('tag', tag, True)
result = self.server.invoke_successfully(lun_info, True)
if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
attr_list = result.get_child_by_name('attributes-list')
luns.extend(attr_list.get_children())
tag = result.get_child_content('next-tag')
if tag is None:
break
return luns
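# Added comment: lun-get-iter is paged; each response may carry a 'next-tag',
# which is fed back in as 'tag' on the following call until it comes back
# empty, so the loop above accumulates every page of LUN records for the
# vserver/volume.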
def get_lun_details(self, lun):
"""
Extract LUN details, from XML to python dict
:return: Details about the lun
:rtype: dict
"""
return_value = dict()
return_value['size'] = int(lun.get_child_content('size'))
bool_attr_map = {
'is-space-alloc-enabled': 'space_allocation',
'is-space-reservation-enabled': 'space_reserve'
}
for attr in bool_attr_map:
value = lun.get_child_content(attr)
if value is not None:
return_value[bool_attr_map[attr]] = self.na_helper.get_value_for_bool(True, value)
str_attr_map = {
'comment': 'comment',
'multiprotocol-type': 'os_type',
'name': 'name',
'path': 'path',
'qos-policy-group': 'qos_policy_group',
'qos-adaptive-policy-group': 'qos_adaptive_policy_group',
}
for attr in str_attr_map:
value = lun.get_child_content(attr)
if value is None and attr in ('comment', 'qos-policy-group', 'qos-adaptive-policy-group'):
value = ''
if value is not None:
return_value[str_attr_map[attr]] = value
# Find out if the lun is attached
attached_to = None
lun_id = None
if lun.get_child_content('mapped') == 'true':
lun_map_list = netapp_utils.zapi.NaElement.create_node_with_children(
'lun-map-list-info', **{'path': lun.get_child_content('path')})
result = self.server.invoke_successfully(
lun_map_list, enable_tunneling=True)
igroups = result.get_child_by_name('initiator-groups')
if igroups:
for igroup_info in igroups.get_children():
igroup = igroup_info.get_child_content(
'initiator-group-name')
attached_to = igroup
lun_id = igroup_info.get_child_content('lun-id')
return_value.update({
'attached_to': attached_to,
'lun_id': lun_id
})
return return_value
def find_lun(self, luns, name, lun_path=None):
"""
Return lun record matching name or path
:return: lun record
:rtype: XML or None if not found
"""
for lun in luns:
path = lun.get_child_content('path')
if lun_path is not None:
if lun_path == path:
return lun
else:
if name == path:
return lun
_rest, _splitter, found_name = path.rpartition('/')
if found_name == name:
return lun
return None
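# Added matching note: for a LUN at path '/vol/some_vol/lun1', rpartition('/')
# yields ('/vol/some_vol', '/', 'lun1'), so a bare LUN name matches its full
# path even when the caller did not pass lun_path ('some_vol' and 'lun1' here
# are illustrative).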
def get_lun(self, name, lun_path=None):
"""
Return details about the LUN
:return: Details about the lun
:rtype: dict
"""
luns = self.get_luns(lun_path)
lun = self.find_lun(luns, name, lun_path)
if lun is not None:
return self.get_lun_details(lun)
return None
def get_luns_from_app(self):
app_details, error = self.rest_app.get_application_details()
self.fail_on_error(error)
if app_details is not None:
app_details['paths'] = self.get_lun_paths_from_app()
return app_details
def get_lun_paths_from_app(self):
"""Get luns path for SAN application"""
backing_storage, error = self.rest_app.get_application_component_backing_storage()
self.fail_on_error(error)
# {'luns': [{'path': '/vol/ansibleLUN/ansibleLUN_1', ...
if backing_storage is not None:
return [lun['path'] for lun in backing_storage.get('luns', [])]
return None
def get_lun_path_from_backend(self, name):
"""returns lun path matching name if found in backing_storage
        returns None if not found
"""
lun_paths = self.get_lun_paths_from_app()
match = "/%s" % name
for path in lun_paths:
if path.endswith(match):
return path
return None
def create_san_app_component(self, modify):
'''Create SAN application component'''
if modify:
required_options = ['name']
action = 'modify'
if 'lun_count' in modify:
required_options.append('total_size')
else:
required_options = ('name', 'total_size')
action = 'create'
for option in required_options:
if self.parameters.get(option) is None:
self.module.fail_json(msg="Error: '%s' is required to %s a san application." % (option, action))
application_component = dict(name=self.parameters['name'])
if not modify:
            application_component['lun_count'] = 1  # default value for create, may be overridden below
for attr in ('igroup_name', 'lun_count', 'storage_service'):
if not modify or attr in modify:
value = self.na_helper.safe_get(self.parameters, ['san_application_template', attr])
if value is not None:
application_component[attr] = value
for attr in ('os_type', 'qos_policy_group', 'qos_adaptive_policy_group', 'total_size'):
if not self.rest_api.meets_rest_minimum_version(True, 9, 8, 0):
if attr in ('os_type', 'qos_policy_group', 'qos_adaptive_policy_group'):
# os_type and qos are not supported in 9.7 for the SAN application_component
continue
if not modify or attr in modify:
value = self.na_helper.safe_get(self.parameters, [attr])
if value is not None:
# only one of them can be present at most
if attr in ('qos_policy_group', 'qos_adaptive_policy_group'):
attr = 'qos'
value = dict(policy=dict(name=value))
application_component[attr] = value
tiering = self.na_helper.safe_get(self.parameters, ['san_application_template', 'tiering'])
if tiering is not None and not modify:
application_component['tiering'] = dict()
for attr in ('control', 'policy', 'object_stores'):
value = tiering.get(attr)
if attr == 'object_stores' and value is not None:
value = [dict(name=x) for x in value]
if value is not None:
application_component['tiering'][attr] = value
return application_component
def create_san_app_body(self, modify=None):
'''Create body for san template'''
# TODO:
# Should we support new_igroups?
        # It may raise idempotency issues, as the REST call fails if the igroup already exists.
# And we already have na_ontap_igroups.
san = {
'application_components': [self.create_san_app_component(modify)],
}
for attr in ('protection_type',):
if not modify or attr in modify:
value = self.na_helper.safe_get(self.parameters, ['san_application_template', attr])
if value is not None:
# we expect value to be a dict, but maybe an empty dict
value = self.na_helper.filter_out_none_entries(value)
if value:
san[attr] = value
for attr in ('exclude_aggregates',):
if modify is None: # only used for create
values = self.na_helper.safe_get(self.parameters, ['san_application_template', attr])
if values:
san[attr] = [dict(name=name) for name in values]
for attr in ('os_type',):
if not modify: # not supported for modify operation, but required at application component level for create
value = self.na_helper.safe_get(self.parameters, [attr])
if value is not None:
san[attr] = value
body, error = self.rest_app.create_application_body('san', san)
return body, error
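    # Rough shape of the SAN application body built above (illustrative only; the exact keys
    # come from rest_application.create_application_body, but 'name', 'svm' and
    # 'smart_container' are known to be present since modify_san_application pops them below):
    #   {'name': ..., 'svm': {...}, 'smart_container': True,
    #    'san': {'application_components': [{'name': ..., 'lun_count': 1, 'total_size': ...}]}}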
def create_san_application(self):
'''Use REST application/applications san template to create one or more LUNs'''
body, error = self.create_san_app_body()
self.fail_on_error(error)
dummy, error = self.rest_app.create_application(body)
self.fail_on_error(error)
def modify_san_application(self, modify):
'''Use REST application/applications san template to add one or more LUNs'''
body, error = self.create_san_app_body(modify)
self.fail_on_error(error)
# these cannot be present when using PATCH
body.pop('name')
body.pop('svm')
body.pop('smart_container')
dummy, error = self.rest_app.patch_application(body)
self.fail_on_error(error)
def convert_to_san_application(self, scope):
'''First convert volume to smart container using POST
Second modify app to add new luns using PATCH
'''
# dummy modify, so that we don't fill in the body
modify = dict(dummy='dummy')
body, error = self.create_san_app_body(modify)
self.fail_on_error(error)
dummy, error = self.rest_app.create_application(body)
self.fail_on_error(error)
app_current, error = self.rest_app.get_application_uuid()
self.fail_on_error(error)
if app_current is None:
self.module.fail_json(msg='Error: failed to create smart container for %s' % self.parameters['name'])
app_modify, app_modify_warning = self.app_changes(scope)
if app_modify_warning is not None:
self.module.warn(app_modify_warning)
if app_modify:
self.modify_san_application(app_modify)
def delete_san_application(self):
'''Use REST application/applications san template to delete one or more LUNs'''
dummy, error = self.rest_app.delete_application()
self.fail_on_error(error)
def create_lun(self):
"""
Create LUN with requested name and size
"""
path = '/vol/%s/%s' % (self.parameters['flexvol_name'], self.parameters['name'])
options = {'path': path,
'size': str(self.parameters['size']),
'space-reservation-enabled': str(self.parameters['space_reserve']),
'space-allocation-enabled': str(self.parameters['space_allocation']),
'use-exact-size': str(self.parameters['use_exact_size'])}
if self.parameters.get('comment') is not None:
options['comment'] = self.parameters['comment']
if self.parameters.get('os_type') is not None:
options['ostype'] = self.parameters['os_type']
if self.parameters.get('qos_policy_group') is not None:
options['qos-policy-group'] = self.parameters['qos_policy_group']
if self.parameters.get('qos_adaptive_policy_group') is not None:
options['qos-adaptive-policy-group'] = self.parameters['qos_adaptive_policy_group']
lun_create = netapp_utils.zapi.NaElement.create_node_with_children(
'lun-create-by-size', **options)
try:
self.server.invoke_successfully(lun_create, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as exc:
self.module.fail_json(msg="Error provisioning lun %s of size %s: %s"
% (self.parameters['name'], self.parameters['size'], to_native(exc)),
exception=traceback.format_exc())
def delete_lun(self, path):
"""
Delete requested LUN
"""
lun_delete = netapp_utils.zapi.NaElement.create_node_with_children(
'lun-destroy', **{'path': path,
'force': str(self.parameters['force_remove']),
'destroy-fenced-lun':
str(self.parameters['force_remove_fenced'])})
try:
self.server.invoke_successfully(lun_delete, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as exc:
self.module.fail_json(msg="Error deleting lun %s: %s" % (path, to_native(exc)),
exception=traceback.format_exc())
def resize_lun(self, path):
"""
Resize requested LUN.
:return: True if LUN was actually re-sized, false otherwise.
:rtype: bool
"""
lun_resize = netapp_utils.zapi.NaElement.create_node_with_children(
'lun-resize', **{'path': path,
'size': str(self.parameters['size']),
'force': str(self.parameters['force_resize'])})
try:
self.server.invoke_successfully(lun_resize, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as exc:
if to_native(exc.code) == "9042":
# Error 9042 denotes the new LUN size being the same as the
# old LUN size. This happens when there's barely any difference
# in the two sizes. For example, from 8388608 bytes to
# | |
<reponame>jeffg2k/avclass
import logging
import operator
import re
import string
import sys
from avclass import util
from collections import defaultdict, namedtuple
from typing import AnyStr, Callable, Collection, Dict, List, Optional, Set, Tuple, Union
logger = logging.getLogger(__name__)
# Prefix to identify platform tags
platform_prefix = "FILE:os:"
# Default category for tags in taxonomy with no category
uncategorized_cat = "UNC"
SampleInfo = namedtuple("SampleInfo", ["md5", "sha1", "sha256", "labels", "vt_tags"])
# AVs to use in suffix removal
suffix_removal_av_set = {
"Norman",
"Avast",
"Avira",
"Kaspersky",
"ESET-NOD32",
"Fortinet",
"Jiangmin",
"Comodo",
"GData",
"Avast",
"Sophos",
"BitDefenderTheta",
"Alibaba",
"Tencent",
"Cyren",
"Arcabit",
"TrendMicro-HouseCall",
"TrendMicro",
"NANO-Antivirus",
"Microsoft",
}
class Tag:
""" A Tag in the taxonomy """
def __init__(self, s):
word_list = s.strip().split(":")
if len(word_list) > 1:
self._name = word_list[-1].lower()
self._cat = word_list[0].upper()
self._prefix_l = [x.lower() for x in word_list[1:-1]]
path = self._cat
for x in self._prefix_l:
path = path + ":" + x
self._path = path + ":" + self._name
else:
self._name = word_list[0].lower()
self._cat = uncategorized_cat
self._prefix_l = []
self._path = self._name
def __hash__(self):
""" Return hash """
return hash((self._path))
@property
def name(self):
""" Return tag name """
return self._name
@property
def cat(self):
""" Return tag category """
return self._cat
@property
def path(self):
""" Return tag path """
return self._path
@property
def prefix_l(self):
""" Return tag prefix list """
return self._prefix_l
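# Minimal sketch of how Tag parses taxonomy strings (assumption: the example strings are
# illustrative only and are not taken from any shipped taxonomy data file).
def _tag_parsing_example():  # hypothetical helper, not used by avclass itself
    full = Tag("FAM:win32:zeus")   # -> cat 'FAM', prefix list ['win32'], name 'zeus'
    bare = Tag("zeus")             # -> uncategorized ('UNC'), path equals the bare name
    return (full.cat, full.prefix_l, full.name, full.path), (bare.cat, bare.path)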
class Taxonomy:
"""
Contains tags and generic tokens read from filesystem
"""
def __init__(self, filepath: Optional[AnyStr]):
"""
Initialize and populate the Tag map from ``filepath``
:param filepath: Path to taxonomy data
"""
self._tags = set()
self._tag_map = {}
if filepath:
self.read_taxonomy(filepath)
def __len__(self) -> int:
"""
        The number of tags contained in the taxonomy.
:return: The length (int) of the Taxonomy
"""
return len(self._tags)
def __iter__(self):
""" Iterator over the alphabetically sorted tags in the taxonomy """
return (t for t in sorted(self._tags))
def is_generic(self, tag: AnyStr) -> bool:
"""
Whether or not the input ``tag`` is generic
:param tag: The tag
:return: Boolean
"""
t = self._tag_map.get(tag, None)
return getattr(t, "cat", None) == "GEN"
def is_tag(self, tag: AnyStr) -> bool:
"""
Whether this Taxonomy is aware of ``tag``
:param tag: The tag
:return: Boolean
"""
return tag in self._tag_map
def add_tag(self, s: AnyStr, override: bool = False):
"""
Add a tag (``s``) to the Taxonomy. Collisions are only replaced if ``override`` is truthy.
:param s: A string to create a Tag from
:param override: Whether or not to replace a duplicate if present
:return: None
"""
tag = Tag(s)
t = self._tag_map.get(tag.name, None)
if t and (t.path != tag.path):
if override:
logger.warning("[Taxonomy] Replacing %s with %s\n" % t.path, tag.path)
del self._tag_map[t.path]
else:
return
logger.debug("[Taxonomy] Adding tag %s" % s)
self._tag_map[tag.name] = tag
self._tag_map[tag.path] = tag
def remove_tag(self, tag: AnyStr) -> bool:
"""
Remove a Tag from the Taxonomy.
:param tag: The tag to remove
:return: Whether or not the tag was present
"""
t = self._tag_map.get(tag, None)
        if t:
            logger.debug("[Taxonomy] Removing tag: %s" % t.path)
            del self._tag_map[t.name]
            del self._tag_map[t.path]
            self._tags.discard(t.name)
return t is not None
def get_category(self, tag: AnyStr) -> AnyStr:
"""
Return the tag's category or "UNK" if it's not a tag.
:param tag: The tag
:return: The category
"""
t = self._tag_map.get(tag, None)
return getattr(t, "cat", "UNK")
def get_path(self, tag: AnyStr) -> AnyStr:
"""
Get a tag's full path.
:param tag: The tag
:return: The tag's path
"""
t = self._tag_map.get(tag, None)
return getattr(t, "path", f"UNK:{tag}")
def get_prefix_l(self, tag: AnyStr) -> List[AnyStr]:
"""
Get a tag's prefix list.
:param tag: The tag
:return: The tag's prefix list
"""
t = self._tag_map.get(tag, None)
return getattr(t, "prefix_l", [])
def get_prefix(self, tag: AnyStr) -> List[AnyStr]:
"""
Get a tag's prefixes.
:param tag: The tag
:return: String representation of the tag's full prefix
"""
t = self._tag_map.get(tag, None)
        tag_pfx = tag.split(":")[:-1]
return t.prefix_l if t else tag_pfx
def get_depth(self, tag: AnyStr) -> int:
"""
Determine the "depth" (token count) of the tag
:param tag: The tag
:return: The depth (int) of the tag
"""
t = self._tag_map.get(tag, None)
if t:
            return len(t.prefix_l) + 2
return 0
def get_info(self, tag: AnyStr) -> Tuple[AnyStr, AnyStr]:
"""
Get tag info (path, category) or "UNK:tag"
:param tag: The tag
:return: Tuple containing tag.path and tag.cat
"""
t = self._tag_map.get(tag, None)
if t:
return t.path, t.cat
return f"UNK:{tag}", "UNK"
def expand(self, tag: AnyStr) -> List[AnyStr]:
"""
Return tag prefixes that are leaf-nodes
:param tag: The tag
:return: A list of prefixes
"""
t = self._tag_map.get(tag, None)
if t:
return [x for x in t.prefix_l if x in self._tag_map]
return []
def platform_tags(self) -> Set[AnyStr]:
"""
Returns a set of platform tags in the Taxonomy
        :return: Set of platform tags
"""
return {
tag.name
for _, tag in self._tag_map.items()
if tag.path.startswith(platform_prefix)
}
def overlaps(self, t1: AnyStr, t2: AnyStr) -> bool:
"""
Whether or not the two tags overlap
:param t1: The first Tag
:param t2: The second Tag
:return: Boolean
"""
m1 = self.get_prefix_l(t1)
m2 = self.get_prefix_l(t2)
return t1 in m2 or t2 in m1
def remove_overlaps(
self, l: Collection[AnyStr]
) -> Union[Collection[AnyStr], List[AnyStr]]:
"""
Returns list with overlapping tags removed
:param l: The list
:return: Deduped list
"""
# TODO - code smell
if not l:
return l
pair_l = sorted([(self.get_depth(t), t) for t in l])
out_l = [pair_l.pop()[1]]
while pair_l:
t = pair_l.pop()[1]
if not any(self.overlaps(t, e) for e in out_l):
out_l.append(t)
return out_l
def read_taxonomy(self, filepath: AnyStr):
"""
Create Taxonomy from file (tab-separated lines)
:param filepath: The path of the file to read
:return: None
"""
with open(filepath, "r") as fd:
for line in fd:
line = line.strip()
if not line.startswith("#") and line:
self.add_tag(line)
def to_file(self, filepath: AnyStr):
"""
Write sorted Taxonomy to a file (tab-separated lines)
:param filepath: The path to write
:return: None
"""
with open(filepath, "w") as fd:
tag_l = sorted(self._tag_map.items(), key=lambda item: item[1].path)
idx = 0
for name, tag in tag_l:
if (idx % 2) == 0:
fd.write(tag.path + "\n")
idx += 1
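# Hedged usage sketch of Taxonomy (assumption: tag strings are illustrative and the
# taxonomy is built empty, without a data file).
def _taxonomy_example():  # hypothetical helper, not part of avclass itself
    tax = Taxonomy(None)
    tax.add_tag("FAM:win32")
    tax.add_tag("FAM:win32:zeus")
    # 'zeus' resolves to its full path and category, and overlaps its 'win32' prefix,
    # so remove_overlaps keeps only the deeper tag.
    return tax.get_path("zeus"), tax.get_category("zeus"), tax.remove_overlaps(["win32", "zeus"])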
class Rules:
"""
Map a single source with one or more destinations
"""
def __init__(self, filepath: Optional[AnyStr]):
"""
Initialize the rule-map and read rules from ``filepath``
:param filepath: The file to read from
"""
self._src_map = {}
if filepath:
self.read_rules(filepath)
def __len__(self):
"""
The number of rules/src in the rule-map
:return: Number of rules
"""
return len(self._src_map)
def add_rule(
self, src: AnyStr, dst_l: Collection[AnyStr] = None, overwrite: bool = False
):
"""
Add a rule to the map. On duplicate, append destinations. If ``overwrite`` is set, replace rule src/dst.
:param src: The source tag
:param dst_l: The destination list
:param overwrite: Whether or not to overwrite duplicates
:return: None
"""
# Remove src from dst_l if it exists
        dst_l = [x for x in (dst_l or []) if x != src]
if not dst_l:
return
logger.debug("[Rules] Adding %s -> %s" % (src, dst_l))
src_tag = Tag(src)
if overwrite:
target_l = [Tag(dst).name for dst in dst_l]
self._src_map[src_tag.name] = set(target_l)
else:
curr_dst = self._src_map.get(src_tag.name, set())
for dst in dst_l:
dst_tag = Tag(dst)
curr_dst.add(dst_tag.name)
self._src_map[src_tag.name] = curr_dst
def remove_rule(self, src: AnyStr) -> bool:
dst = self._src_map.get(src, [])
if dst:
logger.debug("[Rules] Removing rule: %s -> %s" % (src, dst))
del self._src_map[src]
return True
return False
def get_dst(self, src: AnyStr) -> List[AnyStr]:
"""
        Returns the dst list belonging to src, or an empty list.
:param src: The source rule
:return: List of dst
"""
return list(self._src_map.get(src, []))
def read_rules(self, filepath: AnyStr):
"""
Read rules from a file and create the rule-map.
:param filepath: The path of the file to read
:return: None
"""
with open(filepath, "r") as fd:
for line in fd:
line = line.strip()
if not line.startswith("#") and line:
word_list = line.split()
if len(word_list) > 1:
self.add_rule(word_list[0], word_list[1:])
def to_file(self, filepath: AnyStr, taxonomy: Taxonomy = None):
"""
Write current rules to the file at ``filepath``.
:param filepath: The path of the file to write
:param taxonomy: A Taxonomy to optionally resolve full tag paths
:return: None
"""
with open(filepath, "w") as fd:
for src, dst_set in sorted(self._src_map.items()):
"""
Routes and views for the flask application.
"""
from datetime import datetime, timedelta, date
from dateutil import parser
from flask import Flask, render_template, redirect, url_for, flash, request, jsonify
from flask_cors import CORS
import requests
import json
import uuid
import pandas as pd
from timer import Timer
# Random uid for session
app = Flask(__name__)
app.secret_key = str(uuid.uuid1())
CORS(app)
app.debug = True
# Array of header information to be set and kept within the app to
# decrease number of pulls
WELL_NAMES = []
LE_NAMES = []
FORECAST_NAMES = []
# Home page that pulls LE names and passes it to the page
@app.route('/')
@app.route('/home')
def home():
"""Renders the home page."""
bigagg = getlenames(None, False)
return render_template(
'index.html',
title='Home Page',
year=datetime.now().year,
lesel=bigagg
)
# Template route. Inactive
@app.route('/contact')
def contact():
"""Renders the contact page."""
return render_template(
'contact.html',
title='Contact',
year=datetime.now().year,
message='Your contact page.'
)
# Template route. Inactive
@app.route('/about')
def about():
"""Renders the about page."""
return render_template(
'about.html',
title='About',
year=datetime.now().year,
message='Your application description page.'
)
# Pulls all wells via an AJAX request. The form fields don't exist in the HTML; they can
# be set in the DOM or in Flask.
@app.route('/getallwells', methods=['GET', 'POST'])
def getallwells():
BU = request.form['BusinessUnit']
Area = request.form['Area']
Route = request.form['Route']
data = {
"BusinessUnit": BU,
"Area": Area,
"Route": Route
}
r = makerequest('get', '/GetAllWells', json.dumps(data))
print(r.status_code)
res = r.json()['Package']
Welllist = res['WellName']
return jsonify(Welllist)
# Route that pulls LE names and Forecast names and passes them back to the
# createle page.
@app.route('/createle')
def createle():
"""Renders the about page."""
bigagg = getlenames(None, True)
foreagg = FORECAST_NAMES[0]['Details']
return render_template(
'createle.html',
title='About',
year=datetime.now().year,
lesel=bigagg,
foresel=foreagg
)
@app.route('/createforecast')
def createforecast():
"""Renders the about page."""
ag = getariesnames()
return render_template(
'createforecast.html',
title='About',
year=datetime.now().year,
ariessel=ag
)
# Future route. Inactive.
@app.route('/utilityadjustment')
def utilityadjustment():
"""Renders the home page."""
return render_template(
'utilityadjustment.html',
title='UtilityAdjustment',
year=datetime.now().year,
)
# Pulls LE names. If refresh is set, re-pulls them from the API; otherwise filters the
# LE_NAMES global variable. If no filter is given, returns all LE names.
def getlenames(namefilter, refresh):
if(refresh):
data = {
"StartDate": None,
"EndDate": None,
"LastWeek": "False",
"FirstOfMonth": "False",
"WellorAreas": None,
"Wedge": None,
"NameFilter": None
}
r = makerequest('get', '/GetLE', json.dumps(data))
res = r.json()['Package']
bigagg2 = []
for i in res:
val = '' + i + ''
bigagg2.append(res[val])
lepack = bigagg2
else:
if(namefilter):
for i in LE_NAMES:
if namefilter == i['LEName']:
lepack = i
else:
lepack = LE_NAMES
bigagg = lepack
return bigagg
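# Assumed shape of the LE_NAMES entries, inferred from the lookups above and in setleval
# (illustrative keys and values only):
#   LE_NAMES = [{'LEName': 'SomeLE', 'Details': [...]}, ...]
# so getlenames('SomeLE', False) returns the matching dict and getlenames(None, False)
# returns the whole list.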
# Gets all forecast names
@app.route('/getforecastnames', methods=['GET', 'POST'])
def getforecastnames():
bigagg = FORECAST_NAMES
return jsonify(bigagg)
@app.route('/getariesnames', methods=['GET', 'POST'])
def getariesnames():
today = date.today()
edate = today.replace(year=today.year + 1).strftime("%m/%d/%Y")
sdate = today.replace(year=today.year - 1).strftime("%m/%d/%Y")
data = {
"StartDate": sdate,
"EndDate": edate,
"CorpIDList": None,
"BusinessUnit": "EAST",
"Area": None,
"Route": None
}
r = makerequest('get', '/AriesScenarios', json.dumps(data))
bigagg = sorted(r.json()['Package']['Scenario'].values())
return bigagg
@app.route('/getareanames', methods=['GET', 'POST'])
def getareanames():
data = json.dumps(
{"AggregateNames": None, "WellNames": None, "CorpIDs": None})
r = makerequest('get', '/GetAreaDetails', data)
res = r.json()['Package']
bigagg = []
aname = []
for i in res:
val = '' + i + ''
bigagg.append(res[val])
for i in bigagg:
if i['AggregateName'].upper() not in aname:
aname.append(i['AggregateName'].upper())
return jsonify(aname)
# Initializes netting adjustment page. It pulls WELL NAMES and passes it
# to the html page.
@app.route('/nettingadjustment')
def nettingadjustment():
bigagg = WELL_NAMES
return render_template(
'nettingadjustment.html',
title='Netting Adjustment',
year=datetime.now().year,
wellsel=bigagg
)
# Doesn't appear to be used. Need to investigate.
def dict_clean(items):
result = {}
for key, value in items:
if value is None:
value = ''
result[key] = value
return result
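# Hedged sketch of how dict_clean could be wired up (assumption: it was intended as a
# json object_pairs_hook that replaces null values with empty strings; it is not called
# anywhere in this file at the moment).
def _dict_clean_example():  # hypothetical helper, not registered as a route
    raw = '{"WellName": "A-1", "Route": null}'
    return json.loads(raw, object_pairs_hook=dict_clean)  # -> {'WellName': 'A-1', 'Route': ''}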
# Route for accessing productionadjustment. LE Name is passed into the
# route to filter the dropdown options.
@app.route('/setleval/<string:lename>', methods=['POST', 'GET'])
def setleval(lename):
bigagg = getlenames(lename, False)['Details']
return render_template(
'productionadjustment.html',
title='Production Adjustment',
year=datetime.now().year,
wellsel=bigagg
)
# Not sure how active it still is. Need to investigate.
@app.route('/productionadjustment', methods=['POST', 'GET'])
def productionadjustment():
# TODO: get route to display
"""Renders the home page."""
return render_template(
'productionadjustment.html',
title='Production Adjustment',
year=datetime.now().year,
wellsel=bigagg
)
# Initialization route for areaaggregation. Repulls area names because the page changes the list.
# TODO: Explicit EAST declaration of BU.
# Passes routes and areas into html templates.
@app.route('/areaaggregation')
def areaaggregation():
data = json.dumps(
{"AggregateNames": None, "WellNames": None, "CorpIDs": None})
r = makerequest('get', '/GetAreaDetails', data)
res = r.json()['Package']
bigagg = []
for i in res:
val = '' + i + ''
bigagg.append(res[val])
route = json.dumps(
{"BusinessUnit": None})
r = makerequest('get', '/GetAllRoutes', route)
res2 = r.json()['Package']
BU = list(res2['business_unit'].values())
SUB = list(res2['sub_area'].values())
routes = []
for i, j in enumerate(BU):
if j == 'EAST':
val = '' + SUB[i] + ''
val = val.strip()
routes.append(val)
routes.sort(key=str.lower)
return render_template(
'areaaggregation.html',
title='AreaAggregation',
sl1=bigagg,
sl2=bigagg,
selroute=routes)
# Route for creating area from existing area. Takes form inputs of text
# input of new route name and existing route.
@app.route('/createarea', methods=['POST'])
def createarea():
new_rname = request.form['new_area_route']
old_rname = request.form['selroute']
if len(new_rname) > 0:
data = {
"NewRouteName": new_rname,
"DBRouteName": old_rname,
"UserName": "B<NAME>"
}
r = makerequest('POST', '/AreaFromRoute', data)
print('Make New From Route', r.status_code, r.json())
return redirect(url_for('areaaggregation'))
# Route for crud operations for named areas.
# Update: Changes well makeup of existing area.
# Create: Makes new area from list of selected wells.
# Delete: Deletes selected area from dropdown.
# Refreshes page after operation.
@app.route('/editarea', methods=['POST'])
def editareas():
# TODO: Change Update user to logged in user.
test = request.form['new_area']
area = request.form['sl2']
buttonpress = request.form['button_area']
wlist = request.form.getlist('to[]')
wlist = list(dict.fromkeys(wlist))
swlist = ','.join(map(str, wlist))
print(swlist)
if buttonpress == 'Update' and len(wlist) > 0:
data = {
"AreaName": area,
"WellNames": swlist,
"UpdateUser": "<NAME>"
}
r = makerequest('POST', '/UpdateArea', data)
elif buttonpress == 'Create' and len(wlist) > 0:
data = {
"AreaName": test,
"WellList": swlist,
"UpdateUser": "<NAME>"
}
r = makerequest('POST', '/CreateAreaFromWells', data)
elif buttonpress == 'Delete':
payload = {
"AreaName": area,
"WellList": None
}
r = makerequest('delete', '/DeleteArea', payload)
return redirect(url_for('areaaggregation'))
# Route that pulls production for the adjustment page: one pull for Actual and one for
# Adjusted. Feeds these back in a form jexcel can take.
@app.route('/getproduction', methods=['GET', 'POST'])
def getproduction():
lename = request.form['leselect']
well = request.form['wells']
phase = request.form['phase']
start_date = request.form['start_date']
end_date = request.form['end_date']
print(well, phase, start_date, end_date)
if len(start_date) > 0 and len(end_date) > 0:
data = {
"WellorArea": well,
"Wedge": None,
"StartDate": start_date,
"EndDate": end_date,
"LEName": lename,
"AdjustedBool": "False",
"Phase": phase
}
        adj_data = dict(data)  # copy so the Actual request keeps AdjustedBool "False"
        adj_data["AdjustedBool"] = "True"
data = json.dumps(data)
adj_data = json.dumps(adj_data)
r = makerequest('GET', '/ActualProduction', data)
r2 = makerequest('GET', '/ActualProduction', adj_data)
ph = r.json()['Package']['phase']
unit = r.json()['Package']['units']
indicator = ph + ' in ' + unit
dates = r.json()['Package']['dates']
actual = r.json()['Package']['production_values']
adjusted = r2.json()['Package']['production_values']
inputdict = {}
inputdict['dates'] = dates
inputdict['Actual'] = actual
inputdict['Adjusted'] = adjusted
bigagg = getlenames(lename, False)['Details']
return render_template(
'productionadjustment.html',
title='ProductionAdjustment',
tabdef=inputdict,
phtext=indicator,
req=data,
wellsel=bigagg
)
# Route that updates production. Takes fetch request and pushes updates to
# route, then refreshes the page.
@app.route('/updateproduction', methods=['POST'])
def updateproduction():
payload = request.get_json()
r = makerequest('post', '/UpdateProduction', payload)
print(r.status_code)
print(r.json())
return redirect(url_for('nettingadjustment'))
# Route that gets netting values based on form inputs. Returns in form
# jexcel can accept.
@app.route('/getnetting', methods=['GET', 'POST'])
def getnetting():
wells = request.form.getlist('wells[]')
well = request.form.getlist('wells')
if(len(wells) > 0):
well = wells
print(well)
phase = request.form['phase']
data = {
"WellNames": well,
"CorpIDs": None
}
data = json.dumps(data)
if phase == 'Gas':
r = makerequest('GET', '/GetGasNF', data)
else:
r = makerequest('GET', '/GetOilNF', data)
netdata = r.json()['Package']
dates = []
v = []
nwell = []
filltab = {}
for i in netdata:
nwell.append(netdata[i]['WellName'])
dates.append(netdata[i]['NettingDate'])
v.append(netdata[i]['NettingValue'])
filltab['well'] = nwell
filltab['dates'] = dates
filltab['values'] = v
bigagg = WELL_NAMES
if(len(well) > 1):
df = pd.DataFrame(filltab)
df['dates'] = pd.to_datetime(df['dates'], format='%Y-%m-%d %H:%M:%S')
df = df.merge(df.groupby('well').dates.max(), on=['well', 'dates'])
print(df)
filltab2 = {}
filltab2['well'] = df['well'].tolist()
filltab2['dates'] = df['dates'].tolist()
filltab2['values'] = df['values'].tolist()
return jsonify(filltab2)
else:
return render_template(
'nettingadjustment.html',
title='NettingAdjustment',
tabdef=filltab,
netreq=data,
phreq=phase,
wellsel=bigagg)
# Route for updating gas netting. Gets fetch request info and passes it to
# the api.
@app.route('/updategasnetting', methods=['POST'])
def updategasnetting():
payload = request.get_json()
r = makerequest('post', '/UpdateGasNF', payload)
print(r.status_code)
return render_template(
'nettingadjustment.html',
title='NettingAdjustment')
# Route for updating oil netting. Gets fetch request info and passes it to
# the api.
@app.route('/updateoilnetting', methods=['POST'])
def updateoilnetting():
payload = request.get_json()
r = makerequest('post', '/UpdateOilNF', payload)
print(r.status_code)
return render_template(
'nettingadjustment.html',
title='NettingAdjustment')
# Helper function to change no selection to none.
def nchange(x):
if x == 'No Selection':
return None
else:
return x
# Route for getting data for plotting graph. Pulls LE, Forecast and Actual
# production. Pushes response back to AJAX call.
@app.route('/getlevalues/<string:valtype>', methods=['GET', 'POST'])
def getlevalues(valtype):
forecast_name = request.form['forecast_selection']
le_name = request.form['les']
wedge_name = nchange(request.form['wedge_selection'])
well_name = nchange(request.form['s_well_selection'])
n_well_name = well_name
phase_name = nchange(request.form['phase_selection'])
    LE =
# Import libraries
import pandas as pd
import numpy as np
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from joblib import load
# Import application
from app import app
model = load('assets/final_model.joblib')
@app.callback(
Output('prediction-values', 'children'),
[
Input('room_type', 'value'),
Input('property_type', 'value'),
Input('accomodates', 'value'),
Input('beds', 'value'),
Input('bedrooms', 'value'),
Input('host_response_time', 'value'),
Input('is_superhost', 'value'),
Input('host_id_verify', 'value'),
]
)
# Reference https://dash-bootstrap-components.opensource.faculty.ai/l/components/layout
def predict(
room_type, property_type, accomodates, beds, bedrooms,
host_response_time, host_is_superhost, host_identity_verified
):
df = pd.DataFrame(
columns =
[
'room_type', 'property_type', 'accomodates', 'beds',
'bedrooms', 'host_response_time', 'is_superhost', 'host_id_verify'
],
data =
[
[room_type, property_type, accomodates, beds, bedrooms,
host_response_time, host_is_superhost, host_identity_verified]
]
)
y_pred = model.predict(df)[0]
return f'The Base Estimated price is: ${y_pred:.0f}'
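# Hedged sketch of the single-row frame the callback above builds (assumption: the joblib
# pipeline encodes these raw categorical/numeric values itself; the example values are
# illustrative and not taken from the app's dropdowns).
def _example_prediction():  # hypothetical helper, not registered as a Dash callback
    row = pd.DataFrame(
        columns=['room_type', 'property_type', 'accomodates', 'beds',
                 'bedrooms', 'host_response_time', 'is_superhost', 'host_id_verify'],
        data=[['Entire home/apt', 'Entire apartment', 2, 1, 1, 'within an hour', 1, 1]]
    )
    return model.predict(row)[0]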
# Layout
room_type_dropdown = html.Div(
[
dbc.Label('Room Type'),
dcc.Dropdown(
id='room_type',
options=[
{"label": "Entire home/apt", "value": "Entire home/apt"},
{"label": "Private room", "value": "Private room"},
{"label": "Shared room","value": "Shared room"}
],
multi=False
)
]
)
property_type_dropdown = html.Div(
[
dbc.Label('Property Type'),
dcc.Dropdown(
id='property_type',
options=
[
{"label": "Entire apartment", "value": "Entire apartment"},
{"label": "Private room in apartment", "value": "Private room in apartment"},
{"label": "Entire condominium","value": "Entire condominium"},
{"label": "Private room in house ","value": "Private room in house "},
{"label": "Entire house","value": "Entire house"},
{"label": "Private room in condominium","value": "Private room in condominium"},
{"label": "Entire guest suite","value": "Entire guest suite"},
{"label": "Room in boutique hotel","value": "Room in boutique hotel"},
{"label": "Entire serviced apartment","value": "Entire serviced apartment"},
{"label": "Entire loft","value": "Entire loft"},
{"label": "Entire townhouse","value": "Entire townhouse"},
{"label": "Private room in townhouse","value": "Private room in townhouse"},
{"label": "Private room in bungalow","value": "Private room in bungalow"},
{"label": "Entire guesthouse","value": "Entire guesthouse"},
{"label": "Shared room in apartment","value": "Shared room in apartment"},
{"label": "Room in hotel","value": "Room in hotel"},
{"label": "Private room in loft","value": "Private room in loft"},
{"label": "Shared room in house","value": "Shared room in house"},
{"label": "Private room in bed and breakfast","value": "Private room in bed and breakfast"},
{"label": "Private room in guest suite","value": "Private room in guest suite"},
{"label": "Entire bungalow","value": "Entire bungalow"},
{"label": "Shared room in condominium","value": "Shared room in condominium"},
{"label": "Room in serviced apartment","value": "Room in serviced apartment"},
{"label": "Room in bed and breakfast","value": "Room in bed and breakfast"},
{"label": "Private room in guesthouse","value": "Private room in guesthouse"},
{"label": "Room in hostel","value": "Room in hostel"},
{"label": "Boat","value": "Boat"},
{"label": "Private room in cottage","value": "Private room in cottage"},
{"label": "Entire cottage","value": "Entire cottage"},
{"label": "Private room in hostel","value": "Private room in hostel"},
{"label": "Private room in tiny house","value": "Private room in tiny house"},
{"label": "Shared room in hostel","value": "Shared room in hostel"},
{"label": "Room in aparthotel","value": "Room in aparthotel"},
{"label": "Tiny house","value": "Tiny house"},
{"label": "Private room","value": "Private room"},
{"label": "Private room in serviced apartment","value": "Private room in serviced apartment"},
{"label": "Shared room in cave","value": "Shared room in cave"},
{"label": "Entire home/apt","value": "Entire home/apt"},
{"label": "Private room in villa","value": "Private room in villa"},
{"label": "Shared room","value": "Shared room"},
{"label": "Entire villa","value": "Entire villa"},
{"label": "Campsite","value": "Campsite"},
{"label": "Shared room in serviced apartment","value": "Shared room in serviced apartment"},
{"label": "Shared room in loft","value": "Shared room in loft"},
{"label": "Private room in cabin","value": "Private room in cabin"},
{"label": "Shared room in bungalow","value": "Shared room in bungalow"},
{"label": "Private room in farm stay","value": "Private room in farm stay"},
{"label": "Entire place ","value": "Entire place "}
],
multi=False,
placeholder="Type",
)
]
)
accomodates_slider = html.Div(
[
dbc.Label('Accomodates'),
dcc.Slider(
id='accomodates',
min=1,
max=16,
step=1,
marks={i: '{}'.format(i) for i in range(1,17,1)},
className='mb-5'
)
]
)
beds_slider = html.Div(
[
dbc.Label('Beds'),
dcc.Slider(
id='beds',
min=1,
max=16,
step=1,
marks={i: '{}'.format(i) for i in range(1,17,1)},
className='mb-5'
)
]
)
bedrooms_slider = html.Div(
[
dbc.Label('Bedrooms'),
dcc.Slider(
id='bedrooms',
min=1,
max=16,
step=1,
marks={i: '{}'.format(i) for i in range(1,17,1)},
className='mb-5'
)
]
)
amenities_dropdown = html.Div(
[
dbc.Label('Amenities'),
dcc.Dropdown(
id='amenities',
options=
[
{'label': 'Air conditioning', 'value': 'air_conditioning'},
{'label': 'Kitchen', 'value': 'kitchen'},
{'label': 'Heating', 'value': 'heating'},
{'label': 'Essentials', 'value': 'essentials'},
{'label': 'Hair dryer', 'value': 'hair_dryer'},
{'label': 'Iron', 'value': 'iron'},
{'label': 'Shampoo', 'value': 'shampoo'},
{'label': 'Hangers', 'value': 'hangers'},
{'label': 'Fire extinguisher', 'value': 'fire_extinguisher'},
{'label': 'First aid kit', 'value': 'first_aid_kit'},
{'label': 'Indoor fireplace', 'value': 'indoor_fireplace'},
{'label': 'TV', 'value': 'tv'},
{'label': 'Cable TV', 'value': 'cable_tv'}
],
multi=True,
)
]
)
row1 = dbc.Col(
children=[
html.Div(id='prediction-values', className='lead')
]
)
row2 = dbc.Container(
[
dbc.Row(
[
dbc.Col(
[
dbc.Label('Room Type'),
dcc.Dropdown(
id='room_type',
options=[
{"label": "Entire home/apt", "value": "Entire home/apt"},
{"label": "Private room", "value": "Private room"},
{"label": "Shared room","value": "Shared room"}
],
multi=False
)
]
),
dbc.Col(
[
dbc.Label('Property Type'),
dcc.Dropdown(
id='property_type',
options=[
{"label": "Entire apartment", "value": "Entire apartment"},
{"label": "Private room in apartment", "value": "Private room in apartment"},
{"label": "Entire condominium","value": "Entire condominium"},
{"label": "Private room in house ","value": "Private room in house "},
{"label": "Entire house","value": "Entire house"},
{"label": "Private room in condominium","value": "Private room in condominium"},
{"label": "Entire guest suite","value": "Entire guest suite"},
{"label": "Room in boutique hotel","value": "Room in boutique hotel"},
{"label": "Entire serviced apartment","value": "Entire serviced apartment"},
{"label": "Entire loft","value": "Entire loft"},
{"label": "Entire townhouse","value": "Entire townhouse"},
{"label": "Private room in townhouse","value": "Private room in townhouse"},
{"label": "Private room in bungalow","value": "Private room in bungalow"},
{"label": "Entire guesthouse","value": "Entire guesthouse"},
{"label": "Shared room in apartment","value": "Shared room in apartment"},
{"label": "Room in hotel","value": "Room in hotel"},
{"label": "Private room in loft","value": "Private room in loft"},
{"label": "Shared room in house","value": "Shared room in house"},
{"label": "Private room in bed and breakfast","value": "Private room in bed and breakfast"},
{"label": "Private room in guest suite","value": "Private room in guest suite"},
{"label": "Entire bungalow","value": "Entire bungalow"},
{"label": "Shared room in condominium","value": "Shared room in condominium"},
{"label": "Room in serviced apartment","value": "Room in serviced apartment"},
{"label": "Room in bed and breakfast","value": "Room in bed and breakfast"},
{"label": "Private room in guesthouse","value": "Private room in guesthouse"},
{"label": "Room in hostel","value": "Room in hostel"},
{"label": "Boat","value": "Boat"},
{"label": "Private room in cottage","value": "Private room in cottage"},
{"label": "Entire cottage","value": "Entire cottage"},
{"label": "Private room in hostel","value": "Private room in hostel"},
{"label": "Private room in tiny house","value": "Private room in tiny house"},
{"label": "Shared room in hostel","value": "Shared room in hostel"},
{"label": "Room in aparthotel","value": "Room in aparthotel"},
{"label": "Tiny house","value": "Tiny house"},
{"label": "Private room","value": "Private room"},
{"label": "Private room in serviced apartment","value": "Private room in serviced apartment"},
{"label": "Shared room in cave","value": "Shared room in cave"},
{"label": "Entire home/apt","value": "Entire home/apt"},
{"label": "Private room in villa","value": "Private room in villa"},
{"label": "Shared room","value": "Shared room"},
{"label": "Entire villa","value": "Entire villa"},
{"label": "Campsite","value": "Campsite"},
{"label": "Shared room in serviced apartment","value": "Shared room in serviced apartment"},
{"label": "Shared room in loft","value": "Shared room in loft"},
{"label": "Private room in cabin","value": "Private room in cabin"},
{"label": "Shared room in bungalow","value": "Shared room in bungalow"},
{"label": "Private room in farm stay","value": "Private room in farm stay"},
{"label": "Entire place ","value": "Entire place "}
],
multi=False,
placeholder="Type",
)
]
),
]
),
],
)
row3 = dbc.Container(
[
dbc.Row(
[
dbc.Col(
[
dbc.Label('Accomodates'),
dcc.Slider(
id='accomodates',
min=1,
max=16,
step=1,
marks={i: '{}'.format(i) for i in range(1,17,1)},
className='mb-5'
)
]
)
]
),
dbc.Row(
[
dbc.Col(
[
dbc.Label('Beds'),
dcc.Slider(
id='beds',
min=1,
max=16,
step=1,
marks={i: '{}'.format(i) for i in range(1,17,1)},
className='mb-5'
)
]
)
]
),
dbc.Row(
[
dbc.Col(
[
dbc.Label('Bedrooms'),
dcc.Slider(
id='bedrooms',
min=1,
max=16,
step=1,
marks={i: '{}'.format(i) for i in range(1,17,1)},
className='mb-5'
)
]
)
]
),
dbc.Row(
[
dbc.Col(
[
dbc.Label('Amenities'),
dcc.Dropdown(
id='amenities',
options=
[
{'label': 'Air conditioning', 'value': 'air_conditioning'},
{'label': 'Kitchen', 'value': 'kitchen'},
{'label': 'Heating', 'value': 'heating'},
{'label': 'Essentials', 'value': 'essentials'},
{'label': 'Hair dryer', 'value': 'hair_dryer'},
{'label': 'Iron', 'value': 'iron'},
{'label': 'Shampoo', 'value': 'shampoo'},
{'label': 'Hangers', 'value': 'hangers'},
{'label': 'Fire extinguisher', 'value': 'fire_extinguisher'},
{'label': 'First aid kit', 'value': 'first_aid_kit'},
{'label': 'Indoor fireplace', 'value': 'indoor_fireplace'},
{'label': 'TV', 'value': 'tv'},
{'label': 'Cable TV', 'value': 'cable_tv'}
],
multi=True,
)
]
)
]
),
]
)
row4 = dbc.Container(
[
dbc.Row(
[
dbc.Col(
[
dbc.Label("Host's Response Time"),
dcc.Dropdown(
id='host_response_time',
options=
[
{'label': 'within an hour', 'value': 'within an hour '},
{'label': 'within a few hours', 'value': 'within a few hours'},
{'label': 'within a day ', 'value': 'within a day'},
import argparse
import numpy as np
import os
import pprint
import yaml
# HACK: Get logger to print to stdout
import sys
sys.ps1 = '>>> ' # Make it "interactive"
import tensorflow as tf
from multiprocessing import Queue
from lib.config import cfg_from_file, cfg_from_list, cfg
from lib.data_process import make_data_processes, kill_processes
from lib.solver import Solver
from lib.solver_encoder import TextEncoderSolver, TextEncoderCosDistSolver, LBASolver
from lib.solver_gan import End2EndGANDebugSolver
from lib.solver_classifier import ClassifierSolver
from lib.cwgan import CWGAN
from lib.lba import LBA
from lib.classifier import Classifier
import lib.utils as utils
import models
del sys.ps1 # HACK: Get logger to print to stdout
def parse_args():
"""Parse the arguments.
"""
parser = argparse.ArgumentParser(
description='Main text2voxel train/test file.')
parser.add_argument('--cfg',
dest='cfg_files',
action='append',
help='optional config file',
default=None,
type=str)
parser.add_argument('--dont_save_voxels', dest='dont_save_voxels', action='store_true')
parser.add_argument('--lba_only', dest='lba_only', action='store_true')
parser.add_argument('--metric_learning_only', dest='metric_learning_only', action='store_true')
parser.add_argument('--non_inverted_loss', dest='non_inverted_loss', action='store_true')
parser.add_argument('--synth_embedding', dest='synth_embedding', action='store_true')
parser.add_argument('--all_tuples', dest='all_tuples', action='store_true')
parser.add_argument('--reed_classifier', dest='reed_classifier', action='store_true')
parser.add_argument('--val_split',
dest='split',
help='data split for validation/testing (train, val, test)',
default=None,
type=str)
parser.add_argument('--queue_capacity',
dest='queue_capacity',
help='size of queue',
default=None,
type=int)
parser.add_argument('--n_minibatch_test',
dest='n_minibatch_test',
help='number of minibatches to use for test phase',
default=None,
type=int)
parser.add_argument('--dataset', dest='dataset',
help='dataset',
default=None,
type=str)
parser.add_argument('--improved_wgan', dest='improved_wgan', action='store_true')
parser.add_argument('--debug', dest='is_debug', action='store_true')
parser.add_argument('--rand', dest='randomize',
help='randomize (do not use a fixed seed)',
action='store_true')
parser.add_argument('--tiny_dataset', dest='tiny_dataset',
help='use a tiny dataset (~5 examples)',
action='store_true')
parser.add_argument('--model',
dest='model',
help='name of the network model',
default=None,
type=str)
parser.add_argument('--text_encoder', dest='text_encoder',
help='train/test on text encoder',
action='store_true')
parser.add_argument('--classifier', dest='classifier',
help='train/test on classifier',
action='store_true')
parser.add_argument('--end2end', dest='end2end',
help='train/test using end2end model such as End2EndLBACWGAN',
action='store_true')
parser.add_argument('--shapenet_ct_classifier', dest='shapenet_ct_classifier',
help='chair/table classifier (sets up for classification)',
action='store_true')
parser.add_argument('--noise_size',
dest='noise_size',
help='dimension of the noise',
default=None,
type=int)
parser.add_argument('--noise_dist', dest='noise_dist',
help='noise distribution (uniform, gaussian)',
default=None,
type=str)
parser.add_argument('--validation', dest='validation',
help='run validation while training',
action='store_true')
parser.add_argument('--test', dest='test',
help='test mode',
action='store_true')
parser.add_argument('--test_npy', dest='test_npy',
help='test mode using npy files',
action='store_true')
parser.add_argument('--save_outputs', dest='save_outputs',
help='save the outputs to a file',
action='store_true')
parser.add_argument('--summary_freq',
dest='summary_freq',
help='summary frequency',
default=None,
type=int)
parser.add_argument('--optimizer',
dest='optimizer',
help='name of the optimizer',
default=None,
type=str)
parser.add_argument('--critic_optimizer',
dest='critic_optimizer',
help='name of the critic optimizer',
default=None,
type=str)
parser.add_argument('--batch_size',
dest='batch_size',
help='batch size',
default=None,
type=int)
parser.add_argument('--lba_mode',
dest='lba_mode',
help='LBA mode type (TST, STS, MM)',
default=None,
type=str)
parser.add_argument('--lba_test_mode',
dest='lba_test_mode',
help='LBA test mode (shape, text) - what to input during forward pass',
default=None,
type=str)
parser.add_argument('--visit_weight',
dest='visit_weight',
help='visit weight for lba models',
default=None,
type=float)
parser.add_argument('--lba_unnormalize', dest='lba_unnormalize', action='store_true')
parser.add_argument('--num_critic_steps',
dest='num_critic_steps',
help='number of critic steps per train step',
default=None,
type=int)
parser.add_argument('--intense_training_freq',
dest='intense_training_freq',
help='frequency of intense critic training',
default=None,
type=int)
parser.add_argument('--uniform_max',
dest='uniform_max',
help='absolute max for uniform distribution',
default=None,
type=float)
parser.add_argument('--match_loss_coeff',
dest='match_loss_coeff',
help='coefficient for real match loss',
default=None,
type=float)
parser.add_argument('--fake_match_loss_coeff',
dest='fake_match_loss_coeff',
help='coefficient for fake match loss',
default=None,
type=float)
parser.add_argument('--fake_mismatch_loss_coeff',
dest='fake_mismatch_loss_coeff',
help='coefficient for fake mismatch loss',
default=None,
type=float)
parser.add_argument('--gp_weight',
dest='gp_weight',
help='coefficient for gradient penalty',
default=None,
type=float)
parser.add_argument('--text2text_weight',
dest='text2text_weight',
help='coefficient for text2text loss',
default=None,
type=float)
parser.add_argument('--shape2shape_weight',
dest='shape2shape_weight',
help='coefficient for shape2shape loss',
default=None,
type=float)
parser.add_argument('--learning_rate',
dest='learning_rate',
help='learning rate',
default=None,
type=float)
parser.add_argument('--critic_lr_multiplier',
dest='critic_lr_multiplier',
help='critic learning rate multiplier',
default=None,
type=float)
parser.add_argument('--decay_steps',
dest='decay_steps',
help='decay steps',
default=None,
type=int)
parser.add_argument('--num_epochs',
dest='num_epochs',
help='number of epochs',
default=None,
type=int)
parser.add_argument('--augment_max',
dest='augment_max',
help='maximum augmentation perturbation out of 255',
default=None,
type=int)
parser.add_argument('--set',
dest='set_cfgs',
help='set config keys',
default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--ckpt_path', dest='ckpt_path',
help='Initialize network from checkpoint',
default=None)
parser.add_argument('--lba_ckpt_path', dest='lba_ckpt_path',
help='Initialize LBA component of end2endlbawgan network from checkpoint',
default=None)
parser.add_argument('--val_ckpt_path', dest='val_ckpt_path',
help='Initialize validation network from checkpoint',
default=None)
parser.add_argument('--log_path', dest='log_path', help='set log path',
default=None)
args = parser.parse_args()
return args
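# Example invocation (illustrative only: the entry-point name, config path and log path are
# placeholders; the flags themselves are defined above):
#   python main.py --cfg cfg/experiment.yaml --text_encoder --dataset shapenet \
#       --batch_size 64 --log_path ./logs/run1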
def modify_args(args):
"""Modify the default config based on the command line arguments.
"""
# modify default config if requested
if args.cfg_files is not None:
for cfg_file in args.cfg_files:
cfg_from_file(cfg_file)
randomize = args.randomize
if args.test: # Always randomize in test phase
randomize = True
if not randomize:
np.random.seed(cfg.CONST.RNG_SEED)
# NOTE: Unfortunately order matters here
if args.lba_only is True:
cfg_from_list(['LBA.COSINE_DIST', False])
if args.metric_learning_only is True:
cfg_from_list(['LBA.NO_LBA', True])
if args.non_inverted_loss is True:
cfg_from_list(['LBA.INVERTED_LOSS', False])
if args.dataset is not None:
cfg_from_list(['CONST.DATASET', args.dataset])
if args.lba_mode is not None:
cfg_from_list(['LBA.MODEL_TYPE', args.lba_mode])
if args.lba_test_mode is not None:
cfg_from_list(['LBA.TEST_MODE', args.lba_test_mode])
# cfg_from_list(['LBA.N_CAPTIONS_PER_MODEL', 1]) # NOTE: Important!
if args.shapenet_ct_classifier is True:
cfg_from_list(['CONST.SHAPENET_CT_CLASSIFIER', args.shapenet_ct_classifier])
if args.visit_weight is not None:
cfg_from_list(['LBA.VISIT_WEIGHT', args.visit_weight])
if args.lba_unnormalize is True:
cfg_from_list(['LBA.NORMALIZE', False])
if args.improved_wgan is True:
cfg_from_list(['CONST.IMPROVED_WGAN', args.improved_wgan])
if args.synth_embedding is True:
cfg_from_list(['CONST.SYNTH_EMBEDDING', args.synth_embedding])
if args.all_tuples is True:
cfg_from_list(['CONST.TEST_ALL_TUPLES', args.all_tuples])
if args.reed_classifier is True:
cfg_from_list(['CONST.REED_CLASSIFIER', args.reed_classifier])
if args.noise_dist is not None:
cfg_from_list(['GAN.NOISE_DIST', args.noise_dist])
if args.uniform_max is not None:
cfg_from_list(['GAN.NOISE_UNIF_ABS_MAX', args.uniform_max])
if args.num_critic_steps is not None:
cfg_from_list(['WGAN.NUM_CRITIC_STEPS', args.num_critic_steps])
if args.intense_training_freq is not None:
cfg_from_list(['WGAN.INTENSE_TRAINING_FREQ', args.intense_training_freq])
if args.match_loss_coeff is not None:
cfg_from_list(['WGAN.MATCH_LOSS_COEFF', args.match_loss_coeff])
if args.fake_match_loss_coeff is not None:
cfg_from_list(['WGAN.FAKE_MATCH_LOSS_COEFF', args.fake_match_loss_coeff])
if args.fake_mismatch_loss_coeff is not None:
cfg_from_list(['WGAN.FAKE_MISMATCH_LOSS_COEFF', args.fake_mismatch_loss_coeff])
if args.gp_weight is not None:
cfg_from_list(['WGAN.GP_COEFF', args.gp_weight])
if args.text2text_weight is not None:
cfg_from_list(['WGAN.TEXT2TEXT_WEIGHT', args.text2text_weight])
if args.shape2shape_weight is not None:
cfg_from_list(['WGAN.SHAPE2SHAPE_WEIGHT', args.shape2shape_weight])
if args.learning_rate is not None:
cfg_from_list(['TRAIN.LEARNING_RATE', args.learning_rate])
if args.critic_lr_multiplier is not None:
cfg_from_list(['GAN.D_LEARNING_RATE_MULTIPLIER', args.critic_lr_multiplier])
if args.decay_steps is not None:
cfg_from_list(['TRAIN.DECAY_STEPS', args.decay_steps])
if args.queue_capacity is not None:
cfg_from_list(['CONST.QUEUE_CAPACITY', args.queue_capacity])
if args.n_minibatch_test is not None:
cfg_from_list(['CONST.N_MINIBATCH_TEST', args.n_minibatch_test])
if args.noise_size is not None:
cfg_from_list(['GAN.NOISE_SIZE', args.noise_size])
if args.batch_size is not None:
cfg_from_list(['CONST.BATCH_SIZE', args.batch_size])
if args.summary_freq is not None:
cfg_from_list(['TRAIN.SUMMARY_FREQ', args.summary_freq])
if args.num_epochs is not None:
cfg_from_list(['TRAIN.NUM_EPOCHS', args.num_epochs])
if args.model is not None:
cfg_from_list(['NETWORK', args.model])
if args.optimizer is not None:
cfg_from_list(['TRAIN.OPTIMIZER', args.optimizer])
if args.critic_optimizer is not None:
cfg_from_list(['GAN.D_OPTIMIZER', args.critic_optimizer])
if args.ckpt_path is not None:
cfg_from_list(['DIR.CKPT_PATH', args.ckpt_path])
if args.lba_ckpt_path is not None:
cfg_from_list(['END2END.LBA_CKPT_PATH', args.lba_ckpt_path])
if args.val_ckpt_path is not None:
cfg_from_list(['DIR.VAL_CKPT_PATH', args.val_ckpt_path])
if args.log_path is not None:
cfg_from_list(['DIR.LOG_PATH', args.log_path])
if args.augment_max is not None:
cfg_from_list(['TRAIN.AUGMENT_MAX', args.augment_max])
if args.test:
cfg_from_list(['TRAIN.AUGMENT_MAX', 0])
cfg_from_list(['CONST.BATCH_SIZE', 1])
cfg_from_list(['LBA.N_CAPTIONS_PER_MODEL', 1]) # NOTE: Important!
cfg_from_list(['LBA.N_PRIMITIVE_SHAPES_PER_CATEGORY', 1]) # NOTE: Important!
if args.test_npy:
cfg_from_list(['CONST.BATCH_SIZE', 1])
# To overwrite default variables, put the set_cfgs after all argument initializations
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
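# Hedged sketch of the precedence implemented by modify_args above (assumption: these keys
# exist in the default cfg, as they are referenced elsewhere in this file; values are
# illustrative). --cfg files apply first, specific flags next, and --set pairs last.
def _override_precedence_example():  # hypothetical helper, mirrors what modify_args does
    cfg_from_list(['CONST.BATCH_SIZE', 64])    # effect of --batch_size 64
    cfg_from_list(['TRAIN.NUM_EPOCHS', 100])   # effect of --set TRAIN.NUM_EPOCHS 100
    return cfg.CONST.BATCH_SIZE, cfg.TRAIN.NUM_EPOCHS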
def get_inputs_dict(args):
"""Gets the input dict for the current model and dataset.
"""
if cfg.CONST.DATASET == 'shapenet':
if (args.text_encoder is True) or (args.end2end is True) or (args.classifier is True):
inputs_dict = utils.open_pickle(cfg.DIR.TRAIN_DATA_PATH)
val_inputs_dict = utils.open_pickle(cfg.DIR.VAL_DATA_PATH)
test_inputs_dict = utils.open_pickle(cfg.DIR.TEST_DATA_PATH)
else: # Learned embeddings
inputs_dict = utils.open_pickle(cfg.DIR.SHAPENET_METRIC_EMBEDDINGS_TRAIN)
val_inputs_dict = utils.open_pickle(cfg.DIR.SHAPENET_METRIC_EMBEDDINGS_VAL)
test_inputs_dict = utils.open_pickle(cfg.DIR.SHAPENET_METRIC_EMBEDDINGS_TEST)
elif cfg.CONST.DATASET == 'primitives':
if ((cfg.CONST.SYNTH_EMBEDDING is True) or (args.text_encoder is True) or
(args.classifier is True)):
if args.classifier and not cfg.CONST.REED_CLASSIFIER: # Train on all splits for classifier
tf.logging.info('Using all (train/val/test) splits for training')
inputs_dict = utils.open_pickle(cfg.DIR.PRIMITIVES_ALL_SPLITS_DATA_PATH)
else:
tf.logging.info('Using train split only for training')
inputs_dict = utils.open_pickle(cfg.DIR.PRIMITIVES_TRAIN_DATA_PATH)
val_inputs_dict = utils.open_pickle(cfg.DIR.PRIMITIVES_VAL_DATA_PATH)
test_inputs_dict = utils.open_pickle(cfg.DIR.PRIMITIVES_TEST_DATA_PATH)
else: # Learned embeddings
inputs_dict = utils.open_pickle(cfg.DIR.PRIMITIVES_METRIC_EMBEDDINGS_TRAIN)
val_inputs_dict = utils.open_pickle(cfg.DIR.PRIMITIVES_METRIC_EMBEDDINGS_VAL)
test_inputs_dict = utils.open_pickle(cfg.DIR.PRIMITIVES_METRIC_EMBEDDINGS_TEST)
else:
raise ValueError('Please use a valid dataset (shapenet, primitives).')
if args.tiny_dataset is True:
if ((cfg.CONST.DATASET == 'primitives' and cfg.CONST.SYNTH_EMBEDDING is True)
or (args.text_encoder is True)):
raise NotImplementedError('Tiny dataset not supported for synthetic embeddings.')
ds = 5 # New dataset size
if cfg.CONST.BATCH_SIZE > ds:
raise ValueError('Please use a smaller batch size than {}.'.format(ds))
inputs_dict = utils.change_dataset_size(inputs_dict, new_dataset_size=ds)
val_inputs_dict = utils.change_dataset_size(val_inputs_dict, new_dataset_size=ds)
test_inputs_dict = utils.change_dataset_size(test_inputs_dict, new_dataset_size=ds)
# Select the validation/test split
if args.split == 'train':
split_str = 'train'
val_inputs_dict = inputs_dict
elif (args.split == 'val') or (args.split is None):
split_str = 'val'
val_inputs_dict = val_inputs_dict
elif args.split == 'test':
split_str = 'test'
val_inputs_dict = test_inputs_dict
else:
raise ValueError('Please select a valid split (train, val, test).')
print('Validation/testing on {} split.'.format(split_str))
if (cfg.CONST.DATASET == 'shapenet') and (cfg.CONST.SHAPENET_CT_CLASSIFIER is True):
category_model_list, class_labels = Classifier.set_up_classification(inputs_dict)
val_category_model_list, val_class_labels = Classifier.set_up_classification(val_inputs_dict)
assert class_labels == val_class_labels
# Update inputs dicts
inputs_dict['category_model_list'] = category_model_list
inputs_dict['class_labels'] = class_labels
val_inputs_dict['category_model_list'] = val_category_model_list
val_inputs_dict['class_labels'] = val_class_labels
return inputs_dict, val_inputs_dict
def get_solver(g, net, args, is_training):
if isinstance(net, LBA):
solver = LBASolver(net, g, is_training)
elif args.text_encoder:
solver = TextEncoderSolver(net, g, is_training)
elif isinstance(net, Classifier):
solver = ClassifierSolver(net, g, is_training)
elif isinstance(net, CWGAN):
solver = End2EndGANDebugSolver(net, g, is_training)
else:
raise ValueError('Invalid network.')
return solver
def main():
"""Main text2voxel function.
"""
args = parse_args()
print('Called with args:')
print(args)
if args.save_outputs is True and args.test is False:
raise ValueError('Can only save outputs when testing, not training.')
if args.validation:
assert not args.test
if args.test:
assert args.ckpt_path is not None
modify_args(args)
print('----------------- CONFIG -------------------')
pprint.pprint(cfg)
# Save yaml
os.makedirs(cfg.DIR.LOG_PATH, exist_ok=True)
with open(os.path.join(cfg.DIR.LOG_PATH, 'run_cfg.yaml'), 'w') as out_yaml:
yaml.dump(cfg, out_yaml, default_flow_style=False)
# set up logger
tf.logging.set_verbosity(tf.logging.INFO)
try:
with tf.Graph().as_default() as g: # create graph
# Load data
inputs_dict, val_inputs_dict = get_inputs_dict(args)
# Build network
is_training = not args.test
print('------------ BUILDING NETWORK -------------')
network_class = models.load_model(cfg.NETWORK)
net = network_class(inputs_dict, is_training)
# Prefetching data processes
#
            # Create worker and data queue for
represented as a class variable - with a specific
YANG type.
YANG Description: List of Rendered Service Paths (RSP).
"""
__slots__ = ('_path_helper', '_extmethods', '__id','__name','__nsd_connection_point_ref',)
_yang_name = 'rsp'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__nsd_connection_point_ref = YANGDynClass(base=YANGListType("nsd_ref",yc_nsd_connection_point_ref_nst__nst_netslicefgd_rsp_nsd_connection_point_ref, yang_name="nsd-connection-point-ref", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='nsd-ref', extensions=None), is_container='list', yang_name="nsd-connection-point-ref", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='list', is_config=True)
self.__id = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)
self.__name = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'nst', u'netslicefgd', u'rsp']
def _get_id(self):
"""
Getter method for id, mapped from YANG variable /nst/netslicefgd/rsp/id (string)
YANG Description: Identifier for the RSP.
"""
return self.__id
def _set_id(self, v, load=False):
"""
Setter method for id, mapped from YANG variable /nst/netslicefgd/rsp/id (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_id() directly.
YANG Description: Identifier for the RSP.
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """id must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)""",
})
self.__id = t
if hasattr(self, '_set'):
self._set()
def _unset_id(self):
self.__id = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)
def _get_name(self):
"""
Getter method for name, mapped from YANG variable /nst/netslicefgd/rsp/name (string)
YANG Description: RSP name.
"""
return self.__name
def _set_name(self, v, load=False):
"""
Setter method for name, mapped from YANG variable /nst/netslicefgd/rsp/name (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_name() directly.
YANG Description: RSP name.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """name must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)""",
})
self.__name = t
if hasattr(self, '_set'):
self._set()
def _unset_name(self):
self.__name = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)
def _get_nsd_connection_point_ref(self):
"""
Getter method for nsd_connection_point_ref, mapped from YANG variable /nst/netslicefgd/rsp/nsd_connection_point_ref (list)
YANG Description: A list of references to connection points.
"""
return self.__nsd_connection_point_ref
def _set_nsd_connection_point_ref(self, v, load=False):
"""
Setter method for nsd_connection_point_ref, mapped from YANG variable /nst/netslicefgd/rsp/nsd_connection_point_ref (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_nsd_connection_point_ref is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_nsd_connection_point_ref() directly.
YANG Description: A list of references to connection points.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("nsd_ref",yc_nsd_connection_point_ref_nst__nst_netslicefgd_rsp_nsd_connection_point_ref, yang_name="nsd-connection-point-ref", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='nsd-ref', extensions=None), is_container='list', yang_name="nsd-connection-point-ref", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """nsd_connection_point_ref must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("nsd_ref",yc_nsd_connection_point_ref_nst__nst_netslicefgd_rsp_nsd_connection_point_ref, yang_name="nsd-connection-point-ref", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='nsd-ref', extensions=None), is_container='list', yang_name="nsd-connection-point-ref", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='list', is_config=True)""",
})
self.__nsd_connection_point_ref = t
if hasattr(self, '_set'):
self._set()
def _unset_nsd_connection_point_ref(self):
self.__nsd_connection_point_ref = YANGDynClass(base=YANGListType("nsd_ref",yc_nsd_connection_point_ref_nst__nst_netslicefgd_rsp_nsd_connection_point_ref, yang_name="nsd-connection-point-ref", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='nsd-ref', extensions=None), is_container='list', yang_name="nsd-connection-point-ref", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='list', is_config=True)
id = __builtin__.property(_get_id, _set_id)
name = __builtin__.property(_get_name, _set_name)
nsd_connection_point_ref = __builtin__.property(_get_nsd_connection_point_ref, _set_nsd_connection_point_ref)
_pyangbind_elements = OrderedDict([('id', id), ('name', name), ('nsd_connection_point_ref', nsd_connection_point_ref), ])
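# Hedged usage sketch for the generated rsp bindings above; the enclosing class
# name follows pyangbind's naming convention for the path /nst/netslicefgd/rsp
# and the values are illustrative only.
#   rsp = yc_rsp_nst__nst_netslicefgd_rsp()
#   rsp.id = 'rsp-1'
#   rsp.name = 'gold-service-path'
#   cp_ref = rsp.nsd_connection_point_ref.add('nsd-1')  # keyed YANG list, key leaf nsd-ref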
class yc_match_attributes_nst__nst_netslicefgd_classifier_match_attributes(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module nst - based on the path /nst/netslicefgd/classifier/match-attributes. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: List of match attributes.
"""
__slots__ = ('_path_helper', '_extmethods', '__id','__ip_proto','__source_ip_address','__destination_ip_address','__source_port','__destination_port',)
_yang_name = 'match-attributes'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__source_ip_address = YANGDynClass(base=[RestrictedClassType(base_type=six.text_type, restriction_dict={u'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}),RestrictedClassType(base_type=six.text_type, restriction_dict={u'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}),], is_leaf=True, yang_name="source-ip-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='inet:ip-address', is_config=True)
self.__destination_ip_address = YANGDynClass(base=[RestrictedClassType(base_type=six.text_type, restriction_dict={u'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}),RestrictedClassType(base_type=six.text_type, restriction_dict={u'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}),], is_leaf=True, yang_name="destination-ip-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='inet:ip-address', is_config=True)
self.__source_port = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={u'range': [u'0..65535']}), is_leaf=True, yang_name="source-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='inet:port-number', is_config=True)
self.__ip_proto = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="ip-proto", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='uint8', is_config=True)
self.__destination_port = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={u'range': [u'0..65535']}), is_leaf=True, yang_name="destination-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='inet:port-number', is_config=True)
self.__id = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'nst', u'netslicefgd', u'classifier', u'match-attributes']
def _get_id(self):
"""
Getter method for id, mapped from YANG variable /nst/netslicefgd/classifier/match_attributes/id (string)
YANG Description: Identifier for the classifier match attribute rule.
"""
return self.__id
def _set_id(self, v, load=False):
"""
Setter method for id, mapped from YANG variable /nst/netslicefgd/classifier/match_attributes/id (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_id() directly.
YANG Description: Identifier for the classifier match attribute rule.
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """id must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)""",
})
self.__id = t
if hasattr(self, '_set'):
self._set()
def _unset_id(self):
self.__id = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)
def _get_ip_proto(self):
"""
Getter method for ip_proto, mapped from YANG variable /nst/netslicefgd/classifier/match_attributes/ip_proto (uint8)
YANG Description: IP Protocol.
"""
return self.__ip_proto
def _set_ip_proto(self, v, load=False):
"""
Setter method for ip_proto, mapped from YANG variable /nst/netslicefgd/classifier/match_attributes/ip_proto (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_ip_proto is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ip_proto() directly.
YANG Description: IP Protocol.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="ip-proto", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='uint8', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ip_proto must be of a type compatible with uint8""",
'defined-type': "uint8",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="ip-proto", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='uint8', is_config=True)""",
})
self.__ip_proto = t
if hasattr(self, '_set'):
self._set()
def _unset_ip_proto(self):
    self.__ip_proto = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="ip-proto", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='uint8', is_config=True)
import argparse
import sys
import os
import ggutils.s3_access as s3_access
import smalltrain as st
try:
# For the case smalltrain is installed as Python library
print('try to load smalltrain modules from Python library')
from smalltrain.model.nn_model import NNModel
print('smalltrain modules are ready to be loaded from Python library')
except ModuleNotFoundError:
if os.environ.get('SMALLTRAIN_HOME'):
# For the case the environmental value SMALLTRAIN_HOME is exported
_smalltrain_home_path = os.environ.get('SMALLTRAIN_HOME')
_smalltrain_home_path = os.path.join(_smalltrain_home_path, 'src')
else:
# Try to load smalltrain modules from current directory
_smalltrain_home_path = './'
print('try to load smalltrain modules from the path: {}'.format(_smalltrain_home_path))
sys.path.append(_smalltrain_home_path)
from smalltrain.model.nn_model import NNModel
print('smalltrain modules are ready to be loaded from the path: {}'.format(_smalltrain_home_path))
def get_model_list():
from smalltrain.model.one_dim_cnn_model import OneDimCNNModel
from smalltrain.model.two_dim_cnn_model import TwoDimCNNModel
from smalltrain.model.two_dim_cnn_model_v2 import TwoDimCNNModelV2
model_list = [OneDimCNNModel(), TwoDimCNNModel(), TwoDimCNNModelV2()]
model_id_list = [model.MODEL_ID for model in model_list]
return model_list, model_id_list
MODEL_LIST, MODEL_ID_LIST = get_model_list()
def construct_model(log_dir_path, model_id, hparams, train_data=None, debug_mode=True):
if model_id in MODEL_ID_LIST:
for _m in MODEL_LIST:
if _m.MODEL_ID == model_id:
model = _m.construct_and_prepare_model(log_dir_path=log_dir_path, model_id=model_id, hparams=hparams, train_data=train_data, debug_mode=debug_mode)
return model
raise TypeError('Invalid model_id:{}'.format(model_id))
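# Hedged usage sketch (not part of the original module): how construct_model()
# is typically driven. The log directory and the minimal hparams dict below are
# assumptions; real runs pass the full hyperparameter dictionary built by
# st.Hyperparameters.
def _example_construct_model(log_dir_path='/var/tensorflow/tsp/logs/example'):
    example_model_id = MODEL_ID_LIST[0]  # use whichever model id is registered first
    example_hparams = {'model_id': example_model_id, 'debug_mode': True}  # assumed minimal keys
    return construct_model(log_dir_path=log_dir_path, model_id=example_model_id,
                           hparams=example_hparams)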
# MODEL_ID_4NN = '4NN_20180808' # 4 nn model 2019/09/10
# MODEL_ID_DNN = 'DNN' # 4 nn model 2019/09/10
# MODEL_ID_1D_CNN = '1D_CNN'
# MODEL_ID_CC = 'CC' # Carbon Copy
# MODEL_ID = MODEL_ID_4NN
class Operation:
    """Operation class holding the hyperparameters of a train or prediction operation.
    Arguments:
        hparams: A dictionary that maps hyperparameter keys to values.
        setting_file_path: Path to a settings file to load hyperparameters from.
    """
    def __init__(self, hparams=None, setting_file_path=None):
        self._hparam_ins = st.Hyperparameters(hparams, setting_file_path)
        self.hparams_dict = self._hparam_ins.__dict__
        self.model = None  # set by construct_and_prepare_model()
        print('init hparams_dict: {}'.format(self.hparams_dict))
def get_hparams_ins(self):
return self._hparam_ins
def update_params_from_file(self, setting_file_path):
self._hparam_ins.update_hyper_param_from_file(setting_file_path)
self.hparams_dict = self._hparam_ins.__dict__
def update_hyper_param_from_json(self, json_obj):
self._hparam_ins.update_hyper_param_from_json(json_obj)
self.hparams_dict = self._hparam_ins.__dict__
def read_hyper_param_from_file(self, setting_file_path):
        '''
        Kept for backward compatibility with older callers.
        :param setting_file_path: Path to a settings file to read hyperparameters from.
        :return: The updated hyperparameter dictionary.
        '''
self.update_params_from_file(setting_file_path=setting_file_path)
self.hparams_dict = self._hparam_ins.__dict__
return self.hparams_dict
def prepare_dirs(self):
'''
Prepare directories used in operation
:return:
'''
log_dir_path = self.hparams_dict['save_root_dir'] + '/logs/' + self.hparams_dict['train_id']
log_dir_path = log_dir_path.replace('//', '/')
os.makedirs(log_dir_path, exist_ok=True)
self.log_dir_path = log_dir_path
# Set value to hyperparameter
self._hparam_ins.set('log_dir_path', log_dir_path)
save_dir_path = self.hparams_dict['save_root_dir'] + '/model/' + self.hparams_dict['train_id'] + '/'
save_dir_path = save_dir_path.replace('//', '/')
os.makedirs(save_dir_path, exist_ok=True)
self.save_dir_path = save_dir_path
self._hparam_ins.set('save_dir_path', save_dir_path)
save_file_name = 'model-{}_lr-{}_bs-{}.ckpt'.format(self.hparams_dict['model_prefix'], self.hparams_dict['learning_rate'],
self.hparams_dict['batch_size'])
save_file_path = save_dir_path + '/' + save_file_name
save_file_path = save_file_path.replace('//', '/')
self.save_file_path = save_file_path
self._hparam_ins.set('save_file_path', save_file_path)
report_dir_path = self.hparams_dict['save_root_dir'] + '/report/' + self.hparams_dict['train_id'] + '/'
report_dir_path = report_dir_path.replace('//', '/')
os.makedirs(report_dir_path, exist_ok=True)
self.report_dir_path = report_dir_path
self._hparam_ins.set('report_dir_path', report_dir_path)
operation_dir_path = os.path.join(self.hparams_dict['save_root_dir'], 'operation')
operation_dir_path = os.path.join(operation_dir_path, self.hparams_dict['train_id'])
operation_file_path = os.path.join(operation_dir_path, self.hparams_dict['train_id'] + '.json')
os.makedirs(operation_dir_path, exist_ok=True)
# self.operation_dir_path = operation_dir_path
# self.operation_file_path = operation_file_path
if self.hparams_dict['cloud_root'] is not None:
print('Upload the hparams to cloud: {}'.format(self.hparams_dict['cloud_root']))
upload_to_cloud(operation_file_path, self.hparams_dict['cloud_root'], self.hparams_dict['save_root_dir'])
print('[Operation]DONE prepare_dirs')
def construct_and_prepare_model(self, hparams=None, train_data=None):
hparams = hparams or self.hparams_dict
model_id = hparams['model_id']
print('construct_and_prepare_model with model_id: {}'.format(model_id))
if model_id in MODEL_ID_LIST:
for _m in MODEL_LIST:
if _m.MODEL_ID == model_id:
model = _m.construct_and_prepare_model(log_dir_path=hparams['log_dir_path'], model_id=model_id,
hparams=hparams, train_data=train_data,
debug_mode=hparams['debug_mode'])
self.model = model
return model
raise TypeError('Invalid model_id:{}'.format(model_id))
def train(self, hparams=None):
hparams = hparams or self.hparams_dict
if self.model is None:
self.construct_and_prepare_model(hparams=hparams)
self.model.train(iter_to=hparams['iter_to'], learning_rate=hparams['learning_rate'],
batch_size=hparams['batch_size'], dropout_ratio=hparams['dropout_ratio'],
l1_norm_reg_ratio=hparams['l1_norm_reg_ratio'], save_file_path=hparams['save_file_path'],
report_dir_path=hparams['report_dir_path'])
print('DONE train data ')
print('====================')
def auto(self, hparams=None, setting_file_path=None):
print('====================')
print('TODO auto operation with hyper parameter: ')
print(self.hparams_dict)
print('====================')
self.prepare_dirs()
print('DONE prepare_dirs')
print('====================')
print('TODO construct_and_prepare_model')
self.construct_and_prepare_model()
print('DONE construct_and_prepare_model')
print('====================')
if (not self.hparams_dict.get('prediction_mode')):
print('TODO train( or test only)')
self.train()
print('DONE train( or test only)')
print('====================')
print('DONE auto operation')
print('====================')
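# Hedged usage sketch (not part of the original module) mirroring the flow that
# Operation.auto() runs above; the settings path is hypothetical and only needs
# to be readable by st.Hyperparameters.
def _example_run_operation(setting_file_path='/var/tmp/tsp/settings/example.json'):
    op = Operation(setting_file_path=setting_file_path)
    op.prepare_dirs()                   # creates logs/, model/, report/ under save_root_dir
    op.construct_and_prepare_model()    # picks the model class matching hparams['model_id']
    op.train()                          # or simply op.auto() to run the same steps in order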
def main(exec_param):
print(exec_param)
operation = Operation(setting_file_path=exec_param['setting_file_path'])
operation.auto()
def _main(exec_param):
print(exec_param)
operation = Operation()
if 'setting_file_path' in exec_param.keys() and exec_param['setting_file_path'] is not None:
operation.update_params_from_file(exec_param['setting_file_path'])
elif 'json_param' in exec_param.keys() and exec_param['json_param'] is not None:
operation.update_hyper_param_from_json(exec_param['json_param'])
exec_param = operation.hparams_dict
print('updated exec_param:{}'.format(exec_param))
# prepare directories
operation.prepare_dirs()
if 'scrpit_test' in exec_param.keys() and exec_param['scrpit_test'] == True:
test_static_methods()
model = operation.construct_and_prepare_model()
model.train(iter_to=1000, learning_rate=exec_param['learning_rate'], batch_size=exec_param['batch_size'], dropout_ratio=exec_param['dropout_ratio'], save_file_path=exec_param['save_file_path'])
exit()
model = None
print('====================')
print('TODO train data ')
if model is None:
model = operation.construct_and_prepare_model()
operation.train()
print('DONE train data ')
print('====================')
from pathlib import Path
def download_to_local(path, work_dir_path='/var/tmp/tsp/'):
ret_path = None
# check path is local
if os.path.exists(path): return path
os.makedirs(work_dir_path, exist_ok=True)
# check if s3 path
s3_bucket_name, s3_key = get_bucket_name(path)
if s3_bucket_name is not None:
ret_path = os.path.join(work_dir_path, s3_key)
os.makedirs(Path(ret_path).parent, exist_ok=True)
s3_access.download(s3_bucket_name=s3_bucket_name, s3_key=s3_key, local_dir=work_dir_path, file_path=s3_key)
return ret_path
import multiprocessing
def upload_to_cloud(local_path, cloud_root, local_root, with_multiprocessing=True):
if local_path is None:
print('No file to upload_to_cloud:local_path:{}'.format(local_path))
return
s3_bucket_name, s3_root_key = get_bucket_name(cloud_root)
if s3_bucket_name is None:
raise ValueError('Invalid cloud_root:{}'.format(cloud_root))
if len(local_path.split(local_root)[0]) > 0:
raise ValueError('Invalid local_path:{} or local_root:{}'.format(local_path, local_root))
local_path_from_local_root = local_path.split(local_root)[1]
# print('local_path_from_local_root:{}'.format(local_path_from_local_root))
s3_key = os.path.join(s3_root_key, local_path_from_local_root)
local_dir = Path(local_path).parent
file_path = Path(local_path).name
if with_multiprocessing:
# p = multiprocessing.Process(target=s3_access.upload, args=(s3_bucket_name, s3_key, local_dir, file_path,))
# p.start()
send_to_s3_uploader(s3_bucket_name=s3_bucket_name, s3_key=s3_key, local_dir=local_dir, file_path=file_path)
else:
s3_access.upload(s3_bucket_name=s3_bucket_name, s3_key=s3_key, local_dir=local_dir, file_path=file_path)
def send_to_s3_uploader(s3_bucket_name, s3_key, local_dir, file_path, queue_file_path='/var/tmp/tsp/queue.txt'):
    # Queue the upload request as a CSV line for the external uploader process.
    mode = 'a' if os.path.isfile(queue_file_path) else 'w'
    with open(queue_file_path, mode) as f:
        f.write('{}, {}, {}, {}\n'.format(s3_bucket_name, s3_key, local_dir, file_path))
def is_s3_path(s3_path):
s3_bucket_name, s3_key = get_bucket_name(s3_path)
return (s3_bucket_name is not None)
def get_bucket_name(s3_path):
if s3_path is None: return None, None
try:
_split = s3_path.split('s3://')
if len(_split[0]) > 0: return None, None
s3_bucket_name = _split[1].split('/')[0]
s3_key = _split[1][1 + len(s3_bucket_name):]
return s3_bucket_name, s3_key
except IndexError as e:
print('Can not read s3_bucket_name or s3_key from s3_path:{}'.format(s3_path))
return None, None
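# Hedged example of how get_bucket_name() splits an S3 URI (the bucket and key
# below are placeholders):
#   get_bucket_name('s3://example-bucket/tsp/report/result.json')
#     -> ('example-bucket', 'tsp/report/result.json')
#   get_bucket_name('/var/tensorflow/tsp/report/result.json')
#     -> (None, None)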
def test_download_to_local():
path = 's3://your-bucket/tsp/sample/sample.json'
download_path = download_to_local(path)
has_downloaded = os.path.isfile(download_path)
print('[test_download_to_local]from:{}, to:{} has_downloaded:{}'.format(path, download_path, has_downloaded))
assert has_downloaded
def test_upload_to_cloud():
# case 1
local_path = '/var/tsp/sample/test/sample_upload.txt'
cloud_root = 's3://your-bucket/tsp/sample/test/'
local_root = '/var/tsp/sample/test/'
upload_to_cloud(local_path, cloud_root, local_root)
def test_static_methods():
test_upload_to_cloud()
exit()
test_download_to_local()
print('Done test_static_methods')
def main_with_train_id(train_id):
print('TODO')
import json
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='tsp')
parser.add_argument('--model_prefix', '-mp', type=str, default='nn',
help='The prefix string representing the model')
parser.add_argument('--save_root_dir', '-rd', type=str, default='/var/tensorflow/tsp/',
help='Root dir for Tensorflow FileWriter')
parser.add_argument('--init_model_path', '-imp', type=str, default=None,
help='Model path to restore Tensorflow session')
parser.add_argument('--restore_var_name_list', '-rvnl', type=list, default=None,
help='restore_var_name_list')
parser.add_argument('--untrainable_var_name_list', '-utvnl', type=list, default=None,
help='untrainable_var_name_list')
parser.add_argument('--learning_rate', '-ll', type=float, default=1e-4,
                        help='learning_rate of optimizer')
# About batch size
parser.add_argument('--batch_size', '-bs', type=int, default=128,
help='batch_size')
# About minibatch operation
parser.add_argument('--evaluate_in_minibatch', '-enmb', type=bool, default=False,
                        help='Bool, Whether to evaluate in minibatch or not (Default: False)')
parser.add_argument('--iter_to', '-itr', type=int, default=10000,
help='iter_to')
parser.add_argument('--dropout_ratio', '-dr', type=float, default=0.5,
help='Dropout ratio')
parser.add_argument('--train_id', '-tid', type=str, default='TEST_YYYYMMDD-HHmmSS',
help='id attached to model and log dir to identify train operation ')
parser.add_argument('--model_id', '-mid', type=str, default=st.Hyperparameters.DEFAULT_DICT['model_id'],
help='id attached to model to identify model constructure ')
parser.add_argument('--model_type', '-mty', type=str, default='REGRESSION',
help='model_type ')
parser.add_argument('--prediction_mode', '-pmd', type=bool, default=None,
help='Whether prediction mode or not')
parser.add_argument('--debug_mode', '-dmd', type=bool, default=None,
help='Whether debug mode or not')
parser.add_argument('--monochrome_mode', '-mmd', type=bool, default=False,
help='Whether monochrome mode or not')
parser.add_argument('--optimizer', '-otm', type=str, default=None,
help='String, optimizer')
parser.add_argument('--input_ts_size', '-its', type=int, default=12,
help='input_ts_size')
parser.add_argument('--input_ts_width', '-itw', type=int, default=None,
                        help='input_ts_width')
parser.add_argument('--input_img_width', '-iiw', type=int, default=32,
help='input_img_width')
parser.add_argument('--input_output_ts_offset', '-iotso', type=int, default=1,
help='input_output_ts_offset')
parser.add_argument('--input_output_ts_offset_range', '-iotsor', type=list, default=None,
help='input_output_ts_offset_range')
parser.add_argument('--input_output_ts_offset_list', '-iotsol', type=list, default=None,
help='input_output_ts_offset_list')
parser.add_argument('--has_to_complement_before', '-htcb', type=bool, default=True,
help='Whether complement the value before ts starts or not(Default:True)')
parser.add_argument('--complement_ts', '-cpts', type=str, default=None,
help='String, Values to complement the missing time series data (Default:None)')
parser.add_argument('--n_layer', '-nly', type=int, default=5,
help='n_layer')
parser.add_argument('--num_add_fc_layers', '-nafl', type=int, default=0,
help='num_add_fc_layers')
parser.add_argument('--fc_node_size_list', '-fnsl', type=list, default=None,
help='fc_node_size_list')
parser.add_argument('--fc_weight_stddev_list', '-fwsl', type=list, default=None,
help='List of integer, the list of stddevs of weight variables in each fc layers. Default: all 0.1.')
parser.add_argument('--fc_bias_value_list', '-fbvl', type=list, default=None,
help='List of integer, the list of initial values of bias variables in each fc layers. Default: all 0.1')
# about sub model
parser.add_argument('--sub_model_url', '-smu', type=str, default=None,
help='String, The sub model\'s URL (Default: None, Do not use sub model)')
parser.add_argument('--sub_model_allocation', '-sma', type=float, default=0.0,
help='Float, the allocation of value which flows into the sub model (Default: 0.0, no allocation into the sub model)')
parser.add_argument('--sub_model_input_point', '-smip', type=str, default=None,
help='String, The sub model input point (Default: None, Do not use sub model)')
parser.add_argument('--sub_model_output_point', '-smop', type=str, default=None,
help='String, The sub model output point (Default: None, Do not use sub model)')
# about ResNet
parser.add_argument('--has_res_net', '-hrs', type=bool, default=False,
help='Whether the model has ResNet (the layers in the model has short cut) or not.')
parser.add_argument('--num_cnn_layers_in_res_block', '-nclrb', type=int, default=2,
help='Integer, the number of CNN layers in one Residual Block (Default: 2)')
parser.add_argument('--ts_start', '-tss', type=int, default=None,
help='ts_start')
parser.add_argument('--ts_end', '-tse', type=int, default=None,
help='ts_end')
parser.add_argument('--test_ts_index_from', '-tetsif', type=int, default=None,
help='test_ts_index_from')
parser.add_argument('--test_ts_index_to', '-tetsit', type=int, default=None,
help='test_ts_index_to')
parser.add_argument('--max_data_per_ts', '-mdpts', type=int, default=None,
help='max_data_per_ts')
parser.add_argument('--filter_width', '-flw', type=int, default=5,
help='filter_width')
parser.add_argument('--cnn_channel_size', '-ccs', type=int, default=4,
help='cnn_channel_size')
parser.add_argument('--cnn_channel_size_list', '-ccsl', type=list, default=None,
help='cnn_channel_size_list')
parser.add_argument('--pool_size_list', '-psl', type=list, default=None,
help='pool_size_list')
parser.add_argument('--act_func_list', '-actfl', type=list, default=None,
help='act_func_list')
parser.add_argument('--cnn_weight_stddev_list', '-cwsl', type=list, default=None,
| |
      query_string = ("INSERT INTO db_table (host_name, db_name, db_pretty_name, "
                      "db_timestamp, db_size, db_flags) "
"VALUES(%s, %s, %s, %s, %s, %s)")
self.DbModify(query_string,
(client_host_name, db_name, db_pretty_name,
db_timestamp, db_size, db_flags))
# Record the db_id now, if it succeeded, and the following inserts fail,
# we can at least back some of the inserts out so we can try again later.
db_id = self.QueryDbId(client_host_name, db_name)
if db_id == 0:
raise exceptions.StreamPushServeException(
"HandleAddDbRequest: unable to add database: hostname:"
" %s, database name: %s" % (client_host_name, db_name))
# Check which files already exist in the files_table.
file_path_exists_flags = self._CheckFileExistence(client_host_name,
file_paths)
assert file_path_exists_flags
# Add files entries.
      # Batching the SQL commands in groups of 1000 speeds things up noticeably;
      # smaller groups are slightly slower and larger ones show no further gain.
group_size = 1000
batcher = batch_sql_manager.BatchSqlManager(
self.stream_db, group_size, logger)
query_string = ("INSERT INTO files_table "
"(host_name, file_path, file_size, file_status) "
"VALUES(%s, %s, %s, 0)")
for i in range(len(file_paths)):
# Add only the newer files to avoid throwing SQLException.
if not file_path_exists_flags[i]:
batcher.AddModify(
query_string,
[client_host_name, file_paths[i], file_sizes[i]])
batcher.ExecuteRemaining()
# Finally update the db_files_table by first querying the file_ids from
# the files_table.
file_ids = self._QueryFileIds(client_host_name, file_paths)
assert file_ids
query_string = ("INSERT INTO db_files_table (db_id, file_id) "
"VALUES(%s, %s)")
parameters = [(db_id, file_id) for file_id in file_ids]
batcher.AddModifySet(query_string, parameters)
batcher.ExecuteRemaining()
batcher.Close() # Need to close to release SQL resources.
http_io.ResponseWriter.AddBodyElement(
response, constants.HDR_STATUS_CODE, constants.STATUS_SUCCESS)
except Exception as e:
# Record the internal exception to pass out via the final exception.
error_message = ("HandleAddDbRequest:"
" Error adding files to publisher database\n") + repr(e)
# Clean up any remnants of this db_id, so that a subsequent attempt to
# publish will start over at this step.
# Note: this assumes that our connection is still valid.
try:
if db_id > 0:
# Note: this will leave behind some entries in files_table, but this
# will not hurt anything.
query_string = "DELETE FROM db_files_table WHERE db_id = %s"
self.DbModify(query_string, [db_id])
query_string = "DELETE FROM db_table WHERE db_id = %s"
self.DbModify(query_string, [db_id])
except exceptions.Error:
error_message += "\nAttempt to cleanup the database failed"
raise exceptions.StreamPushServeException(error_message)
def HandleDeleteDbRequest(self, request, response):
"""Handles delete database requests.
Args:
request: request object.
response: response object.
Raises:
OSError, psycopg2.Warning/Error, StreamPushServeException
"""
logger.debug("HandleDeleteDbRequest...")
db_name = request.GetParameter(constants.DB_NAME)
if not db_name:
raise exceptions.StreamPushServeException(
"HandleDeleteDbRequest: Missing database name.")
(db_path, db_type) = serve_utils.IdentifyPublishedDb(db_name)
if serve_utils.IsFusionDb(db_type):
client_host_name = request.GetClientHostName()
if not client_host_name:
raise exceptions.StreamPushServeException(
"HandleDeleteDbRequest: missing Hostname.")
elif serve_utils.IsPortable(db_type):
# Note: The client host name is not used for portable globes, so we just
# make it an empty string.
client_host_name = ""
else:
raise exceptions.StreamPushServeException(
"HandleDeleteDbRequest: Unsupported DB type %s.", db_type)
# Check if the database exists.
db_id = self.QueryDbId(client_host_name, db_path)
if db_id == 0:
raise exceptions.StreamPushServeException(
"HandleDeleteDbRequest: Could not find database: "
"Fusion host: {} Database name: {}.".format(
client_host_name, db_path))
# Check if the database is currently published.
if self._QueryDbPublished(db_id):
raise exceptions.StreamPushServeException(
"HandleDeleteDbRequest: Database is currently published."
" Please unpublish it first.")
# Delete the entries in db_table.
query_string = "DELETE FROM db_table WHERE db_id = %s"
self.DbModify(query_string, (db_id,))
if serve_utils.IsFusionDb(db_type):
# Delete the entries in db_files_table. The entries in the
# files_table and the actual files are untouched. Those will be garbage
# collected at the request of the user.
query_string = "DELETE FROM db_files_table WHERE db_id = %s"
self.DbModify(query_string, [db_id])
elif serve_utils.IsPortable(db_type):
query_string = ("SELECT file_id FROM db_files_table WHERE db_id = %s")
rs_db_files = self.DbQuery(query_string, (db_id,))
if rs_db_files:
assert len(rs_db_files) == 1
file_id = rs_db_files[0]
# Delete the entries in db_files_table.
delete_db_files_table_cmd = (
"DELETE FROM db_files_table WHERE db_id = %s")
self.DbModify(delete_db_files_table_cmd, (db_id,))
# Get file_path from files_table and delete file.
query_string = ("SELECT file_path FROM files_table"
" WHERE file_id = %s")
rs_files = self.DbQuery(query_string, (file_id,))
if rs_files:
assert len(rs_files) == 1
assert db_path == rs_files[0]
if os.path.exists(rs_files[0]):
os.unlink(rs_files[0])
# Delete the file entry from files_table.
delete_files_table_cmd = "DELETE FROM files_table WHERE file_id = %s"
self.DbModify(delete_files_table_cmd, (file_id,))
else:
raise exceptions.StreamPushServeException(
"Unsupported DB type %s.", db_type)
http_io.ResponseWriter.AddBodyElement(
response, constants.HDR_STATUS_CODE, constants.STATUS_SUCCESS)
def HandleUnregisterPortableRequest(self, request, response):
"""Handles unregister portable globe requests.
Cleans up table's entries related to specified portable globe.
Args:
request: request object.
response: response object.
Raises:
OSError, psycopg2.Warning/Error, StreamPushServeException
"""
logger.debug("HandleUnregisterPortableRequest...")
db_name = request.GetParameter(constants.DB_NAME)
if not db_name:
raise exceptions.StreamPushServeException(
"HandleUnregisterPortableRequest: Missing database name.")
(db_path, db_type) = serve_utils.IdentifyPublishedDb(db_name)
if not serve_utils.IsPortable(db_type):
raise exceptions.StreamPushServeException(
"HandleUnregisterPortableRequest: Unsupported DB type %s.", db_type)
# Note: The client host name is not used for portable globes, so we just
# make it an empty string.
client_host_name = ""
# Check if the database exists.
db_id = self.QueryDbId(client_host_name, db_path)
if db_id == 0:
http_io.ResponseWriter.AddBodyElement(
response, constants.HDR_STATUS_CODE, constants.STATUS_SUCCESS)
return
# Check if the database is currently published.
if self._QueryDbPublished(db_id):
raise exceptions.StreamPushServeException(
"HandleUnregisterPortableRequest: Database is currently published."
" Please unpublish it first.")
self._UnregisterPortable(db_id)
http_io.ResponseWriter.AddBodyElement(
response, constants.HDR_STATUS_CODE, constants.STATUS_SUCCESS)
def HandleSyncRequest(self, request, response):
"""Handles database sync request.
Args:
request: request object.
response: response object.
Returns:
in response object, the list of files that need to be transfered.
Raises:
StreamPushServeException in case of invalid database.
"""
logger.debug("HandleSyncRequest..")
db_name = request.GetParameter(constants.DB_NAME)
if not db_name:
raise exceptions.StreamPushServeException(
"HandleSyncRequest: missing db name.")
(db_path, db_type) = serve_utils.IdentifyPublishedDb(db_name)
if not serve_utils.IsFusionDb(db_type):
raise exceptions.StreamPushServeException(
"HandleSyncRequest: Unsupported DB type %s.", db_type)
client_host_name = request.GetClientHostName()
if not client_host_name:
raise exceptions.StreamPushServeException(
"HandleSyncRequest: missing Hostname.")
db_id = self.QueryDbId(client_host_name, db_path)
if db_id == 0:
raise exceptions.StreamPushServeException(
"HandleSyncRequest: Database %s is not registered on server." %
db_name)
transfer_file_paths = self.SynchronizeDb(db_id, db_type, client_host_name)
if not transfer_file_paths:
http_io.ResponseWriter.AddBodyElement(
response, constants.HDR_STATUS_CODE, constants.STATUS_SUCCESS)
else:
for transfer_file_path in transfer_file_paths:
http_io.ResponseWriter.AddBodyElement(
response, constants.HDR_FILE_NAME, transfer_file_path)
http_io.ResponseWriter.AddBodyElement(
response, constants.HDR_STATUS_CODE, constants.STATUS_UPLOAD_NEEDED)
def HandleLocalTransferRequest(self, request, response):
"""Handles Local Transferring request.
Args:
request: request object.
response: response object.
Raises:
StreamPushServeException
"""
logger.debug("HandleLocalTransferRequest...")
src_path = request.GetParameter(constants.FILE_PATH)
dest_path = request.GetParameter(constants.DEST_FILE_PATH)
if (not src_path) or (not dest_path):
raise exceptions.StreamPushServeException(
"HandleLocalTransferRequest: Missing src/dest paths.")
src_path = os.path.normpath(src_path)
dest_path = os.path.normpath(dest_path)
logger.debug("HandleLocalTransferRequest: %s to %s", src_path, dest_path)
force_copy = request.IsForceCopy()
prefer_copy = request.IsPreferCopy()
if serve_utils.LocalTransfer(src_path, dest_path,
force_copy, prefer_copy, self._allow_symlinks):
http_io.ResponseWriter.AddBodyElement(
response, constants.HDR_STATUS_CODE, constants.STATUS_SUCCESS)
else:
raise exceptions.StreamPushServeException("Local transfer failed.")
def HandleGarbageCollectRequest(self, request, response):
"""Handles Garbage Collect request.
Removes all unnecessary files and file references from the publish root
volume and database.
Args:
request: request object.
response: response object.
Raises:
psycopg2.Error/Warning
"""
logger.debug("HandleGarbageCollectRequest..")
assert request.GetParameter(constants.CMD) == constants.CMD_GARBAGE_COLLECT
parent_dirs_set = set()
delete_count = 0
delete_size = 0
# The "NOT IN" sql operator is painfully slow for large collections of
# files.
# Instead we do 2 queries of all the file_id(s) in files_table and
# db_files_table
# and walk through the list removing files_table entries that don't occur
# in db_files_table and vice versa.
query = ("SELECT file_id, host_name, file_path, file_size"
" FROM files_table ORDER BY file_id")
rs_files = self.DbQuery(query)
query = ("SELECT file_id FROM db_files_table"
" GROUP BY file_id ORDER BY file_id")
rs_db_files = self.DbQuery(query)
db_file_id = sys.maxint
if rs_db_files:
db_file_iter = iter(rs_db_files)
db_file_id = int(db_file_iter.next())
# Use a BatchSqlManager to batch up many individual SQL commands into
# one postgres invocation.
batcher = batch_sql_manager.BatchSqlManager(
self.stream_db, 1000, logger)
top_level_dir_prefix = self.server_prefix + "/"
delete_files_table_cmd = "DELETE FROM files_table WHERE file_id = %s"
delete_db_files_table_cmd = (
"DELETE FROM db_files_table WHERE file_id = %s")
# We have 2 sorted lists of file_id's in the query results.
# db_files_table is the subset of files_table
# We will walk the two lists removing any files_table entries that don't
# appear in db_files_table.
for rs_file in rs_files:
file_id = int(rs_file[0])
# Check the boundary case...this could happen and would basically mean
# pain for whomever this happens to.
if file_id == sys.maxint:
logger.error("HandleGarbageCollectRequest has encountered a file_id "
"equal to the max int value. "
"The database has run out of valid file id's.")
raise exceptions.StreamPushServeException(
"The publisher database has run out of valid file id's."
" This published database must likely be recreated from scratch.")
if file_id < db_file_id:
# Delete this file:
# the files_table entry does not exist in the db_files_table
# Check if the file exists and if so delete it.
top_level_dir = top_level_dir_prefix + rs_file[1]
        server_file_path
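# Hedged sketch of how the sorted-merge walk described above proceeds: both
# result sets are ordered by file_id, so a single pass suffices. Orphan rows
# (file_id < db_file_id) get their files_table entry and on-disk file removed
# through the batcher, while matching ids advance the db_files iterator and
# leave the file in place.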
import json
import os
import random
from collections import defaultdict
from os import listdir, mkdir, makedirs
from os.path import abspath, basename, dirname, exists, isfile, join
from re import search, sub
from datetime import datetime
import click
import pandas as pd
from roboai_cli.util.cli import print_error, print_info
from roboai_cli.util.input_output import load_md, load_yaml
from roboai_cli.util.helpers import clean_intents
@click.command(name="test", help="Test Rasa models for the required bots.")
@click.argument("languages", nargs=-1,)
@click.option("--cross-validation", is_flag=True, default=False, help="Evaluates model in cross-validation mode.")
@click.option("--folds", "-f", "folds", type=int, default=3, help="Number of folds to be applied in cross-validation mode.")
def command(languages: tuple, cross_validation: bool, folds: int):
"""Tests a Rasa bot.
Args:
languages (tuple): languages (bots) to be tested. If no language is passed
it checks if the current bot is a multi-language bot. If so
all bots will be tested. Otherwise the single-language bot will
be tested.
cross_validation (bool): Evaluates model in cross-validation mode.
folds (int): Number of folds to be applied in cross-validation mode.
"""
if len(languages) == 0:
if exists(join(abspath("."), "languages")):
# multi-language bot
bot_dir = get_all_languages(path=abspath("."), languages=languages)
multi_language_bot = True
else:
# single-language bot
bot_dir = [abspath(".")]
multi_language_bot = False
else:
multi_language_bot = True
bot_dir = get_all_languages(path=abspath("."), languages=languages)
test(bot_dir, multi_language_bot, cross_validation, folds)
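# Hedged CLI examples, assuming the roboai_cli entry point is exposed as
# "roboai" (language codes are placeholders):
#   roboai test                                  # test the current single- or multi-language bot
#   roboai test en de --cross-validation --folds 5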
def test(languages_path: list, multi_language_bot: bool, cross_validation: bool, folds: int) -> None:
timestamp = datetime.now().strftime("%d%m%Y-%H%M%S")
for language in languages_path:
makedirs(join(language, "results", timestamp), exist_ok=True)
lang = basename(language) if basename(language) != "bot" else "the"
print_info(f"Starting test process for {lang} bot")
# Check if tests folder exists
if exists(join(language, "tests")):
# Check if tests folder contains any file
if any(
isfile(join(language, "tests", i))
for i in listdir(join(language, "tests"))
):
# Check if intents are already covered in the existing test files
# If there are intents left to be tested, the user is prompted
# with an option to continue testing
if check_covered_intents(language):
test_bot(language, cross_validation, folds, timestamp)
else:
continue
# If tests folder is empty, generate a test stories file
else:
generate_conversation_md_from_stories(
language, multi_language_bot
)
if proceed_with_test(
"Test stories have been generated. Continue testing?\n"
):
test_bot(language, cross_validation, folds, timestamp)
else:
continue
# If tests folder doesn't exist, create it and generate a test stories file
else:
generate_conversation_md_from_stories(language, multi_language_bot)
if proceed_with_test(
"Test stories have been generated. Continue testing?\n"
):
test_bot(language, cross_validation, folds, timestamp)
else:
continue
format_results(language, timestamp)
print_info(f"Finished testing {lang} bot")
def test_bot(language: str, cross_validation: bool, folds: int, timestamp: str):
if cross_validation:
os.system(
f"rasa test --model {join(language, 'models')} --nlu {join(language, 'data')} \
--cross-validation -f {folds} --config {join(language, 'config.yml')} \
--stories {join(language, 'tests')} --out {join(language, 'results', timestamp)}"
)
else:
os.system(
f"rasa test --model {join(language, 'models')} --nlu {join(language, 'data')} \
--stories {join(language, 'tests')} --out {join(language, 'results', timestamp)}"
)
def check_covered_intents(language_path: str) -> bool:
intents = load_yaml(join(language_path, "domain.yml")).get("intents", None)
if intents is None:
print_error("No intents were found.\n")
exit(0)
else:
intents = clean_intents(intents)
for filename in listdir(join(language_path, "tests")):
if filename.endswith(".md"):
lines = load_md(join(language_path, "tests", filename))
for line in lines:
for intent in intents:
if intent in line:
intents.remove(intent)
break
if intents:
print(
"The following intents are not covered in your test stories:"
)
print(*intents, sep="\n")
should_test = proceed_with_test("Continue testing?\n")
else:
should_test = True
return should_test
def proceed_with_test(message: str):
return click.confirm(message)
def get_intent_example(intent: str, nlu) -> str:
examples = []
copy = False
comment = False
for line in nlu:
if line.startswith("##") and search(r"\b" + intent + r"\b", line):
if comment is False:
copy = True
elif line.startswith("##") and not search(
r"\b" + intent + r"\b", line
):
if comment is False:
copy = False
elif line.strip().startswith("<!--") and line.strip().endswith("-->"):
comment = False
elif line.startswith("<!--") or line.strip().startswith("<!------------------"):
comment = True
elif line.endswith("-->\n") or line.strip().endswith("------------------->"):
comment = False
elif copy:
# keep entities
examples.append(line.replace("-", "", 1).strip())
# remove entities
# examples.append(
# sub(r"\{[^)]*\}", "", line)
# .replace("[", "")
# .replace("]", "")
# .replace("-", "", 1)
# .strip()
# )
examples = [i for i in examples if i]
if examples:
return random.choice(examples)
else:
return ""
def generate_conversation_md_from_stories(
path_to_language: str, multi_language_bot: bool
):
"""Generates test stories based on the stories.md file.
Complex stories are generated because they're copied from the original stories.md file
Args:
path_to_language (str): path_to_language (str): path to language folder.
If it's a single-language bot this will be the bot root's folder.
multi_language_bot (bool): flag indicating whether the bot is single or multi language
"""
if multi_language_bot:
stories_path = dirname(join(path_to_language))
else:
stories_path = join(path_to_language, "data")
all_stories = []
for filename in listdir(stories_path):
if isfile(join(stories_path, filename)):
if "stories" in filename:
stories = load_md(join(stories_path, filename))
all_stories.append(stories)
all_stories = [item for sublist in all_stories for item in sublist]
all_nlu = []
for filename in listdir(join(path_to_language, "data")):
if isfile(join(path_to_language, "data", filename)):
if "stories" not in filename:
nlu = load_md(join(path_to_language, "data", filename))
all_nlu.append(nlu)
all_nlu = [item for sublist in all_nlu for item in sublist]
output_path = join(path_to_language, "tests", "conversation_tests.md")
if not exists(join(path_to_language, "tests")):
mkdir(join(path_to_language, "tests"))
with open(output_path, "w", encoding="utf-8") as out_f:
        for line in all_stories:
if line.startswith("*"):
intent = (
sub(r"\{[^)]*\}", "", line)
.replace("[", "")
.replace("]", "")
.replace("*", "", 1)
.strip()
)
if " or " in line.lower():
first_intent = intent.split()[0]
out_f.write(
f"* {first_intent}: {get_intent_example(first_intent, all_nlu)}\n"
)
elif "form:" in line.lower():
intent_in_form = intent.split()[1]
out_f.write(
f"* form: {intent_in_form}: {get_intent_example(intent_in_form, all_nlu)}\n"
)
else:
intent = (
sub(r"\{[^)]*\}", "", line)
.replace("[", "")
.replace("]", "")
.replace("*", "", 1)
.strip()
)
out_f.write(
f"* {intent}: {get_intent_example(intent, all_nlu)}\n"
)
else:
out_f.write(line)
def generate_conversation_md_from_domain(path_to_language: str):
"""Generates test stories based on the intents available in the domain.
Complex stories are not generated.
Args:
path_to_language (str): path to language folder.
If it's a single-language bot this will be the bot root's folder.
"""
domain = load_yaml(join(path_to_language, "domain.yml"))
intents_list = domain.get("intents", None)
all_nlu = []
for filename in listdir(join(path_to_language, "data")):
if isfile(join(path_to_language, "data", filename)):
if "stories" not in filename:
nlu = load_md(join(path_to_language, "data", filename))
all_nlu.append(nlu)
all_nlu = [item for sublist in all_nlu for item in sublist]
if not intents_list:
print_error("No intents were found.")
exit(0)
elif intents_list:
output_path = join(path_to_language, "tests", "conversation_tests.md")
if not exists(join(path_to_language, "tests")):
mkdir(join(path_to_language, "tests"))
with open(output_path, "w", encoding="utf-8") as out_f:
for intent in intents_list:
out_f.write(f"## {intent}\n")
out_f.write(
f"* {intent}: {get_intent_example(intent, all_nlu)}\n"
)
out_f.write(f" - utter_{intent}\n")
out_f.write("\n")
def format_results(language_path: str, timestamp: str):
"""
Format the results output by Rasa. This includes:
- confusion list stating how many times two intents are being confused
- misclassified intents: the same as above but it shows the specific utters
- statistics table: containing metrics like accuracy, precision, etc.
Args:
language_path (str): path to language folder.
timestamp (str): timestamp of when the test is run.
"""
try:
confusion_list = confusion_table_df(language_path, timestamp)
misclassified_intents = misclassified_intents_df(language_path, timestamp)
statistics_table = stats_table(language_path, timestamp)
with pd.ExcelWriter(
join(language_path, "results", timestamp, "intent_details.xlsx"),
engine="xlsxwriter",
) as xlsx_writer:
confusion_list.to_excel(
excel_writer=xlsx_writer, sheet_name="Confusion Table", index=False
)
worksheet = xlsx_writer.sheets["Confusion Table"]
for i, col in enumerate(confusion_list.columns):
column_len = max(
confusion_list[col].astype(str).str.len().max(), len(col) + 2
)
worksheet.set_column(i, i, column_len)
misclassified_intents.to_excel(
excel_writer=xlsx_writer, sheet_name="Misclassified Intents", index=False
)
worksheet = xlsx_writer.sheets["Misclassified Intents"]
for i, col in enumerate(misclassified_intents.columns):
column_len = max(
misclassified_intents[col].astype(str).str.len().max(),
len(col) + 2,
)
worksheet.set_column(i, i, column_len)
statistics_table.to_excel(
excel_writer=xlsx_writer, sheet_name="Intent Statistics", index=False
)
worksheet = xlsx_writer.sheets["Intent Statistics"]
for i, col in enumerate(statistics_table.columns):
column_len = max(
statistics_table[col].astype(str).str.len().max(),
len(col) + 2,
)
worksheet.set_column(i, i, column_len)
except Exception:
print_error("One or more files necessary for the intent_details.xlsx file was not output by Rasa and thus this file cannot be generated.\n")
def misclassified_intents_df(language_path: str, timestamp: str) -> pd.DataFrame:
with open(join(language_path, "results", timestamp, "intent_errors.json"), "r") as f:
intent_errors = json.load(f)
wrong_intents = defaultdict(lambda: defaultdict(list))
for error in intent_errors:
wrong_intents[error["intent"]][
error["intent_prediction"]["name"]
].append(error["text"])
return pd.DataFrame(
[
{
"intent": k,
"confused_with": value,
"utterances": "\n".join(v[value]),
}
for k, v in wrong_intents.items()
for value in v
]
)
def stats_table(language_path: str, timestamp: str) -> pd.DataFrame:
with open(join(language_path, "results", timestamp, "intent_report.json"), "r") as f:
intent_report = json.load(f)
stats_list = []
for key_, value_ in intent_report.items():
if key_ not in ["accuracy", "micro_avg", "macro_avg", "weighted_avg"]:
stats_list.append([key_, round(value_["precision"], 3), round(value_["recall"], 3), round(value_["f1-score"], 3)])
stats_table = pd.DataFrame(
stats_list, columns=["intent", "precision", "recall", "f1-score"]
)
return stats_table.sort_values("precision", ascending=True)
def confusion_table_df(language_path: str, timestamp: str) -> pd.DataFrame:
with open(join(language_path, "results", timestamp, "intent_report.json"), "r") as f:
intent_report = json.load(f)
confusion_list = []
for key_, value_ in intent_report.items():
if key_ not in ["accuracy", "micro avg", "macro avg", "weighted avg"]:
            for intent, count in
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Wrapper for /usr/include/GL/glu.h
Generated by tools/gengl.py.
Do not modify this file.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
from ctypes import *
from pyglet.gl.lib import link_GLU as _link_function
from pyglet.gl.lib import c_ptrdiff_t
# BEGIN GENERATED CONTENT (do not edit below this line)
# This content is generated by tools/gengl.py.
# Wrapper for /usr/include/GL/glu.h
GLU_EXT_object_space_tess = 1 # /usr/include/GL/glu.h:58
GLU_EXT_nurbs_tessellator = 1 # /usr/include/GL/glu.h:59
GLU_FALSE = 0 # /usr/include/GL/glu.h:62
GLU_TRUE = 1 # /usr/include/GL/glu.h:63
GLU_VERSION_1_1 = 1 # /usr/include/GL/glu.h:66
GLU_VERSION_1_2 = 1 # /usr/include/GL/glu.h:67
GLU_VERSION_1_3 = 1 # /usr/include/GL/glu.h:68
GLU_VERSION = 100800 # /usr/include/GL/glu.h:71
GLU_EXTENSIONS = 100801 # /usr/include/GL/glu.h:72
GLU_INVALID_ENUM = 100900 # /usr/include/GL/glu.h:75
GLU_INVALID_VALUE = 100901 # /usr/include/GL/glu.h:76
GLU_OUT_OF_MEMORY = 100902 # /usr/include/GL/glu.h:77
GLU_INCOMPATIBLE_GL_VERSION = 100903 # /usr/include/GL/glu.h:78
GLU_INVALID_OPERATION = 100904 # /usr/include/GL/glu.h:79
GLU_OUTLINE_POLYGON = 100240 # /usr/include/GL/glu.h:83
GLU_OUTLINE_PATCH = 100241 # /usr/include/GL/glu.h:84
GLU_NURBS_ERROR = 100103 # /usr/include/GL/glu.h:87
GLU_ERROR = 100103 # /usr/include/GL/glu.h:88
GLU_NURBS_BEGIN = 100164 # /usr/include/GL/glu.h:89
GLU_NURBS_BEGIN_EXT = 100164 # /usr/include/GL/glu.h:90
GLU_NURBS_VERTEX = 100165 # /usr/include/GL/glu.h:91
GLU_NURBS_VERTEX_EXT = 100165 # /usr/include/GL/glu.h:92
GLU_NURBS_NORMAL = 100166 # /usr/include/GL/glu.h:93
GLU_NURBS_NORMAL_EXT = 100166 # /usr/include/GL/glu.h:94
GLU_NURBS_COLOR = 100167 # /usr/include/GL/glu.h:95
GLU_NURBS_COLOR_EXT = 100167 # /usr/include/GL/glu.h:96
GLU_NURBS_TEXTURE_COORD = 100168 # /usr/include/GL/glu.h:97
GLU_NURBS_TEX_COORD_EXT = 100168 # /usr/include/GL/glu.h:98
GLU_NURBS_END = 100169 # /usr/include/GL/glu.h:99
GLU_NURBS_END_EXT = 100169 # /usr/include/GL/glu.h:100
GLU_NURBS_BEGIN_DATA = 100170 # /usr/include/GL/glu.h:101
GLU_NURBS_BEGIN_DATA_EXT = 100170 # /usr/include/GL/glu.h:102
GLU_NURBS_VERTEX_DATA = 100171 # /usr/include/GL/glu.h:103
GLU_NURBS_VERTEX_DATA_EXT = 100171 # /usr/include/GL/glu.h:104
GLU_NURBS_NORMAL_DATA = 100172 # /usr/include/GL/glu.h:105
GLU_NURBS_NORMAL_DATA_EXT = 100172 # /usr/include/GL/glu.h:106
GLU_NURBS_COLOR_DATA = 100173 # /usr/include/GL/glu.h:107
GLU_NURBS_COLOR_DATA_EXT = 100173 # /usr/include/GL/glu.h:108
GLU_NURBS_TEXTURE_COORD_DATA = 100174 # /usr/include/GL/glu.h:109
GLU_NURBS_TEX_COORD_DATA_EXT = 100174 # /usr/include/GL/glu.h:110
GLU_NURBS_END_DATA = 100175 # /usr/include/GL/glu.h:111
GLU_NURBS_END_DATA_EXT = 100175 # /usr/include/GL/glu.h:112
GLU_NURBS_ERROR1 = 100251 # /usr/include/GL/glu.h:115
GLU_NURBS_ERROR2 = 100252 # /usr/include/GL/glu.h:116
GLU_NURBS_ERROR3 = 100253 # /usr/include/GL/glu.h:117
GLU_NURBS_ERROR4 = 100254 # /usr/include/GL/glu.h:118
GLU_NURBS_ERROR5 = 100255 # /usr/include/GL/glu.h:119
GLU_NURBS_ERROR6 = 100256 # /usr/include/GL/glu.h:120
GLU_NURBS_ERROR7 = 100257 # /usr/include/GL/glu.h:121
GLU_NURBS_ERROR8 = 100258 # /usr/include/GL/glu.h:122
GLU_NURBS_ERROR9 = 100259 # /usr/include/GL/glu.h:123
GLU_NURBS_ERROR10 = 100260 # /usr/include/GL/glu.h:124
GLU_NURBS_ERROR11 = 100261 # /usr/include/GL/glu.h:125
GLU_NURBS_ERROR12 = 100262 # /usr/include/GL/glu.h:126
GLU_NURBS_ERROR13 = 100263 # /usr/include/GL/glu.h:127
GLU_NURBS_ERROR14 = 100264 # /usr/include/GL/glu.h:128
GLU_NURBS_ERROR15 = 100265 # /usr/include/GL/glu.h:129
GLU_NURBS_ERROR16 = 100266 # /usr/include/GL/glu.h:130
GLU_NURBS_ERROR17 = 100267 # /usr/include/GL/glu.h:131
GLU_NURBS_ERROR18 = 100268 # /usr/include/GL/glu.h:132
GLU_NURBS_ERROR19 = 100269 # /usr/include/GL/glu.h:133
GLU_NURBS_ERROR20 = 100270 # /usr/include/GL/glu.h:134
GLU_NURBS_ERROR21 = 100271 # /usr/include/GL/glu.h:135
GLU_NURBS_ERROR22 = 100272 # /usr/include/GL/glu.h:136
GLU_NURBS_ERROR23 = 100273 # /usr/include/GL/glu.h:137
GLU_NURBS_ERROR24 = 100274 # /usr/include/GL/glu.h:138
GLU_NURBS_ERROR25 = 100275 # /usr/include/GL/glu.h:139
GLU_NURBS_ERROR26 = 100276 # /usr/include/GL/glu.h:140
GLU_NURBS_ERROR27 = 100277 # /usr/include/GL/glu.h:141
GLU_NURBS_ERROR28 = 100278 # /usr/include/GL/glu.h:142
GLU_NURBS_ERROR29 = 100279 # /usr/include/GL/glu.h:143
GLU_NURBS_ERROR30 = 100280 # /usr/include/GL/glu.h:144
GLU_NURBS_ERROR31 = 100281 # /usr/include/GL/glu.h:145
GLU_NURBS_ERROR32 = 100282 # /usr/include/GL/glu.h:146
GLU_NURBS_ERROR33 = 100283 # /usr/include/GL/glu.h:147
GLU_NURBS_ERROR34 = 100284 # /usr/include/GL/glu.h:148
GLU_NURBS_ERROR35 = 100285 # /usr/include/GL/glu.h:149
GLU_NURBS_ERROR36 = 100286 # /usr/include/GL/glu.h:150
GLU_NURBS_ERROR37 = 100287 # /usr/include/GL/glu.h:151
GLU_AUTO_LOAD_MATRIX = 100200 # /usr/include/GL/glu.h:154
GLU_CULLING = 100201 # /usr/include/GL/glu.h:155
GLU_SAMPLING_TOLERANCE = 100203 # /usr/include/GL/glu.h:156
GLU_DISPLAY_MODE = 100204 # /usr/include/GL/glu.h:157
GLU_PARAMETRIC_TOLERANCE = 100202 # /usr/include/GL/glu.h:158
GLU_SAMPLING_METHOD = 100205 # /usr/include/GL/glu.h:159
GLU_U_STEP = 100206 # /usr/include/GL/glu.h:160
GLU_V_STEP = 100207 # /usr/include/GL/glu.h:161
GLU_NURBS_MODE = 100160 # /usr/include/GL/glu.h:162
GLU_NURBS_MODE_EXT = 100160 # /usr/include/GL/glu.h:163
GLU_NURBS_TESSELLATOR = 100161 # /usr/include/GL/glu.h:164
GLU_NURBS_TESSELLATOR_EXT = 100161 # /usr/include/GL/glu.h:165
GLU_NURBS_RENDERER = 100162 # /usr/include/GL/glu.h:166
GLU_NURBS_RENDERER_EXT = 100162 # /usr/include/GL/glu.h:167
GLU_OBJECT_PARAMETRIC_ERROR = 100208 # /usr/include/GL/glu.h:170
GLU_OBJECT_PARAMETRIC_ERROR_EXT = 100208 # /usr/include/GL/glu.h:171
GLU_OBJECT_PATH_LENGTH = 100209 # /usr/include/GL/glu.h:172
GLU_OBJECT_PATH_LENGTH_EXT = 100209 # /usr/include/GL/glu.h:173
GLU_PATH_LENGTH = 100215 # /usr/include/GL/glu.h:174
GLU_PARAMETRIC_ERROR = 100216 # /usr/include/GL/glu.h:175
GLU_DOMAIN_DISTANCE = 100217 # /usr/include/GL/glu.h:176
GLU_MAP1_TRIM_2 = 100210 # /usr/include/GL/glu.h:179
GLU_MAP1_TRIM_3 = 100211 # /usr/include/GL/glu.h:180
GLU_POINT = 100010 # /usr/include/GL/glu.h:183
GLU_LINE = 100011 # /usr/include/GL/glu.h:184
GLU_FILL = 100012 # /usr/include/GL/glu.h:185
GLU_SILHOUETTE = 100013 # /usr/include/GL/glu.h:186
GLU_SMOOTH = 100000 # /usr/include/GL/glu.h:192
GLU_FLAT = 100001 # /usr/include/GL/glu.h:193
GLU_NONE = 100002 # /usr/include/GL/glu.h:194
GLU_OUTSIDE = 100020 # /usr/include/GL/glu.h:197
GLU_INSIDE = 100021 # /usr/include/GL/glu.h:198
GLU_TESS_BEGIN = 100100 # /usr/include/GL/glu.h:201
GLU_BEGIN = 100100 # /usr/include/GL/glu.h:202
GLU_TESS_VERTEX = 100101 # /usr/include/GL/glu.h:203
GLU_VERTEX = 100101 # /usr/include/GL/glu.h:204
GLU_TESS_END = 100102 # /usr/include/GL/glu.h:205
GLU_END = 100102 # /usr/include/GL/glu.h:206
GLU_TESS_ERROR = 100103 # /usr/include/GL/glu.h:207
GLU_TESS_EDGE_FLAG = 100104 # /usr/include/GL/glu.h:208
GLU_EDGE_FLAG = 100104 # /usr/include/GL/glu.h:209
GLU_TESS_COMBINE = 100105 # /usr/include/GL/glu.h:210
GLU_TESS_BEGIN_DATA = 100106 # /usr/include/GL/glu.h:211
GLU_TESS_VERTEX_DATA = 100107 # /usr/include/GL/glu.h:212
GLU_TESS_END_DATA = 100108 # /usr/include/GL/glu.h:213
GLU_TESS_ERROR_DATA = 100109 # /usr/include/GL/glu.h:214
GLU_TESS_EDGE_FLAG_DATA = 100110 # /usr/include/GL/glu.h:215
GLU_TESS_COMBINE_DATA = 100111 # /usr/include/GL/glu.h:216
GLU_CW = 100120 # /usr/include/GL/glu.h:219
GLU_CCW = 100121 # /usr/include/GL/glu.h:220
GLU_INTERIOR = 100122 # /usr/include/GL/glu.h:221
GLU_EXTERIOR = 100123 # /usr/include/GL/glu.h:222
GLU_UNKNOWN = 100124 # /usr/include/GL/glu.h:223
GLU_TESS_WINDING_RULE = 100140 # /usr/include/GL/glu.h:226
GLU_TESS_BOUNDARY_ONLY = 100141 # /usr/include/GL/glu.h:227
GLU_TESS_TOLERANCE = 100142 # /usr/include/GL/glu.h:228
GLU_TESS_ERROR1 = 100151 # /usr/include/GL/glu.h:231
GLU_TESS_ERROR2 = 100152 # /usr/include/GL/glu.h:232
GLU_TESS_ERROR3 = 100153 # /usr/include/GL/glu.h:233
GLU_TESS_ERROR4 = 100154 # /usr/include/GL/glu.h:234
GLU_TESS_ERROR5 = 100155 # /usr/include/GL/glu.h:235
GLU_TESS_ERROR6 = 100156 # /usr/include/GL/glu.h:236
GLU_TESS_ERROR7 = 100157 # /usr/include/GL/glu.h:237
GLU_TESS_ERROR8 = 100158 # /usr/include/GL/glu.h:238
GLU_TESS_MISSING_BEGIN_POLYGON = 100151 # /usr/include/GL/glu.h:239
GLU_TESS_MISSING_BEGIN_CONTOUR = 100152 # /usr/include/GL/glu.h:240
GLU_TESS_MISSING_END_POLYGON = 100153 # /usr/include/GL/glu.h:241
GLU_TESS_MISSING_END_CONTOUR = 100154 # /usr/include/GL/glu.h:242
GLU_TESS_COORD_TOO_LARGE = 100155 # /usr/include/GL/glu.h:243
GLU_TESS_NEED_COMBINE_CALLBACK = 100156 # /usr/include/GL/glu.h:244
GLU_TESS_WINDING_ODD = 100130 # /usr/include/GL/glu.h:247
GLU_TESS_WINDING_NONZERO = 100131 # /usr/include/GL/glu.h:248
GLU_TESS_WINDING_POSITIVE = 100132 # /usr/include/GL/glu.h:249
GLU_TESS_WINDING_NEGATIVE = 100133 # /usr/include/GL/glu.h:250
GLU_TESS_WINDING_ABS_GEQ_TWO = 100134 # /usr/include/GL/glu.h:251
class struct_GLUnurbs(Structure):
__slots__ = [
]
struct_GLUnurbs._fields_ = [
('_opaque_struct', c_int)
]
GLUnurbs = struct_GLUnurbs # /usr/include/GL/glu.h:261
class struct_GLUquadric(Structure):
__slots__ = [
]
struct_GLUquadric._fields_ = [
('_opaque_struct', c_int)
]
GLUquadric = struct_GLUquadric # /usr/include/GL/glu.h:262
class struct_GLUtesselator(Structure):
__slots__ = [
]
struct_GLUtesselator._fields_ = [
('_opaque_struct', c_int)
]
GLUtesselator = struct_GLUtesselator # /usr/include/GL/glu.h:263
GLUnurbsObj = GLUnurbs # /usr/include/GL/glu.h:266
GLUquadricObj = GLUquadric # /usr/include/GL/glu.h:267
GLUtesselatorObj = GLUtesselator # /usr/include/GL/glu.h:268
GLUtriangulatorObj = GLUtesselator # /usr/include/GL/glu.h:269
GLU_TESS_MAX_COORD = 9.9999999999999998e+149 # /usr/include/GL/glu.h:271
_GLUfuncptr = CFUNCTYPE(None) # /usr/include/GL/glu.h:274
# /usr/include/GL/glu.h:276
gluBeginCurve = _link_function('gluBeginCurve', None, [POINTER(GLUnurbs)], None)
# /usr/include/GL/glu.h:277
gluBeginPolygon = _link_function('gluBeginPolygon', None, [POINTER(GLUtesselator)], None)
# /usr/include/GL/glu.h:278
gluBeginSurface = _link_function('gluBeginSurface', None, [POINTER(GLUnurbs)], None)
# /usr/include/GL/glu.h:279
gluBeginTrim = _link_function('gluBeginTrim', None, [POINTER(GLUnurbs)], None)
GLint = c_int # /usr/include/GL/gl.h:58
GLenum = c_uint # /usr/include/GL/gl.h:53
GLsizei = c_int # /usr/include/GL/gl.h:59
# /usr/include/GL/glu.h:280
gluBuild1DMipmapLevels = _link_function('gluBuild1DMipmapLevels', GLint, [GLenum, GLint, GLsizei, GLenum, GLenum, GLint, GLint, GLint, POINTER(None)], None)
# /usr/include/GL/glu.h:281
gluBuild1DMipmaps = _link_function('gluBuild1DMipmaps', GLint, [GLenum, GLint, GLsizei, GLenum, GLenum, POINTER(None)], None)
# /usr/include/GL/glu.h:282
gluBuild2DMipmapLevels = _link_function('gluBuild2DMipmapLevels', GLint, [GLenum, GLint, GLsizei, GLsizei, GLenum, GLenum, GLint, GLint, GLint, POINTER(None)], None)
# /usr/include/GL/glu.h:283
gluBuild2DMipmaps = _link_function('gluBuild2DMipmaps', GLint, [GLenum, GLint, GLsizei, GLsizei, GLenum, GLenum, POINTER(None)], None)
# /usr/include/GL/glu.h:284
gluBuild3DMipmapLevels = _link_function('gluBuild3DMipmapLevels', GLint, [GLenum, GLint, GLsizei, GLsizei, GLsizei, GLenum, GLenum, GLint, GLint, GLint, POINTER(None)], None)
# /usr/include/GL/glu.h:285
gluBuild3DMipmaps = _link_function('gluBuild3DMipmaps', GLint, [GLenum, GLint, GLsizei, GLsizei, GLsizei, GLenum, GLenum, POINTER(None)], None)
GLboolean = c_ubyte # /usr/include/GL/gl.h:54
GLubyte = c_ubyte # /usr/include/GL/gl.h:60
# /usr/include/GL/glu.h:286
gluCheckExtension = _link_function('gluCheckExtension', GLboolean, [POINTER(GLubyte), POINTER(GLubyte)], None)
GLdouble = c_double # /usr/include/GL/gl.h:65
# /usr/include/GL/glu.h:287
gluCylinder = _link_function('gluCylinder', None, [POINTER(GLUquadric), GLdouble, GLdouble, GLdouble, GLint, GLint], None)
# /usr/include/GL/glu.h:288
gluDeleteNurbsRenderer = _link_function('gluDeleteNurbsRenderer', None, [POINTER(GLUnurbs)], None)
# /usr/include/GL/glu.h:289
gluDeleteQuadric = _link_function('gluDeleteQuadric', None, [POINTER(GLUquadric)], None)
# /usr/include/GL/glu.h:290
gluDeleteTess = _link_function('gluDeleteTess', None, [POINTER(GLUtesselator)], None)
# /usr/include/GL/glu.h:291
gluDisk = _link_function('gluDisk', None, [POINTER(GLUquadric), GLdouble, GLdouble, GLint, GLint], None)
# /usr/include/GL/glu.h:292
gluEndCurve = _link_function('gluEndCurve', None, [POINTER(GLUnurbs)], None)
# /usr/include/GL/glu.h:293
gluEndPolygon = _link_function('gluEndPolygon', None, [POINTER(GLUtesselator)], None)
# /usr/include/GL/glu.h:294
gluEndSurface = _link_function('gluEndSurface', None, [POINTER(GLUnurbs)], None)
# /usr/include/GL/glu.h:295
gluEndTrim = _link_function('gluEndTrim', None, [POINTER(GLUnurbs)], None)
# /usr/include/GL/glu.h:296
gluErrorString = _link_function('gluErrorString', POINTER(GLubyte), [GLenum], None)
GLfloat = c_float # /usr/include/GL/gl.h:63
# /usr/include/GL/glu.h:297
gluGetNurbsProperty = _link_function('gluGetNurbsProperty', None, [POINTER(GLUnurbs), GLenum, POINTER(GLfloat)], None)
# /usr/include/GL/glu.h:298
gluGetString = _link_function('gluGetString', POINTER(GLubyte), [GLenum], None)
# /usr/include/GL/glu.h:299
gluGetTessProperty = _link_function('gluGetTessProperty', None, [POINTER(GLUtesselator), GLenum, POINTER(GLdouble)], None)
# /usr/include/GL/glu.h:300
gluLoadSamplingMatrices = _link_function('gluLoadSamplingMatrices', None, [POINTER(GLUnurbs), POINTER(GLfloat), POINTER(GLfloat), POINTER(GLint)], None)
# /usr/include/GL/glu.h:301
gluLookAt = _link_function('gluLookAt', None, [GLdouble, GLdouble, GLdouble, GLdouble, GLdouble, GLdouble, GLdouble, GLdouble, GLdouble], None)
# /usr/include/GL/glu.h:302
gluNewNurbsRenderer = _link_function('gluNewNurbsRenderer', POINTER(GLUnurbs), [], None)
# /usr/include/GL/glu.h:303
gluNewQuadric = _link_function('gluNewQuadric', POINTER(GLUquadric), [], None)
# /usr/include/GL/glu.h:304
gluNewTess = _link_function('gluNewTess', POINTER(GLUtesselator), [], None)
# /usr/include/GL/glu.h:305
gluNextContour = _link_function('gluNextContour', None, [POINTER(GLUtesselator), GLenum], None)
# /usr/include/GL/glu.h:306
gluNurbsCallback = _link_function('gluNurbsCallback', None, [POINTER(GLUnurbs), GLenum, _GLUfuncptr], None)
GLvoid = None # /usr/include/GL/gl.h:67
# /usr/include/GL/glu.h:307
gluNurbsCallbackData = _link_function('gluNurbsCallbackData', None, [POINTER(GLUnurbs), POINTER(GLvoid)], None)
# /usr/include/GL/glu.h:308
gluNurbsCallbackDataEXT = _link_function('gluNurbsCallbackDataEXT', None, [POINTER(GLUnurbs), POINTER(GLvoid)], None)
# /usr/include/GL/glu.h:309
gluNurbsCurve = _link_function('gluNurbsCurve', None, [POINTER(GLUnurbs), GLint, POINTER(GLfloat), GLint, POINTER(GLfloat), GLint, GLenum], None)
# /usr/include/GL/glu.h:310
gluNurbsProperty = _link_function('gluNurbsProperty', None, [POINTER(GLUnurbs), GLenum, GLfloat], None)
# /usr/include/GL/glu.h:311
gluNurbsSurface = _link_function('gluNurbsSurface', None, [POINTER(GLUnurbs), GLint, POINTER(GLfloat), GLint, POINTER(GLfloat), GLint, GLint, POINTER(GLfloat), GLint, GLint, GLenum], None)
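# Illustrative usage sketch (not part of the generated header): each wrapper above
# is a plain ctypes function, so with an active GL context the calls look like
# ordinary Python, e.g.
#
#   quadric = gluNewQuadric()
#   gluCylinder(quadric, 1.0, 1.0, 2.0, 16, 4)   # base, top, height, slices, stacks
#   gluDeleteQuadric(quadric)
#
# gluErrorString(GLU_INVALID_ENUM) returns a POINTER(GLubyte); it can be read with
# cast(gluErrorString(GLU_INVALID_ENUM), c_char_p).value.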
<reponame>alvinwatner/protein_folding
"""
=====================================================
Protein Folding 2D Hydrophobic-Polar Model Simulation
=====================================================
Protein folding is a well-known combinatorial optimization problem, and there are several models that describe the folding process.
There are 20 different amino acids; the Hydrophobic-Polar (HP) model classifies them into 2 types: H (hydrophobic) and P (hydrophilic).
The HP model is one of my favorites, since it looks more like a board game with a set of simple rules. Even so, that simplicity still amounts to an NP-complete problem.
Here's how it works:
1. Given a sequence of 'H' and 'P' aminos.
2. Place the aminos of the sequence one by one onto a 2D (or 3D) space.
3. Each amino must be placed adjacent to the previous amino (up, left, right, or down). (Note: placing it on an occupied cell is not allowed.)
The goal is to find H-H pairs that are not connected in the primary structure but are adjacent in 2D space.
That's it! Sounds confusing? No worries, let's roll on.
The following code follows the OpenAI Gym style of environment.
One thing you should know is that the observation value is returned as a list: [amino_data, image_data]
- amino_data = list of length 100 (padded to the maximum amino sequence length of 100)
- image_data = RGB image (150,150,3)
----------------------------------------
Author : <NAME>
Email : <EMAIL>
Website : -
License : -
-----------------------------------------
**Please feel free to use and modify this, but keep the above information. Thanks!**
"""
import numpy as np
from PIL import Image
import cv2
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
class environment():
def __init__(self):
        # Since this is a 2-dimensional environment, there are 4 possible actions in any state
self.action_space = np.array([0, 1, 2, 3]) # 0 = up, 1 = left, 2 = right, 3 = down
self.action_space_size = len(self.action_space)
        self.win_size = 6000 # Window size (yes, it's a huge image, but this size can handle a 100-amino sequence with an acceptable molecule size)
#initialize the amino coordinate position
self.init_amino_position_x = int(self.win_size/2)
self.init_amino_position_y = int(self.win_size/2)
        # Follows the Gym convention: Isreset = True once environment.reset() has been called; otherwise an error is raised
self.Isreset = False
def adjust_amino_size(self):
        # Default amino size; it will be reduced as the amino sequence gets longer
default_size = 80
center = self.win_size/2
        max_size = (default_size *3) * len(self.amino_acid) # default_size * 3 because the value is the circle radius, not the diameter (and to make it a bit larger)
"""
        Since 'init_amino_position' always starts at the center of the window, the code below checks whether (default_size * amino length) goes beyond the window size
and shrink the 'default_size' if it does.
"""
if max_size > center:
while max_size > center:
                #lower-bound the amino size: stop shrinking once it drops below 11
if default_size < 11:
return default_size
default_size -= 1
max_size = (default_size * 3) * len(self.amino_acid)
return default_size
else:
return default_size
def preprocess_data(self, amino_data):
"""
        Convert amino_data to numbers for easier computation
        - H (Hydrophobic) = 1
        - P (Hydrophilic) = 2
"""
for i in range(len(amino_data)):
if amino_data[i] == 'H':
amino_data[i] = 1
elif amino_data[i] == 'P':
amino_data[i] = 2
return np.array(amino_data)
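    # e.g. preprocess_data(['H', 'P', 'P', 'H']) -> array([1, 2, 2, 1])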
"""
    The code below handles the drawing process.
    Nothing fancy, just regular OpenCV functionality.
"""
def draw_amino(self, amino_type = None, coordinat_x = 0, coordinat_y = 0, size = 0):
"""
Draw amino :
- Hydrophobic Amino Acid = Black Circle
        - Hydrophilic Amino Acid = White Circle
"""
if amino_type == 1:
return cv2.circle(self.current_image, (coordinat_x, coordinat_y), size , (0,0,0) , -2)
else:
return cv2.circle(self.current_image, (coordinat_x, coordinat_y), size , (255,255,255) , -2)
def draw_arrow_line(self, start_point = (0, 0), end_point = (2, 2)):
#draw a line and arrow pointing to the next amino in the sequence
return cv2.arrowedLine(self.current_image, start_point, end_point, (255,0,0), 4)
def draw_next_amino(self, amino_type = None, prev_coordinat_x = 0, prev_coordinat_y = 0, size = 0, action = 0):
"""
        This function draws the next amino acid in the sequence when the step() function is called.
        The rule: the next amino acid is always placed adjacent to the previous amino.
Parameter :
        - amino_type = int, 1 (Hydrophobic) or 2 (Hydrophilic)
- prev_coordinat_x = int, amino coordinate x axis
- prev_coordinat_y = int, amino coordinate y axis
- size = int, amino size
- action = int, action from action_space (1, 2, 3, 4)
Return :
- amino coordinate x axis
- amino coordinate y axis
- RGB image
"""
        # Both amino types are placed with exactly the same movement logic; only the
        # colour differs, and draw_amino() already handles that via amino_type.
        if action == 0:
            return self.draw_next_up(amino_type = amino_type, coor_x = prev_coordinat_x, coor_y = prev_coordinat_y)
        elif action == 1:
            return self.draw_next_left(amino_type = amino_type, coor_x = prev_coordinat_x, coor_y = prev_coordinat_y)
        elif action == 2:
            return self.draw_next_right(amino_type = amino_type, coor_x = prev_coordinat_x, coor_y = prev_coordinat_y)
        else:
            return self.draw_next_down(amino_type = amino_type, coor_x = prev_coordinat_x, coor_y = prev_coordinat_y)
def draw_next_up(self, amino_type = None, coor_x = 0, coor_y = 0):
#Return New coordinate and RGB image, after action '0 : up'
new_amino_position_x = coor_x
new_amino_position_y = coor_y - self.amino_move
start_line_x = coor_x
start_line_y = coor_y - self.line_length
img = self.draw_arrow_line(start_point = (start_line_x, start_line_y), end_point = (start_line_x, start_line_y - self.line_length))
img = self.draw_amino(amino_type = amino_type, coordinat_x = new_amino_position_x, coordinat_y = new_amino_position_y, size = self.amino_size)
return new_amino_position_x, new_amino_position_y, img
def draw_next_left(self, amino_type = None, coor_x = 0, coor_y = 0):
#Return New coordinate and RGB image, after action '1 : left'
new_amino_position_x = coor_x - self.amino_move
new_amino_position_y = coor_y
start_line_x = coor_x - self.line_length
start_line_y = coor_y
img = self.draw_arrow_line(start_point = (start_line_x, start_line_y), end_point = (start_line_x - self.line_length, start_line_y))
img = self.draw_amino(amino_type = amino_type, coordinat_x = new_amino_position_x, coordinat_y = new_amino_position_y, size = self.amino_size)
return new_amino_position_x, new_amino_position_y, img
def draw_next_right(self, amino_type = None, coor_x = 0, coor_y = 0):
#Return New coordinate and RGB image, after action '2 : right'
new_amino_position_x = coor_x + self.amino_move
new_amino_position_y = coor_y
start_line_x = coor_x + self.line_length
start_line_y = coor_y
img = self.draw_arrow_line(start_point = (start_line_x, start_line_y), end_point = (start_line_x + self.line_length, coor_y))
img = self.draw_amino(amino_type = amino_type, coordinat_x = new_amino_position_x, coordinat_y = new_amino_position_y, size = self.amino_size)
return new_amino_position_x, new_amino_position_y, img
def draw_next_down(self, amino_type = None, coor_x = 0, coor_y = 0):
#Return New coordinate and RGB image, after action '3 : down'
new_amino_position_x = coor_x
new_amino_position_y = coor_y + self.amino_move
start_line_x = coor_x
start_line_y = coor_y + self.line_length
img = self.draw_arrow_line(start_point = (start_line_x, start_line_y), end_point = (coor_x, start_line_y + self.line_length))
img = self.draw_amino(amino_type = amino_type, coordinat_x = new_amino_position_x, coordinat_y = new_amino_position_y, size = self.amino_size)
return new_amino_position_x, new_amino_position_y, img
"""
    The code below checks the neighbours of the current amino.
    The functions return:
    ----------------------
    - free_energy = int, -1 if the neighbour is hydrophobic and not connected in the primary structure (arrow line), 0 otherwise
    - amino = bool, True if another amino exists at the given neighbouring coordinate, False if there is none
"""
def check_Above_Neighbour(self, new_coordinat_x, new_coordinat_y):
half_line_length = int(0.5 * self.line_length)
#Check if above neighbour exist hydrophobic amino
if np.sum(self.current_image[new_coordinat_y - self.amino_move, new_coordinat_x]) == 0:
amino = True
#Then check if it is connected or not
if np.sum(self.current_image[new_coordinat_y - half_line_length * 3, new_coordinat_x]) == 255:
free_energy = 0
else:
free_energy = -1
#Check if above neighbour exist hydrophillic amino
elif np.sum(self.current_image[new_coordinat_y - self.amino_move, new_coordinat_x]) == 765:
amino = True
free_energy = 0
#Check if above neighbour exist nothing
elif np.sum(self.current_image[new_coordinat_y - self.amino_move, new_coordinat_x]) == 330:
amino = False
free_energy = 0
return free_energy, amino
def check_Left_Neighbour(self, new_coordinat_x, new_coordinat_y):
half_line_length = int(0.5 * self.line_length)
#Check if left neighbour exist hydrophobic amino
if np.sum(self.current_image[new_coordinat_y, new_coordinat_x - self.amino_move]) == 0:
amino = True
#Then check if it is connected or not
if np.sum(self.current_image[new_coordinat_y, new_coordinat_x - half_line_length * 3]) == 255:
free_energy = 0
else:
free_energy = -1
#Check if left neighbour exist hydrophillic amino
elif np.sum(self.current_image[new_coordinat_y, new_coordinat_x - self.amino_move]) == 765:
amino = True
free_energy = 0
#Check if left neighbour exist nothing
elif np.sum(self.current_image[new_coordinat_y, new_coordinat_x - self.amino_move]) == 330:
amino = False
free_energy = 0
return free_energy, amino
def check_Right_Neighbour(self, new_coordinat_x, new_coordinat_y):
half_line_length = int(0.5 * self.line_length)
#Check if right neighbour exist hydrophobic amino
if np.sum(self.current_image[new_coordinat_y, new_coordinat_x + self.amino_move]) == 0:
amino = True
#Then check if it is connected or not
if np.sum(self.current_image[new_coordinat_y, new_coordinat_x + half_line_length * 3]) == 255:
free_energy = 0
else:
free_energy = -1
#Check if right neighbour exist hydrophillic amino
elif np.sum(self.current_image[new_coordinat_y, new_coordinat_x + self.amino_move]) == 765:
amino = True
free_energy = 0
#Check if right neighbour exist nothing
elif np.sum(self.current_image[new_coordinat_y, new_coordinat_x + self.amino_move]) == 330:
amino = False
free_energy = 0
return free_energy, amino
def check_Below_Neighbour(self, new_coordinat_x, new_coordinat_y):
half_line_length = int(0.5 * self.line_length)
#Check if below neighbour exist hydrophobic amino
if np.sum(self.current_image[new_coordinat_y + self.amino_move, new_coordinat_x]) == 0:
            amino = True  # (completed to mirror check_Above/Left/Right_Neighbour for the cell below)
            #Then check if it is connected or not
            if np.sum(self.current_image[new_coordinat_y + half_line_length * 3, new_coordinat_x]) == 255:
                free_energy = 0
            else:
                free_energy = -1
        #Check if below neighbour exist hydrophillic amino
        elif np.sum(self.current_image[new_coordinat_y + self.amino_move, new_coordinat_x]) == 765:
            amino = True
            free_energy = 0
        #Check if below neighbour exist nothing
        elif np.sum(self.current_image[new_coordinat_y + self.amino_move, new_coordinat_x]) == 330:
            amino = False
            free_energy = 0
        return free_energy, amino
<gh_stars>0
"""
THIS CODE IS AUTO-GENERATED. DO NOT EDIT.
GL ES 2.0 API based on the Angle library (i.e. DirectX)
"""
import ctypes
from .angle import _lib
_lib.glActiveTexture.argtypes = ctypes.c_uint,
# void = glActiveTexture(GLenum texture)
def glActiveTexture(texture):
_lib.glActiveTexture(texture)
_lib.glAttachShader.argtypes = ctypes.c_uint, ctypes.c_uint,
# void = glAttachShader(GLuint program, GLuint shader)
def glAttachShader(program, shader):
_lib.glAttachShader(program, shader)
_lib.glBindAttribLocation.argtypes = ctypes.c_uint, ctypes.c_uint, ctypes.c_char_p,
# void = glBindAttribLocation(GLuint program, GLuint index, GLchar* name)
def glBindAttribLocation(program, index, name):
name = ctypes.c_char_p(name.encode('utf-8'))
res = _lib.glBindAttribLocation(program, index, name)
_lib.glBindBuffer.argtypes = ctypes.c_uint, ctypes.c_uint,
# void = glBindBuffer(GLenum target, GLuint buffer)
def glBindBuffer(target, buffer):
_lib.glBindBuffer(target, buffer)
_lib.glBindFramebuffer.argtypes = ctypes.c_uint, ctypes.c_uint,
# void = glBindFramebuffer(GLenum target, GLuint framebuffer)
def glBindFramebuffer(target, framebuffer):
_lib.glBindFramebuffer(target, framebuffer)
_lib.glBindRenderbuffer.argtypes = ctypes.c_uint, ctypes.c_uint,
# void = glBindRenderbuffer(GLenum target, GLuint renderbuffer)
def glBindRenderbuffer(target, renderbuffer):
_lib.glBindRenderbuffer(target, renderbuffer)
_lib.glBindTexture.argtypes = ctypes.c_uint, ctypes.c_uint,
# void = glBindTexture(GLenum target, GLuint texture)
def glBindTexture(target, texture):
_lib.glBindTexture(target, texture)
_lib.glBlendColor.argtypes = ctypes.c_float, ctypes.c_float, ctypes.c_float, ctypes.c_float,
# void = glBlendColor(GLclampf red, GLclampf green, GLclampf blue, GLclampf alpha)
def glBlendColor(red, green, blue, alpha):
_lib.glBlendColor(red, green, blue, alpha)
_lib.glBlendEquation.argtypes = ctypes.c_uint,
# void = glBlendEquation(GLenum mode)
def glBlendEquation(mode):
_lib.glBlendEquation(mode)
_lib.glBlendEquationSeparate.argtypes = ctypes.c_uint, ctypes.c_uint,
# void = glBlendEquationSeparate(GLenum modeRGB, GLenum modeAlpha)
def glBlendEquationSeparate(modeRGB, modeAlpha):
_lib.glBlendEquationSeparate(modeRGB, modeAlpha)
_lib.glBlendFunc.argtypes = ctypes.c_uint, ctypes.c_uint,
# void = glBlendFunc(GLenum sfactor, GLenum dfactor)
def glBlendFunc(sfactor, dfactor):
_lib.glBlendFunc(sfactor, dfactor)
_lib.glBlendFuncSeparate.argtypes = ctypes.c_uint, ctypes.c_uint, ctypes.c_uint, ctypes.c_uint,
# void = glBlendFuncSeparate(GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha)
def glBlendFuncSeparate(srcRGB, dstRGB, srcAlpha, dstAlpha):
_lib.glBlendFuncSeparate(srcRGB, dstRGB, srcAlpha, dstAlpha)
_lib.glBufferData.argtypes = ctypes.c_uint, ctypes.c_int, ctypes.c_void_p, ctypes.c_uint,
# void = glBufferData(GLenum target, GLsizeiptr size, GLvoid* data, GLenum usage)
def glBufferData(target, data, usage):
""" Data can be numpy array or the size of data to allocate.
"""
if isinstance(data, int):
size = data
data = ctypes.c_voidp(0)
else:
if not data.flags['C_CONTIGUOUS'] or not data.flags['ALIGNED']:
data = data.copy('C')
data_ = data
size = data_.nbytes
data = data_.ctypes.data
res = _lib.glBufferData(target, size, data, usage)
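# Illustrative usage (assumes an active GL context and a bound buffer):
#   glBufferData(GL_ARRAY_BUFFER, 1024, GL_STATIC_DRAW)         # allocate 1024 bytes
#   glBufferData(GL_ARRAY_BUFFER, np_vertices, GL_STATIC_DRAW)  # upload a numpy array
# The GL_* constants are not defined in this module; they come from the caller.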
_lib.glBufferSubData.argtypes = ctypes.c_uint, ctypes.c_int, ctypes.c_int, ctypes.c_void_p,
# void = glBufferSubData(GLenum target, GLintptr offset, GLsizeiptr size, GLvoid* data)
def glBufferSubData(target, offset, data):
if not data.flags['C_CONTIGUOUS']:
data = data.copy('C')
data_ = data
size = data_.nbytes
data = data_.ctypes.data
res = _lib.glBufferSubData(target, offset, size, data)
_lib.glCheckFramebufferStatus.argtypes = ctypes.c_uint,
_lib.glCheckFramebufferStatus.restype = ctypes.c_uint
# GLenum = glCheckFramebufferStatus(GLenum target)
def glCheckFramebufferStatus(target):
return _lib.glCheckFramebufferStatus(target)
_lib.glClear.argtypes = ctypes.c_uint,
# void = glClear(GLbitfield mask)
def glClear(mask):
_lib.glClear(mask)
_lib.glClearColor.argtypes = ctypes.c_float, ctypes.c_float, ctypes.c_float, ctypes.c_float,
# void = glClearColor(GLclampf red, GLclampf green, GLclampf blue, GLclampf alpha)
def glClearColor(red, green, blue, alpha):
_lib.glClearColor(red, green, blue, alpha)
_lib.glClearDepthf.argtypes = ctypes.c_float,
# void = glClearDepthf(GLclampf depth)
def glClearDepth(depth):
_lib.glClearDepthf(depth)
_lib.glClearStencil.argtypes = ctypes.c_int,
# void = glClearStencil(GLint s)
def glClearStencil(s):
_lib.glClearStencil(s)
_lib.glColorMask.argtypes = ctypes.c_bool, ctypes.c_bool, ctypes.c_bool, ctypes.c_bool,
# void = glColorMask(GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha)
def glColorMask(red, green, blue, alpha):
_lib.glColorMask(red, green, blue, alpha)
_lib.glCompileShader.argtypes = ctypes.c_uint,
# void = glCompileShader(GLuint shader)
def glCompileShader(shader):
_lib.glCompileShader(shader)
_lib.glCompressedTexImage2D.argtypes = ctypes.c_uint, ctypes.c_int, ctypes.c_uint, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_void_p,
# void = glCompressedTexImage2D(GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, GLvoid* data)
def glCompressedTexImage2D(target, level, internalformat, width, height, border, data):
# border = 0 # set in args
if not data.flags['C_CONTIGUOUS']:
data = data.copy('C')
data_ = data
    size = data_.nbytes  # imageSize is a byte count
data = data_.ctypes.data
    res = _lib.glCompressedTexImage2D(target, level, internalformat, width, height, border, size, data)
_lib.glCompressedTexSubImage2D.argtypes = ctypes.c_uint, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_uint, ctypes.c_int, ctypes.c_void_p,
# void = glCompressedTexSubImage2D(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, GLvoid* data)
def glCompressedTexSubImage2D(target, level, xoffset, yoffset, width, height, format, data):
if not data.flags['C_CONTIGUOUS']:
data = data.copy('C')
data_ = data
    size = data_.nbytes  # imageSize is a byte count
data = data_.ctypes.data
    res = _lib.glCompressedTexSubImage2D(target, level, xoffset, yoffset, width, height, format, size, data)
_lib.glCopyTexImage2D.argtypes = ctypes.c_uint, ctypes.c_int, ctypes.c_uint, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int,
# void = glCopyTexImage2D(GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border)
def glCopyTexImage2D(target, level, internalformat, x, y, width, height, border):
_lib.glCopyTexImage2D(target, level, internalformat, x, y, width, height, border)
_lib.glCopyTexSubImage2D.argtypes = ctypes.c_uint, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int,
# void = glCopyTexSubImage2D(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height)
def glCopyTexSubImage2D(target, level, xoffset, yoffset, x, y, width, height):
_lib.glCopyTexSubImage2D(target, level, xoffset, yoffset, x, y, width, height)
_lib.glCreateProgram.argtypes = ()
_lib.glCreateProgram.restype = ctypes.c_uint
# GLuint = glCreateProgram()
def glCreateProgram():
return _lib.glCreateProgram()
_lib.glCreateShader.argtypes = ctypes.c_uint,
_lib.glCreateShader.restype = ctypes.c_uint
# GLuint = glCreateShader(GLenum type)
def glCreateShader(type):
return _lib.glCreateShader(type)
_lib.glCullFace.argtypes = ctypes.c_uint,
# void = glCullFace(GLenum mode)
def glCullFace(mode):
_lib.glCullFace(mode)
_lib.glDeleteBuffers.argtypes = ctypes.c_int, ctypes.POINTER(ctypes.c_uint),
# void = glDeleteBuffers(GLsizei n, GLuint* buffers)
def glDeleteBuffer(buffer):
n = 1
buffers = (ctypes.c_uint*n)(buffer)
res = _lib.glDeleteBuffers(n, buffers)
_lib.glDeleteFramebuffers.argtypes = ctypes.c_int, ctypes.POINTER(ctypes.c_uint),
# void = glDeleteFramebuffers(GLsizei n, GLuint* framebuffers)
def glDeleteFramebuffer(framebuffer):
n = 1
framebuffers = (ctypes.c_uint*n)(framebuffer)
res = _lib.glDeleteFramebuffers(n, framebuffers)
_lib.glDeleteProgram.argtypes = ctypes.c_uint,
# void = glDeleteProgram(GLuint program)
def glDeleteProgram(program):
_lib.glDeleteProgram(program)
_lib.glDeleteRenderbuffers.argtypes = ctypes.c_int, ctypes.POINTER(ctypes.c_uint),
# void = glDeleteRenderbuffers(GLsizei n, GLuint* renderbuffers)
def glDeleteRenderbuffer(renderbuffer):
n = 1
renderbuffers = (ctypes.c_uint*n)(renderbuffer)
res = _lib.glDeleteRenderbuffers(n, renderbuffers)
_lib.glDeleteShader.argtypes = ctypes.c_uint,
# void = glDeleteShader(GLuint shader)
def glDeleteShader(shader):
_lib.glDeleteShader(shader)
_lib.glDeleteTextures.argtypes = ctypes.c_int, ctypes.POINTER(ctypes.c_uint),
# void = glDeleteTextures(GLsizei n, GLuint* textures)
def glDeleteTexture(texture):
n = 1
textures = (ctypes.c_uint*n)(texture)
res = _lib.glDeleteTextures(n, textures)
_lib.glDepthFunc.argtypes = ctypes.c_uint,
# void = glDepthFunc(GLenum func)
def glDepthFunc(func):
_lib.glDepthFunc(func)
_lib.glDepthMask.argtypes = ctypes.c_bool,
# void = glDepthMask(GLboolean flag)
def glDepthMask(flag):
_lib.glDepthMask(flag)
_lib.glDepthRangef.argtypes = ctypes.c_float, ctypes.c_float,
# void = glDepthRangef(GLclampf zNear, GLclampf zFar)
def glDepthRange(zNear, zFar):
_lib.glDepthRangef(zNear, zFar)
_lib.glDetachShader.argtypes = ctypes.c_uint, ctypes.c_uint,
# void = glDetachShader(GLuint program, GLuint shader)
def glDetachShader(program, shader):
_lib.glDetachShader(program, shader)
_lib.glDisable.argtypes = ctypes.c_uint,
# void = glDisable(GLenum cap)
def glDisable(cap):
_lib.glDisable(cap)
_lib.glDisableVertexAttribArray.argtypes = ctypes.c_uint,
# void = glDisableVertexAttribArray(GLuint index)
def glDisableVertexAttribArray(index):
_lib.glDisableVertexAttribArray(index)
_lib.glDrawArrays.argtypes = ctypes.c_uint, ctypes.c_int, ctypes.c_int,
# void = glDrawArrays(GLenum mode, GLint first, GLsizei count)
def glDrawArrays(mode, first, count):
_lib.glDrawArrays(mode, first, count)
_lib.glDrawElements.argtypes = ctypes.c_uint, ctypes.c_int, ctypes.c_uint, ctypes.c_void_p,
# void = glDrawElements(GLenum mode, GLsizei count, GLenum type, GLvoid* indices)
def glDrawElements(mode, count, type, offset):
if offset is None:
offset = ctypes.c_void_p(0)
elif isinstance(offset, ctypes.c_void_p):
pass
elif isinstance(offset, (int, ctypes.c_int)):
offset = ctypes.c_void_p(int(offset))
else:
if not offset.flags['C_CONTIGUOUS']:
offset = offset.copy('C')
offset_ = offset
offset = offset.ctypes.data
indices = offset
res = _lib.glDrawElements(mode, count, type, indices)
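# Note: the `offset` argument may be None (start of the bound element buffer), an
# integer byte offset into that buffer, or a numpy array of indices passed directly
# as a client-side index array.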
_lib.glEnable.argtypes = ctypes.c_uint,
# void = glEnable(GLenum cap)
def glEnable(cap):
_lib.glEnable(cap)
_lib.glEnableVertexAttribArray.argtypes = ctypes.c_uint,
# void = glEnableVertexAttribArray(GLuint index)
def glEnableVertexAttribArray(index):
_lib.glEnableVertexAttribArray(index)
_lib.glFinish.argtypes = ()
# void = glFinish()
def glFinish():
_lib.glFinish()
_lib.glFlush.argtypes = ()
# void = glFlush()
def glFlush():
_lib.glFlush()
_lib.glFramebufferRenderbuffer.argtypes = ctypes.c_uint, ctypes.c_uint, ctypes.c_uint, ctypes.c_uint,
# void = glFramebufferRenderbuffer(GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer)
def glFramebufferRenderbuffer(target, attachment, renderbuffertarget, renderbuffer):
_lib.glFramebufferRenderbuffer(target, attachment, renderbuffertarget, renderbuffer)
_lib.glFramebufferTexture2D.argtypes = ctypes.c_uint, ctypes.c_uint, ctypes.c_uint, ctypes.c_uint, ctypes.c_int,
# void = glFramebufferTexture2D(GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level)
def glFramebufferTexture2D(target, attachment, textarget, texture, level):
_lib.glFramebufferTexture2D(target, attachment, textarget, texture, level)
_lib.glFrontFace.argtypes = ctypes.c_uint,
# void = glFrontFace(GLenum mode)
def glFrontFace(mode):
_lib.glFrontFace(mode)
_lib.glGenBuffers.argtypes = ctypes.c_int, ctypes.POINTER(ctypes.c_uint),
# void = glGenBuffers(GLsizei n, GLuint* buffers)
def glCreateBuffer():
n = 1
buffers = (ctypes.c_uint*n)()
res = _lib.glGenBuffers(n, buffers)
return buffers[0]
_lib.glGenFramebuffers.argtypes = ctypes.c_int, ctypes.POINTER(ctypes.c_uint),
# void = glGenFramebuffers(GLsizei n, GLuint* framebuffers)
def glCreateFramebuffer():
n = 1
framebuffers = (ctypes.c_uint*n)()
res = _lib.glGenFramebuffers(n, framebuffers)
return framebuffers[0]
_lib.glGenRenderbuffers.argtypes = ctypes.c_int, ctypes.POINTER(ctypes.c_uint),
# void = glGenRenderbuffers(GLsizei n, GLuint* renderbuffers)
def glCreateRenderbuffer():
n = 1
renderbuffers = (ctypes.c_uint*n)()
res = _lib.glGenRenderbuffers(n, renderbuffers)
return renderbuffers[0]
_lib.glGenTextures.argtypes = ctypes.c_int, ctypes.POINTER(ctypes.c_uint),
# void = glGenTextures(GLsizei n, GLuint* textures)
def glCreateTexture():
n = 1
textures = (ctypes.c_uint*n)()
res = _lib.glGenTextures(n, textures)
return textures[0]
_lib.glGenerateMipmap.argtypes = ctypes.c_uint,
# void = glGenerateMipmap(GLenum target)
def glGenerateMipmap(target):
_lib.glGenerateMipmap(target)
_lib.glGetActiveAttrib.argtypes = ctypes.c_uint, ctypes.c_uint, ctypes.c_int, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_uint), ctypes.c_char_p,
# void = glGetActiveAttrib(GLuint program, GLuint index, GLsizei bufsize, GLsizei* length, GLint* size, GLenum* type, GLchar* name)
def glGetActiveAttrib(program, index):
bufsize = 256
length = (ctypes.c_int*1)()
size = (ctypes.c_int*1)()
type = (ctypes.c_uint*1)()
name = ctypes.create_string_buffer(bufsize)
res = _lib.glGetActiveAttrib(program, index, bufsize, length, size, type, name)
name = name[:length[0]].decode('utf-8')
return name, size[0], type[0]
_lib.glGetActiveUniform.argtypes = ctypes.c_uint, ctypes.c_uint, ctypes.c_int, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_uint), ctypes.c_char_p,
# void = glGetActiveUniform(GLuint program, GLuint index, GLsizei bufsize, GLsizei* length, GLint* size, GLenum* type, GLchar* name)
def glGetActiveUniform(program, index):
bufsize = 256
length = (ctypes.c_int*1)()
size = (ctypes.c_int*1)()
type = (ctypes.c_uint*1)()
name = ctypes.create_string_buffer(bufsize)
res = _lib.glGetActiveUniform(program, index, bufsize, length, size, type, name)
name = name[:length[0]].decode('utf-8')
return name, size[0], type[0]
_lib.glGetAttachedShaders.argtypes = ctypes.c_uint, ctypes.c_int, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_uint),
# void = glGetAttachedShaders(GLuint program, GLsizei maxcount, GLsizei* count, GLuint* shaders)
def glGetAttachedShaders(program):
maxcount = 256
count = (ctypes.c_int*1)()
shaders = (ctypes.c_uint*maxcount)()
res = _lib.glGetAttachedShaders(program, maxcount, count, shaders)
return tuple(shaders[:count[0]])
_lib.glGetAttribLocation.argtypes = ctypes.c_uint, ctypes.c_char_p,
_lib.glGetAttribLocation.restype = ctypes.c_int
# GLint = glGetAttribLocation(GLuint program, GLchar* name)
def glGetAttribLocation(program, name):
name = ctypes.c_char_p(name.encode('utf-8'))
res = _lib.glGetAttribLocation(program, name)
return res
_lib.glGetBooleanv.argtypes = ctypes.c_uint, ctypes.POINTER(ctypes.c_bool),
# void = glGetBooleanv(GLenum pname, GLboolean* params)
def _glGetBooleanv(pname):
params = (ctypes.c_bool*1)()
res = _lib.glGetBooleanv(pname, params)
return params[0]
_lib.glGetBufferParameteriv.argtypes = ctypes.c_uint, ctypes.c_uint, ctypes.POINTER(ctypes.c_int),
# void = glGetBufferParameteriv(GLenum target, GLenum pname, GLint* params)
def glGetBufferParameter(target, pname):
d = -2**31 # smallest 32bit integer
params = (ctypes.c_int*1)(d)
res = _lib.glGetBufferParameteriv(target, pname, params)
return params[0]
_lib.glGetError.argtypes = ()
_lib.glGetError.restype = ctypes.c_uint
# GLenum = glGetError()
def glGetError():
    return _lib.glGetError()
import random
from tyckiting_client import messages
from tyckiting_client import actions
from collections import Counter
class BaseAi:
def __init__(self, team_id, config=None):
"""
Initializes the AI, storing the configuration values as fields
Args:
team_id: Team identifier as an integer, shouldn't be needed
config: Dictionary of game parameters
"""
self.team_id = team_id
self.config = config #or {}
self.print_game_config()
self.jradar_initial_value = 1
# Calculate all the field points for later use
self.field_points = set(self.get_positions_in_range(x=0, y=0, radius=self.config.field_radius))
# Calculate all the radar points for later use
self.optimal_radar_points = set(self.get_positions_in_range(x=0, y=0, radius=self.config.field_radius-self.config.radar))
# Calculate minimum number of radars on the field (Lasse radar)
self.min_optimal_radars = self.optimal_radars_on_field(radar=self.config.radar)
# Field point radar values (Jarno radar)
self.jradar_values = Counter() # These are the values for all the field points
self.reset_jradar_field()
def reset_jradar_field(self):
for point in self.field_points:
#self.jradar_values.update({point: self.jradar_initial_value})
self.jradar_values[point] = self.jradar_initial_value
def increase_jradar_values(self):
for key, value in self.jradar_values.items():
self.jradar_values[key] = value + 1
# Calculate the sum of jradar values for given point
def calc_jradar_combination_value(self, x, y):
points = self.get_positions_in_range(x, y, self.config.radar)
sum = 0
for point in points:
sum += self.jradar_values[point]
return sum
# This returns the biggest single coordinate jradar value
# Does not count the points around the coordinate
# This is just a debug method, don't use for real AI
def get_biggest_jradar(self):
biggest = None
for key, value in self.jradar_values.items():
if biggest is None or value > biggest[1]:
biggest = (key, value)
return biggest
# Return the biggest points
# Take into account the points around
def get_biggest_jradar_points(self):
biggest = []
for point in self.optimal_radar_points:
value = self.calc_jradar_combination_value(point.x, point.y)
# Add point if the value is equal
if len(biggest) == 0 or value == biggest[0][1]:
biggest.append((point, value))
# If the value is bigger clear the list and add the point
elif value > biggest[0][1]:
del biggest[:] # Clear the list
biggest.append((point, value))
return biggest
# If there are multiple points this will return one of them by random
# If only one, then it is returned
def get_single_biggest_jradar_points(self):
points = self.get_biggest_jradar_points()
if len(points) == 1:
return points[0]
elif len(points) >= 2:
return random.choice(points)
return (messages.Pos(0, 0), 0) # This should never happen
def reset_jradar(self, x, y):
points_to_reset = self.get_positions_in_range(x, y, self.config.radar)
for point in points_to_reset:
self.jradar_values[point] = self.jradar_initial_value
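    # Typical per-turn use of the "jradar" bookkeeping (sketch, assuming it is
    # driven from a subclass's move() implementation):
    #   self.increase_jradar_values()                     # every unseen cell gets hotter
    #   pos, _ = self.get_single_biggest_jradar_points()  # hottest radar target
    #   action = self.jradar(bot, pos.x, pos.y)           # radar there and reset that area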
def print_game_config(self):
"""
'field_radius': 14,
'loop_time': 1000,
'cannon': 1,
'move': 2,
'max_count': 200,
'radar': 3, 'see': 2,
'bots': 3,
'start_hp': 10}
"""
if self.config is not None:
print "Game config"
print self.config.__dict__
else:
print "Could not print game config, because it is None"
def move(self, bots, events):
"""
Perform bot actions, based on events from last round.
This is the only method that needs to be implemented in custom AIs.
Args:
bots: List of bot states for own team
events: List of events form previous round
Returns:
List of actions to perform this round.
"""
raise NotImplementedError()
def get_valid_moves(self, bot):
return self.get_positions_in_range(x=bot.pos.x, y=bot.pos.y, radius=self.config.move)
def get_valid_moves_wo_cur_pos(self, bot):
        return self.get_positions_in_range_wo_cur_pos(x=bot.pos.x, y=bot.pos.y, radius=self.config.move)
# All moves with max distance (fails when close the field border)
def get_valid_edge_moves(self, bot):
return self.get_edge_positions_in_range(x=bot.pos.x, y=bot.pos.y, radius=self.config.move)
# All moves with max distance (field border fixed)
def get_valid_edge_moves_in_field(self, bot):
return self.get_edge_positions_in_range_in_field(x=bot.pos.x, y=bot.pos.y, radius=self.config.move, field_radius=self.config.field_radius)
def get_valid_cannons(self, bot):
return self.get_positions_in_range(x=0, y=0, radius=self.config.field_radius)
def get_valid_radars(self, bot):
return self.get_positions_in_range(x=0, y=0, radius=self.config.field_radius)
def get_valid_radars_optimal_wall(self, bot):
return self.optimal_radar_points
# Radar to given coordinate and update necessary stuff
def jradar(self, bot, x, y):
self.reset_jradar(x, y)
return self.radar(bot, x, y)
# radars: list of Pos where already radared
def get_valid_radars_optimal_wall_wo_overlap(self, radars):
field = self.optimal_radar_points
for r in radars:
# self.config.radar*2 to avoid overlap
dont_radar_here = self.get_positions_in_range(r.x, r.y, self.config.radar*2)
field = field - set(dont_radar_here)
return field
def get_positions_in_range(self, x=0, y=0, radius=1):
for dx in xrange(-radius, radius+1):
for dy in xrange(max(-radius, -dx-radius), min(radius, -dx+radius)+1):
yield messages.Pos(dx+x, dy+y)
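    # e.g. get_positions_in_range(0, 0, 1) yields the 7 axial-coordinate hexes
    # (-1,0), (-1,1), (0,-1), (0,0), (0,1), (1,-1), (1,0)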
def get_positions_in_range_wo_cur_pos(self, x=0, y=0, radius=1):
for dx in xrange(-radius, radius+1):
for dy in xrange(max(-radius, -dx-radius), min(radius, -dx+radius)+1):
                if dx != 0 or dy != 0: # exclude only the current position, so the bot is forced to move
yield messages.Pos(dx+x, dy+y)
def get_edge_positions_in_range(self, x=0, y=0, radius=1):
return self.circle(x, y, radius)
def get_edge_positions_in_range_in_field(self, x=0, y=0, radius=1, field_radius=14):
return self.circle_on_field(x, y, radius, field_radius)
def east(self, x=0, y=0, n=1):
return messages.Pos(x+n, y)
def southeast(self, x=0, y=0, n=1):
return messages.Pos(x, y+n)
def southwest(self, x=0, y=0, n=1):
return messages.Pos(x-n, y+n)
def west(self, x=0, y=0, n=1):
return messages.Pos(x-n, y)
def northwest(self, x=0, y=0, n=1):
return messages.Pos(x, y-n)
def northeast(self, x=0, y=0, n=1):
return messages.Pos(x+n, y-n)
def pos_on_field(self, x=0, y=0, field_radius=14):
if messages.Pos(x,y) in self.field_points:
return True
return False
def circle(self, x=0, y=0, radius=1):
points = []
cur = self.east(x, y, radius) # Start point
for i in range(radius):
points.append(cur)
cur = self.southwest(cur.x, cur.y)
for i in range(radius):
points.append(cur)
cur = self.west(cur.x, cur.y)
for i in range(radius):
points.append(cur)
cur = self.northwest(cur.x, cur.y)
for i in range(radius):
points.append(cur)
cur = self.northeast(cur.x, cur.y)
for i in range(radius):
points.append(cur)
cur = self.east(cur.x, cur.y)
for i in range(radius):
points.append(cur)
cur = self.southeast(cur.x, cur.y)
return points
def circle_on_field(self, x=0, y=0, radius=1, field_radius=14):
points = []
cur = self.east(x, y, radius) # Start point
for i in range(radius):
if self.pos_on_field(cur.x, cur.y, field_radius):
points.append(cur)
cur = self.southwest(cur.x, cur.y)
for i in range(radius):
if self.pos_on_field(cur.x, cur.y, field_radius):
points.append(cur)
cur = self.west(cur.x, cur.y)
for i in range(radius):
if self.pos_on_field(cur.x, cur.y, field_radius):
points.append(cur)
cur = self.northwest(cur.x, cur.y)
for i in range(radius):
if self.pos_on_field(cur.x, cur.y, field_radius):
points.append(cur)
cur = self.northeast(cur.x, cur.y)
for i in range(radius):
if self.pos_on_field(cur.x, cur.y, field_radius):
points.append(cur)
cur = self.east(cur.x, cur.y)
for i in range(radius):
if self.pos_on_field(cur.x, cur.y, field_radius):
points.append(cur)
cur = self.southeast(cur.x, cur.y)
return points
def move_random_max(self, bot):
move_pos = random.choice(self.get_valid_edge_moves(bot))
return actions.Move(bot_id=bot.bot_id,
x=move_pos[0],
y=move_pos[1])
def move_random_max_in_field(self, bot):
move_pos = random.choice(self.get_valid_edge_moves_in_field(bot))
return actions.Move(bot_id=bot.bot_id,
x=move_pos[0],
y=move_pos[1])
def move_random(self, bot):
move_pos = random.choice(list(self.get_valid_moves(bot)))
return actions.Move(bot_id=bot.bot_id,
x=move_pos.x,
y=move_pos.y)
def move_random_force(self, bot):
move_pos = random.choice(list(self.get_valid_moves_wo_cur_pos(bot)))
return actions.Move(bot_id=bot.bot_id,
x=move_pos.x,
y=move_pos.y)
def move_bot(self, bot, x, y):
return actions.Move(bot_id=bot.bot_id,
x=x,
y=y)
def cannon_random(self, bot):
cannon_pos = random.choice(list(self.get_valid_cannons(bot)))
return actions.Cannon(bot_id=bot.bot_id,
x=cannon_pos.x,
y=cannon_pos.y)
def cannon(self, bot, x, y):
return actions.Cannon(bot_id=bot.bot_id,
x=x,
y=y)
def radar_random(self, bot):
radar_pos = random.choice(list(self.get_valid_radars(bot)))
return actions.Radar(bot_id=bot.bot_id,
x=radar_pos.x,
y=radar_pos.y)
def radar_random_optimal_wall(self, bot):
radar_pos = random.choice(list(self.get_valid_radars_optimal_wall(bot)))
return actions.Radar(bot_id=bot.bot_id,
x=radar_pos.x,
y=radar_pos.y)
def radar_random_optimal_wall_wo_overlap(self, bot, radars):
radar_pos = random.choice(list(self.get_valid_radars_optimal_wall_wo_overlap(radars)))
return actions.Radar(bot_id=bot.bot_id,
x=radar_pos.x,
y=radar_pos.y)
def radar(self, bot, x, y):
return actions.Radar(bot_id=bot.bot_id,
x=x,
y=y)
# Return triangle points for shooting
def triangle_points(self, x, y, radius=1):
points = []
# 2 different possibilities, random between them
choise = random.randint(1,2)
if choise == 1:
points.append(self.northeast(x, y, radius))
points.append(self.southeast(x, y, radius))
points.append(self.west(x, y, radius))
elif choise == 2:
points.append(self.northwest(x, y, radius))
points.append(self.southwest(x, y, radius))
points.append(self.east(x, y, radius))
else: # Fallback, should never happen, 3 same points to given coordinates
for i in range(0,3):
points.append(messages.Pos(x,y))
return points
def optimal_radar_first(self, x, y, radar=4):
point = self.northwest(x, y, radar+1)
point = self.northeast(point.x, point.y, radar)
return point
def optimal_radar_second(self, x, y, radar=4):
point = self.northeast(x, y, radar+1)
point = self.east(point.x, point.y, radar)
return point
def optimal_radar_third(self, x, y, radar=4):
point = self.east(x, y, radar+1)
point = self.southeast(point.x, point.y, radar)
return point
def optimal_radar_fourth(self, x, y, radar=4):
point = self.southeast(x, y, radar+1)
point = self.southwest(point.x, point.y, radar)
return point
def optimal_radar_fifth(self, x, y, radar=4):
point = self.southwest(x, y, radar+1)
point = self.west(point.x, point.y, radar)
return point
def optimal_radar_sixth(self, x, y, radar=4):
point = self.west(x, y, radar+1)
point = self.northwest(point.x, point.y, radar)
return point
# Returns the six optimal radar points around the given point
# radar = the radius of the radar
def optimal_radar_around(self, x, y, radar=4):
points = []
points.append(self.optimal_radar_first(x, y, radar))
points.append(self.optimal_radar_second(x, y, radar))
points.append(self.optimal_radar_third(x, y, radar))
points.append(self.optimal_radar_fourth(x, y, radar))
points.append(self.optimal_radar_fifth(x, y, radar))
points.append(self.optimal_radar_sixth(x, y, radar))
return points
def optimal_radars_on_field(self, radar=4):
# start from origo
points_to_check = set([messages.Pos(0,0)])
checked_points = set([])
looper = 0
while len(points_to_check) >= 1:
looper += 1
if looper >= 50:
break
p = points_to_check.pop()
new_points = set(self.optimal_radar_around(p.x, p.y, radar))
# new points must be in the field
new_points &= self.field_points
# new points cannot be already checked points
new_points -= checked_points
points_to_check |= new_points
checked_points.add(p)
return checked_points
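    # The breadth-first expansion above walks outward from the origin so that the
    # returned centres cover the field with radius-`radar` radar circles; __init__
    # stores this set as self.min_optimal_radars.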
    def should_i_move(self, bot, limit):
<reponame>BobDenny/alpyca-client
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# telescope - Implements ASCOM Alpaca Telescope device classes and enums
#
# Part of the Alpyca Client application interface package
#
# Author: <NAME> <<EMAIL>> (rbd)
# <NAME> <<EMAIL>>
#
# Python Compatibility: Requires Python 3.7 or later
# Doc Environment: Sphinx v4.5.0 with autodoc, autosummary, napoleon, and autoenum
# GitHub: https://github.com/BobDenny/alpyca-client
#
# -----------------------------------------------------------------------------
# MIT License
#
# Copyright (c) 2022 <NAME> and <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
# Edit History:
# 02-May-22 (rbd) Initial Edit
# -----------------------------------------------------------------------------
from datetime import datetime
from typing import List
import dateutil.parser
from alpaca.docenum import DocIntEnum
from alpaca.device import Device
from alpaca.exceptions import *
class AlignmentModes(DocIntEnum):
"""The geometry of the mount"""
algAltAz = 0, 'Altitude-Azimuth alignment'
algPolar = 1, 'Polar (equatorial) mount other than German equatorial.'
algGermanPolar = 2, 'German equatorial mount.'
class DriveRates(DocIntEnum):
"""Well-known telescope tracking rates"""
driveSidereal = 0, 'Sidereal tracking rate (15.041 arcseconds per second).'
    driveLunar = 1, 'Lunar tracking rate (14.685 arcseconds per second).'
driveSolar = 2, 'Solar tracking rate (15.0 arcseconds per second).'
driveKing = 3, 'King tracking rate (15.0369 arcseconds per second).'
class EquatorialCoordinateType(DocIntEnum):
"""Equatorial coordinate systems used by telescopes."""
equOther = 0, 'Custom or unknown equinox and/or reference frame.'
equTopocentric = 1, 'Topocentric coordinates. Coordinates of the object at the current date having allowed for annual aberration, precession and nutation. This is the most common coordinate type for amateur telescopes.'
equJ2000 = 2, 'J2000 equator/equinox. Coordinates of the object at mid-day on 1st January 2000, ICRS reference frame.'
equJ2050 = 3, 'J2050 equator/equinox, ICRS reference frame.'
equB1950 = 4, 'B1950 equinox, FK4 reference frame.'
## equLocalTopocentric = 1 # OBSOLETE, use Topocentric
class GuideDirections(DocIntEnum): # Shared by Camera
"""The direction in which the guide-rate motion is to be made."""
guideNorth = 0, 'North (+ declination/altitude).'
guideSouth = 1, 'South (- declination/altitude).'
guideEast = 2, 'East (+ right ascension/azimuth).'
guideWest = 3, 'West (- right ascension/azimuth).'
class PierSide(DocIntEnum):
"""The pointing state of the mount"""
pierEast = 0, 'Normal pointing state - Mount on the East side of pier (looking West)'
    pierWest = 1, 'Through the pole pointing state - Mount on the West side of pier (looking East)'
    pierUnknown = -1, 'Unknown or indeterminate.'
class TelescopeAxes(DocIntEnum):
axisPrimary = 0, 'Primary axis (e.g., Right Ascension or Azimuth).'
axisSecondary = 1, 'Secondary axis (e.g., Declination or Altitude).'
axisTertiary = 2, 'Tertiary axis (e.g. imager rotator/de-rotator).'
class Rate:
"""Describes a range of rates supported by the :py:meth:`MoveAxis()` method"""
def __init__(
self,
maxv: float,
minv: float
):
self.maxv = maxv
self.minv = minv
@property
def Maximum(self) -> float:
"""The maximum rate (degrees per second)"""
return self.maxv
@property
def Minimum(self) -> float:
"""The minimum rate (degrees per second)"""
return self.minv
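# Illustrative client sketch (the address, port and device number below are
# assumptions, not values defined in this module):
#   scope = Telescope('127.0.0.1:11111', 0)     # Alpaca telescope device 0
#   if scope.AlignmentMode == AlignmentModes.algGermanPolar:
#       print(scope.Altitude, scope.Azimuth)    # degrees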
class Telescope(Device):
"""ASCOM Standard ITelescope V3 Interface"""
def __init__(
self,
address: str,
device_number: int,
protocol: str = "http"
):
"""Initialize the Telescope object.
Args:
address (str): IP address and port of the device (x.x.x.x:pppp)
device_number (int): The index of the device (usually 0)
protocol (str, optional): Only if device needs https. Defaults to "http".
Raises:
DriverException: An error occurred that is not described by
one of the more specific ASCOM exceptions.
The device did not *successfully* complete the request.
"""
super().__init__(address, "telescope", device_number, protocol)
@property
def AlignmentMode(self) -> AlignmentModes:
"""The current mount alignment mode.
Raises:
NotImplementedException: If the mount cannot report its alignment mode.
NotConnectedException: If the device is not connected
DriverException: An error occurred that is not described by
one of the more specific ASCOM exceptions.
The device did not *successfully* complete the request.
"""
return AlignmentModes(self._get("alignmentmode"))
@property
def Altitude(self) -> float:
"""The mount's current Altitude (degrees) above the horizon.
Raises:
NotImplementedException: Alt-Az not implemented by the device
NotConnectedException: If the device is not connected
DriverException: An error occurred that is not described by
one of the more specific ASCOM exceptions.
The device did not *successfully* complete the request.
"""
return self._get("altitude")
@property
def ApertureArea(self) -> float:
"""The telescope's aperture area (square meters).
Raises:
            NotImplementedException: Not implemented by the device
NotConnectedException: If the device is not connected
DriverException: An error occurred that is not described by
one of the more specific ASCOM exceptions.
The device did not *successfully* complete the request.
Notes:
* The area takes into account any obstructions; it is the actual
light-gathering area.
"""
return self._get("aperturearea")
@property
def ApertureDiameter(self) -> float:
"""Return the telescope's effective aperture (meters).
Raises:
            NotImplementedException: Not implemented by the device
NotConnectedException: If the device is not connected
DriverException: An error occurred that is not described by
one of the more specific ASCOM exceptions.
The device did not *successfully* complete the request.
"""
return self._get("aperturediameter")
@property
def AtHome(self) -> bool:
"""The mount is at the home position.
Raises:
NotConnectedException: If the device is not connected
DriverException: An error occurred that is not described by
one of the more specific ASCOM exceptions.
The device did not *successfully* complete the request.
Notes:
* True if the telescope is stopped in the Home position. Can be True
only following a FindHome() operation.
* Will become False immediately upon any slewing operation
* Will always be False if the telescope does not support homing. Use
:py:attr:`CanFindHome` to determine if the mount supports homing.
* TODO [REVIEW] This should be the completion property for async
FindHomeAsync().
"""
return self._get("athome")
@property
def AtPark(self) -> bool:
"""The telescope is at the park position.
Raises:
NotConnectedException: If the device is not connected
DriverException: An error occurred that is not described by
one of the more specific ASCOM exceptions.
The device did not *successfully* complete the request.
Notes:
* True if the telescope is stopped in the Park position. Can be True
only following successful completion of a :py:meth:`Park()` operation.
* When parked, the telescope will be stationary or restricted to a small
safe range of movement. :py:attr:`Tracking` will be False.
* You must take the telescope out of park by calling :py:meth:`Unpark()`;
attempts to slew enabling tracking while parked will raise an exception.
* Will always be False if the telescope does not support parking. Use
:py:attr:`CanPark` to determine if the mount supports parking.
* TODO [REVIEW] This should be the completion property for async
ParkAsync(). I think we have established that Park is already
asynch? If so I will document that.
"""
return self._get("atpark")
@property
def Azimuth(self) -> float:
"""The azimuth (degrees) at which the telescope is currently pointing.
Raises:
NotImplementedException: Alt-Az not implemented by the device
NotConnectedException: If the device is not connected
DriverException: An error occurred that is not described by
one of the more specific ASCOM exceptions.
The device did not *successfully* complete the request.
Notes:
* Azimuth is per the usual alt/az coordinate convention: degrees
North-referenced, positive East/clockwise.
"""
return self._get("azimuth")
@property
def CanFindHome(self) -> bool:
"""The mount can find its home position.
Raises:
NotConnectedException: If the device is not connected
DriverException: An error occurred that is not described by
one of the more specific ASCOM exceptions.
The device did not *successfully* complete the request.
import random
from math import sqrt
from enum import Enum, auto
import torch
from torch import nn
from torch.autograd import Function
from torch.nn import functional as F
from torch.nn import init
import constants
from logger import LOGGER
from inference.perspective import make_homography_kornia, warp_homography_kornia
def slerp(val, low, high):
"""
val, low, high: bs x frames x coordinates
if val == 0 then low
if val == 1 then high
"""
assert low.dim() == 3, low.dim()
assert val.dim() == 3, val.dim()
assert high.dim() == 3, high.dim()
low_norm = low / torch.norm(low, dim=2, keepdim=True)
high_norm = high / torch.norm(high, dim=2, keepdim=True)
omega = torch.acos((low_norm * high_norm).sum(dim=2)).unsqueeze(-1) # bs x frames x 1
so = torch.sin(omega)
res = (torch.sin((1.0 - val) * omega) / so) * low + (torch.sin(val * omega) / so) * high
return res
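def _slerp_example():
    # Illustrative only: shapes follow the docstring (batch x frames x coords);
    # the concrete sizes below are assumptions.
    low = torch.randn(2, 4, 512)
    high = torch.randn(2, 4, 512)
    val = torch.full((2, 4, 1), 0.5)  # 0 -> low, 1 -> high
    return slerp(val, low, high)      # point on the arc between low and high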
def bilinear_warp(images, flow, cycle_wrap=False, padding_mode='zeros'):
"""
Apply warping via bilinear resampling to given images
:param images: BatchSize x Channels x Height x Width - Images to warp
:param flow: BatchSize x 2 x Height x Width - Offsets in range (-1, 1) (flow)
:param cycle_wrap: Whether to append fragments moved out of view to another part of the image
:return:
"""
flow = flow[:, [1, 0]]
batch_size, channels, height, width = images.size()
height_coords = torch.linspace(-1, 1, height, device=flow.device)
width_coords = torch.linspace(-1, 1, width, device=flow.device)
src_grid = torch.cat([width_coords.unsqueeze(0).expand(height, width).unsqueeze(0),
height_coords.unsqueeze(1).expand(height, width).unsqueeze(0)],
dim=0).unsqueeze(0)
new_grids = src_grid + flow
if cycle_wrap:
new_grids = (new_grids <= 1).float() * new_grids + (new_grids > 1).float() * new_grids % 2
new_grids = (new_grids >= -1).float() * new_grids + (new_grids < -1).float() * new_grids % -2
new_grids = new_grids - 2 * (new_grids > 1).float() + 2 * (new_grids < -1).float()
return F.grid_sample(images, new_grids.permute(0, 2, 3, 1), padding_mode=padding_mode)
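def _bilinear_warp_example():
    # Illustrative only (shapes are assumptions): a zero flow samples every
    # pixel from its own location, so the result matches the input (exactly
    # with align_corners=True grid sampling, approximately otherwise).
    imgs = torch.randn(1, 3, 64, 64)
    flow = torch.zeros(1, 2, 64, 64)  # offsets in (-1, 1)
    return bilinear_warp(imgs, flow)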
def frames2batch(tensor_or_list):
if isinstance(tensor_or_list, list):
return [frames2batch(t) for t in tensor_or_list]
if isinstance(tensor_or_list, tuple):
return tuple([frames2batch(t) for t in tensor_or_list])
else:
t = tensor_or_list
return t.reshape(t.shape[0] * t.shape[1], *t.shape[2:])
def batch2frames(tensor_or_list, batch_size, n_frames):
if isinstance(tensor_or_list, list):
return [batch2frames(t, batch_size, n_frames) for t in tensor_or_list]
elif isinstance(tensor_or_list, tuple):
return tuple([batch2frames(t, batch_size, n_frames) for t in tensor_or_list])
else:
t = tensor_or_list
return t.view(batch_size, n_frames, *t.shape[1:])
def init_linear(linear):
    init.xavier_normal_(linear.weight)
linear.bias.data.zero_()
def init_conv(conv, glu=True):
    init.kaiming_normal_(conv.weight)
if conv.bias is not None:
conv.bias.data.zero_()
class EqualLR:
def __init__(self, name):
self.name = name
def compute_weight(self, module):
weight = getattr(module, self.name + '_orig')
fan_in = weight.data.size(1) * weight.data[0][0].numel()
return weight * sqrt(2 / fan_in)
@staticmethod
def apply(module, name):
fn = EqualLR(name)
weight = getattr(module, name)
del module._parameters[name]
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
module.register_forward_pre_hook(fn)
return fn
def __call__(self, module, input):
weight = self.compute_weight(module)
setattr(module, self.name, weight)
def equal_lr(module, name='weight'):
EqualLR.apply(module, name)
return module
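def _equal_lr_example():
    # Sketch of the equalized learning-rate trick implemented above: the raw
    # tensor is stored as `weight_orig` and rescaled by sqrt(2 / fan_in) on
    # every forward pass, so layers receive gradients of comparable scale
    # (StyleGAN-style). Layer sizes below are assumptions, illustrative only.
    conv = equal_lr(nn.Conv2d(3, 64, 3, padding=1))
    return conv(torch.randn(1, 3, 32, 32))  # forward uses weight * sqrt(2 / fan_in)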
class FusedUpsample(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, padding=0):
super().__init__()
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
weight = torch.randn(in_channel, out_channel, *kernel_size)
bias = torch.zeros(out_channel)
fan_in = in_channel * kernel_size[0] * kernel_size[1]
self.multiplier = sqrt(2 / fan_in)
self.weight = nn.Parameter(weight)
self.bias = nn.Parameter(bias)
self.pad = padding
def forward(self, input):
weight = F.pad(self.weight * self.multiplier, [1, 1, 1, 1])
weight = (
weight[:, :, 1:, 1:]
+ weight[:, :, :-1, 1:]
+ weight[:, :, 1:, :-1]
+ weight[:, :, :-1, :-1]
) / 4
out = F.conv_transpose2d(input, weight, self.bias, stride=2, padding=self.pad)
return out
class FusedDownsample(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, padding=0):
super().__init__()
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
weight = torch.randn(out_channel, in_channel, *kernel_size)
bias = torch.zeros(out_channel)
fan_in = in_channel * kernel_size[0] * kernel_size[1]
self.multiplier = sqrt(2 / fan_in)
self.weight = nn.Parameter(weight)
self.bias = nn.Parameter(bias)
self.pad = padding
def forward(self, input):
weight = F.pad(self.weight * self.multiplier, [1, 1, 1, 1])
weight = (
weight[:, :, 1:, 1:]
+ weight[:, :, :-1, 1:]
+ weight[:, :, 1:, :-1]
+ weight[:, :, :-1, :-1]
) / 4
out = F.conv2d(input, weight, self.bias, stride=2, padding=self.pad)
return out
class PixelNorm(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input):
return input / torch.sqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-8)
class BlurFunctionBackward(Function):
@staticmethod
def forward(ctx, grad_output, kernel, kernel_flip):
ctx.save_for_backward(kernel, kernel_flip)
grad_input = F.conv2d(
grad_output, kernel_flip, padding=1, groups=grad_output.shape[1]
)
return grad_input
@staticmethod
def backward(ctx, gradgrad_output):
kernel, kernel_flip = ctx.saved_tensors
grad_input = F.conv2d(
gradgrad_output, kernel, padding=1, groups=gradgrad_output.shape[1]
)
return grad_input, None, None
class BlurFunction(Function):
@staticmethod
def forward(ctx, input, kernel, kernel_flip):
ctx.save_for_backward(kernel, kernel_flip)
output = F.conv2d(input, kernel, padding=1, groups=input.shape[1])
return output
@staticmethod
def backward(ctx, grad_output):
kernel, kernel_flip = ctx.saved_tensors
grad_input = BlurFunctionBackward.apply(grad_output, kernel, kernel_flip)
return grad_input, None, None
blur = BlurFunction.apply
class Blur(nn.Module):
def __init__(self, channel):
super().__init__()
weight = torch.tensor([[1, 2, 1], [2, 4, 2], [1, 2, 1]], dtype=torch.float32)
weight = weight.view(1, 1, 3, 3)
weight = weight / weight.sum()
weight_flip = torch.flip(weight, [2, 3])
self.register_buffer('weight', weight.repeat(channel, 1, 1, 1))
self.register_buffer('weight_flip', weight_flip.repeat(channel, 1, 1, 1))
def forward(self, input):
return blur(input, self.weight, self.weight_flip)
# return F.conv2d(input, self.weight, padding=1, groups=input.shape[1])
class EqualConv2d(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
conv = nn.Conv2d(*args, **kwargs)
conv.weight.data.normal_()
conv.bias.data.zero_()
self.conv = equal_lr(conv)
def forward(self, input):
return self.conv(input)
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim):
super().__init__()
linear = nn.Linear(in_dim, out_dim)
linear.weight.data.normal_()
linear.bias.data.zero_()
self.linear = equal_lr(linear)
def forward(self, input):
return self.linear(input)
class ConvBlock(nn.Module):
def __init__(
self,
in_channel,
out_channel,
kernel_size,
padding,
kernel_size2=None,
padding2=None,
downsample=False,
fused=False,
):
super().__init__()
pad1 = padding
pad2 = padding2 if padding2 is not None else padding
kernel1 = kernel_size
kernel2 = kernel_size2 if kernel_size2 is not None else kernel_size
self.conv1 = nn.Sequential(
EqualConv2d(in_channel, out_channel, kernel1, padding=pad1),
nn.LeakyReLU(0.2),
)
if downsample:
if fused:
self.conv2 = nn.Sequential(
Blur(out_channel),
FusedDownsample(out_channel, out_channel, kernel2, padding=pad2),
nn.LeakyReLU(0.2),
)
else:
self.conv2 = nn.Sequential(
Blur(out_channel),
EqualConv2d(out_channel, out_channel, kernel2, padding=pad2),
nn.AvgPool2d(2),
nn.LeakyReLU(0.2),
)
else:
self.conv2 = nn.Sequential(
EqualConv2d(out_channel, out_channel, kernel2, padding=pad2),
nn.LeakyReLU(0.2),
)
def forward(self, input):
out = self.conv1(input)
out = self.conv2(out)
return out
class AdaptiveInstanceNorm(nn.Module):
def __init__(self, in_channel, style_dim):
super().__init__()
self.norm = nn.InstanceNorm2d(in_channel)
self.style = EqualLinear(style_dim, in_channel * 2)
self.style.linear.bias.data[:in_channel] = 1
self.style.linear.bias.data[in_channel:] = 0
# monkey patch to optimize W'
self.fixed_style = None
def forward(self, input, style):
# monkey patch to optimize W'
if self.fixed_style is not None:
assert self.fixed_style[0].shape == style.shape, (self.fixed_style[0].shape, style.shape)
style = self.fixed_style[0]
style = self.style(style).unsqueeze(2).unsqueeze(3)
gamma, beta = style.chunk(2, 1)
out = self.norm(input)
out = gamma * out + beta
return out
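def _adain_example():
    # Illustrative only (sizes are assumptions): the style vector is projected
    # to 2*C channels and split into a per-channel scale and shift that
    # modulate the instance-normalised feature map.
    adain = AdaptiveInstanceNorm(in_channel=256, style_dim=512)
    feat = torch.randn(4, 256, 16, 16)
    style = torch.randn(4, 512)
    return adain(feat, style)  # same shape as feat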
class NoiseInjection(nn.Module):
def __init__(self, channel):
super().__init__()
self.weight = nn.Parameter(torch.zeros(1, channel, 1, 1))
def forward(self, image, noise):
added = self.weight * noise
return image + added
class ConstantInput(nn.Module):
def __init__(self, channel, size=4):
super().__init__()
self.input = nn.Parameter(torch.randn(1, channel, size, size))
def forward(self, input):
if isinstance(input, (list, tuple)):
batch = input[0].shape[0]
else:
batch = input.shape[0]
out = self.input.repeat(batch, 1, 1, 1)
return out
class StyledConvBlock(nn.Module):
def __init__(
self,
in_channel,
out_channel,
kernel_size=3,
padding=1,
style_dim=512,
initial=False,
upsample=False,
fused=False,
two_noises=False,
frames_channels=None
):
super().__init__()
self.two_noises = two_noises
if initial:
self.conv1 = ConstantInput(in_channel)
else:
if upsample:
if fused:
self.conv1 = nn.Sequential(
FusedUpsample(
in_channel, out_channel, kernel_size, padding=padding
),
Blur(out_channel),
)
else:
self.conv1 = nn.Sequential(
nn.Upsample(scale_factor=2, mode='nearest'),
EqualConv2d(
in_channel, out_channel, kernel_size, padding=padding
),
Blur(out_channel),
)
else:
self.conv1 = EqualConv2d(
in_channel, out_channel, kernel_size, padding=padding
)
self.noise1 = equal_lr(NoiseInjection(out_channel))
if self.two_noises:
self.noise12 = equal_lr(NoiseInjection(out_channel))
self.adain1 = AdaptiveInstanceNorm(out_channel, style_dim)
self.lrelu1 = nn.LeakyReLU(0.2)
self.conv2 = EqualConv2d(out_channel, out_channel, kernel_size, padding=padding)
self.noise2 = equal_lr(NoiseInjection(out_channel))
if self.two_noises:
self.noise22 = equal_lr(NoiseInjection(out_channel))
self.adain2 = AdaptiveInstanceNorm(out_channel, style_dim)
self.lrelu2 = nn.LeakyReLU(0.2)
def forward(self, input, style, noise):
if self.two_noises:
noise1, noise2 = noise
else:
noise1 = noise
if isinstance(style, tuple):
style1, style2 = style
else:
style1 = style2 = style
out = self.conv1(input)
out = self.noise1(out, noise1)
if self.two_noises:
out = self.noise12(out, noise2)
out = self.lrelu1(out)
out = self.adain1(out, style1)
out = self.conv2(out)
out = self.noise2(out, noise1)
if self.two_noises:
out = self.noise22(out, noise2)
out = self.lrelu2(out)
out = self.adain2(out, style2)
return out
class Generator(nn.Module):
def __init__(self, code_dim=512, fused=True, two_noises=False):
super().__init__()
channels = 512
progression = []
to_rgb = []
for i in range(9):
if i == 0: # 4
block = StyledConvBlock(
channels, channels, 3, 1, style_dim=code_dim, initial=True, two_noises=two_noises,
)
elif i <= 3: # 8 - 32
block = StyledConvBlock(
channels, channels, 3, 1, style_dim=code_dim, upsample=True, two_noises=two_noises,
)
elif i == 4: # 64
block = StyledConvBlock(
channels, channels // 2, 3, 1, style_dim=code_dim,
upsample=True, two_noises=two_noises,
)
channels //= 2
else: # 128 - 1024
block = StyledConvBlock(
channels, channels // 2, 3, 1, style_dim=code_dim, upsample=True,
fused=fused, two_noises=two_noises,
)
channels //= 2
progression.append(block)
to_rgb.append(EqualConv2d(channels, 3, 1))
self.progression = nn.ModuleList(progression)
        self.to_rgb = nn.ModuleList(to_rgb)
def wrapper(self, wrapper):
self._wrapper = wrapper
@property
def wrapped(self) -> "PayloadType":
return self._wrapped
@wrapped.setter
def wrapped(self, wrapped):
if isinstance(wrapped, PayloadType) or wrapped is None:
self._wrapped = wrapped
else:
            self._wrapped = PayloadType(ptype=wrapped)
@property
def supported_os(self) -> str:
return self._supported_os
@supported_os.setter
def supported_os(self, supported_os):
self._supported_os = supported_os
@property
def last_heartbeat(self) -> str:
return self._last_heartbeat
@last_heartbeat.setter
def last_heartbeat(self, last_heartbeat):
self._last_heartbeat = last_heartbeat
@property
def container_running(self) -> bool:
return self._container_running
@container_running.setter
def container_running(self, container_running):
self._container_running = container_running
@property
def service(self) -> str:
return self._service
@service.setter
def service(self, service):
self._service = service
@property
def id(self) -> int:
return self._id
@id.setter
def id(self, id):
self._id = id
@property
def author(self) -> str:
return self._author
@author.setter
def author(self, author):
self._author = author
@property
def note(self) -> str:
return self._note
@note.setter
def note(self, note):
self._note = note
@property
def supports_dynamic_loading(self) -> bool:
return self._supports_dynamic_loading
@supports_dynamic_loading.setter
def supports_dynamic_loading(self, supports_dynamic_loading):
self._supports_dynamic_loading = supports_dynamic_loading
@property
def deleted(self) -> bool:
return self._deleted
@deleted.setter
def deleted(self, deleted):
self._deleted = deleted
@property
def build_parameters(self) -> List[Dict]:
return self._build_parameters
@build_parameters.setter
def build_parameters(self, build_parameters):
self._build_parameters = build_parameters
@property
def c2_profiles(self) -> List["C2Profile"]:
return self._c2_profiles
@c2_profiles.setter
def c2_profiles(self, c2_profiles):
if isinstance(c2_profiles, List):
self._c2_profiles = [
C2Profile(**x) if isinstance(x, Dict) else x for x in c2_profiles
]
else:
self._c2_profiles = c2_profiles
@property
def commands(self) -> List["Command"]:
return self._commands
@commands.setter
def commands(self, commands):
if isinstance(commands, List):
self._commands = [
Command(**x)
if isinstance(x, Dict)
else Command(cmd=x)
if isinstance(x, str)
else x
for x in commands
]
else:
self._commands = commands
class Command:
def __init__(
self,
needs_admin: bool = None,
help_cmd: str = None,
description: str = None,
cmd: str = None,
payload_type: Union[PayloadType, str] = None,
creation_time: str = None,
version: int = None,
is_exit: bool = None,
is_file_browse: bool = None,
is_process_list: bool = None,
is_download_file: bool = None,
is_remove_file: bool = None,
is_upload_file: bool = None,
author: str = None,
mythic_version: int = None,
deleted: bool = None,
id: int = None,
params: List[Union["CommandParameters", Dict[str, str]]] = None,
):
self._needs_admin = needs_admin
self._help_cmd = help_cmd
self._description = description
self._cmd = cmd
if isinstance(payload_type, PayloadType) or payload_type is None:
self._payload_type = payload_type
else:
self._payload_type = PayloadType(ptype=payload_type)
self._creation_time = creation_time
self._version = version
self._is_exit = is_exit
self._is_file_browse = is_file_browse
self._is_process_list = is_process_list
self._is_download_file = is_download_file
self._is_remove_file = is_remove_file
self._is_upload_file = is_upload_file
self._author = author
        self._deleted = deleted
self._mythic_version = mythic_version
self._id = id
if params is not None and params != []:
if isinstance(params, list):
self._params = [
CommandParameters(**x) if isinstance(x, Dict) else x for x in params
]
else:
raise ValueError("params must be a list")
else:
self._params = None
def to_json(self):
r = {}
for k in vars(self):
if getattr(self, k) is not None:
try:
r[k[1:]] = getattr(self, k)
except:
r[k[1:]] = json.dumps(
getattr(self, k), default=lambda o: o.to_json()
)
return r
def __str__(self):
return json.dumps(self.to_json())
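    # Illustrative example: to_json() keeps only the attributes that were set
    # and strips the leading underscore from each key, e.g.
    #   Command(cmd='shell', needs_admin=False).to_json()
    #   -> {'needs_admin': False, 'cmd': 'shell'}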
def __eq__(self, other):
"""Overrides the default implementation"""
if isinstance(other, Command):
return (
self._cmd == other.cmd
and self._payload_type.ptype == other.payload_type.ptype
) or (
self._id is not None and other.id is not None and self._id == other.id
)
return False
@property
def needs_admin(self) -> bool:
return self._needs_admin
@needs_admin.setter
def needs_admin(self, needs_admin):
self._needs_admin = needs_admin
@property
def help_cmd(self) -> str:
return self._help_cmd
@help_cmd.setter
def help_cmd(self, help_cmd):
self._help_cmd = help_cmd
@property
def description(self) -> str:
return self._description
@description.setter
def description(self, description):
self._description = description
@property
def cmd(self) -> str:
return self._cmd
@cmd.setter
def cmd(self, cmd):
self._cmd = cmd
@property
def payload_type(self) -> PayloadType:
return self._payload_type
@payload_type.setter
def payload_type(self, payload_type):
if isinstance(payload_type, PayloadType) or payload_type is None:
self._payload_type = payload_type
else:
self._payload_type = PayloadType(ptype=payload_type)
@property
def creation_time(self) -> str:
return self._creation_time
@creation_time.setter
def creation_time(self, creation_time):
self._creation_time = creation_time
@property
def version(self) -> int:
return self._version
@version.setter
def version(self, version):
self._version = version
@property
def is_exit(self) -> bool:
return self._is_exit
@is_exit.setter
def is_exit(self, is_exit):
self._is_exit = is_exit
@property
def is_file_browse(self) -> bool:
return self._is_file_browse
@is_file_browse.setter
def is_file_browse(self, is_file_browse):
self._is_file_browse = is_file_browse
@property
def is_process_list(self) -> bool:
return self._is_process_list
@is_process_list.setter
def is_process_list(self, is_process_list):
self._is_process_list = is_process_list
@property
def is_download_file(self) -> bool:
return self._is_download_file
@is_download_file.setter
def is_download_file(self, is_download_file):
self._is_download_file = is_download_file
@property
def is_remove_file(self) -> bool:
return self._is_remove_file
@is_remove_file.setter
def is_remove_file(self, is_remove_file):
self._is_remove_file = is_remove_file
@property
def is_upload_file(self) -> bool:
return self._is_upload_file
@is_upload_file.setter
def is_upload_file(self, is_upload_file):
self._is_upload_file = is_upload_file
@property
def deleted(self) -> bool:
return self._deleted
@deleted.setter
def deleted(self, deleted):
self._deleted = deleted
@property
def author(self) -> str:
return self._author
@author.setter
def author(self, author):
self._author = author
@property
def mythic_version(self) -> int:
return self._mythic_version
@mythic_version.setter
def mythic_version(self, mythic_version):
self._mythic_version = mythic_version
@property
def id(self) -> int:
return self._id
@id.setter
def id(self, id):
self._id = id
@property
def params(self) -> List["CommandParameters"]:
return self._params
@params.setter
def params(self, params):
if isinstance(params, list):
self._params = [
CommandParameters(**x) if isinstance(x, Dict) else x for x in params
]
elif params is None or params == []:
self._params = None
else:
raise ValueError("params must be a list")
class CommandParameters:
def __init__(
self,
command: Union[
Command, int
] = None, # database ID for the corresponding command
cmd: str = None, # cmd string the command refers to (like shell)
payload_type: Union[PayloadType, str] = None,
name: str = None,
type: str = None,
default_value: str = None,
description: str = None,
supported_agents: str = None,
choices: Union[List[str], str] = None,
required: bool = None,
id: int = None,
):
if isinstance(command, Command) or command is None:
self._command = command
else:
self._command = Command(id=command)
self._cmd = cmd
if isinstance(payload_type, PayloadType) or payload_type is None:
self._payload_type = payload_type
else:
self._payload_type = PayloadType(ptype=payload_type)
self._name = name
self._type = type
self._description = description
self._supported_agents = supported_agents
self._default_value = default_value
if isinstance(choices, List) or choices is None:
self._choices = choices
else:
self._choices = choices.split("\n")
self._required = required
self._id = id
def to_json(self):
r = {}
for k in vars(self):
if getattr(self, k) is not None:
try:
r[k[1:]] = getattr(self, k)
except:
r[k[1:]] = json.dumps(
getattr(self, k), default=lambda o: o.to_json()
)
return r
def __str__(self):
return json.dumps(self.to_json())
def __eq__(self, other):
"""Overrides the default implementation"""
if isinstance(other, CommandParameters):
return (
                self._name == other.name
                and (self._command == other.command or self._cmd == other.cmd)
) or (
self._id is not None and other.id is not None and self._id == other.id
)
return False
@property
def command(self) -> Command:
return self._command
@command.setter
def command(self, command):
if isinstance(command, Command) or command is None:
self._command = command
else:
self._command = Command(id=command)
@property
def name(self) -> str:
return self._name
@name.setter
def name(self, name):
self._name = name
@property
def type(self) -> str:
return self._type
@type.setter
def type(self, type):
self._type = type
@property
def description(self) -> str:
return self._description
@description.setter
def description(self, description):
self._description = description
@property
def supported_agents(self) -> str:
return self._supported_agents
@supported_agents.setter
def supported_agents(self, supported_agents):
self._supported_agents = supported_agents
@property
def default_value(self) -> str:
return self._default_value
@default_value.setter
def default_value(self, default_value):
self._default_value = default_value
@property
def choices(self) -> List[str]:
return self._choices
@choices.setter
def choices(self, choices):
if isinstance(choices, List) or choices is None:
self._choices = choices
else:
self._choices = choices.split("\n")
@property
def required(self) -> bool:
return self._required
@required.setter
def required(self, required):
self._required = required
@property
def id(self) -> int:
return self._id
@id.setter
def id(self, id):
self._id = id
@property
def cmd(self) -> str:
return self._cmd
@cmd.setter
def cmd(self, cmd):
self._cmd = cmd
@property
def payload_type(self) -> PayloadType:
return self._payload_type
@payload_type.setter
def payload_type(self, payload_type):
if isinstance(payload_type, PayloadType) or payload_type is None:
self._payload_type = payload_type
else:
self._payload_type = PayloadType(ptype=payload_type)
class C2Profile:
def __init__(
self,
name: str = None,
description: str = None,
creation_time: str = None,
running: bool = None,
last_heartbeat: str = None,
container_running: bool = None,
author: str = None,
is_p2p: bool = None,
is_server_routed: bool = None,
mythic_encrypts: bool = None,
deleted: bool = None,
id: int = None,
ptype: List[Union[PayloadType, str]] = None,
parameters: Dict = None,
): # list of payload types that support this c2 profile
self._name = name
self._description = description
self._creation_time = creation_time
self._running = running
self._last_heartbeat = last_heartbeat
self._container_running = container_running
self._id | |
molecular entities when we click on the labels. It is
a list of objects and has None for characters such as spaces
"""
if not self.sequenceIndex.has_key(mol):
posy = None
else:
# delete all canvas items for that molecule
if not self.isVisible[mol]:
self.showSequence(mol)
self.canvas.delete(mol.molTag)
# compute y position for that molecule on canvas
molInd = self.ranksOfVisibleMolecules()
posy = molInd[mol]*self.rowScale*self.characterHeight
# set the 3 tuple used to draw this molecule
mol.sequenceLabels = [labels, numbers, objects]
self.addMolecule(mol, posy)
def showHideChain(self, chain, event):
mol = chain.top
cid = chain.id
if cid==' ': cid='?'
item = self.molNamesCanvas.find_withtag("%s&&%s"%(mol.molTag, cid))
if chain in self.ignoreChains[mol]:
self.ignoreChains[mol].remove(chain)
self.molNamesCanvas.itemconfig(item, fill=self.activeTextColor)
show = True
else:
self.ignoreChains[mol].append(chain)
self.molNamesCanvas.itemconfig(item, fill=self.inactiveTextColor)
show = False
# rebuild labels
mol.sequenceLabels = buildLabels(mol, self.ignoreChains[mol], self.userGaps[mol])
# delete items for this molecule
self.canvas.delete(mol.molTag)
# rebuild Tk items
molInd = self.ranksOfVisibleMolecules()
posy = molInd[mol]*self.rowScale*self.characterHeight
self.addMolecule(mol, posy=posy)
if show:
canvas = self.canvas
bcan = self.bcan
# redo selection
items = []
for res in chain.residues:
if self.selected.has_key(res):
items.append(self.resToTkid[res])
self.selectItems(items)
def handleDeleteAtoms(self, atoms):
for mol in atoms.top.uniq():
if not hasattr(mol, 'sequenceLabels'): continue
labl, numl, obj= mol.sequenceLabels
#print numl
#print labl
labl1, numl1, obj1 = buildLabels(mol , self.ignoreChains[mol], self.userGaps[mol])
#print "--------------------------"
#print numl1
#print labl1
if labl != labl1:
#rebuild the seq.string for this molecule in the viewer
self.canvas.delete(mol.molTag)
mol.sequenceLabels = (labl1, numl1, obj1)
# rebuild Tk items
molInd = self.ranksOfVisibleMolecules()
posy = molInd[mol]*self.rowScale*self.characterHeight
self.addMolecule(mol, posy=posy)
def addMolecule(self, molecule, posy=None, molInd=None):
if posy is None:
# when we add the molecule we create all items on the canvas but hid some
if self.sequenceIndex.has_key(molecule):
return # the molecule was already added
# add the molecule and increment the number of sequences
self.nbSequences += 1
if molInd is not None:
self.sequenceIndex[molecule] = molInd+1
self.sequenceOrder.insert(molInd, molecule)
self.lasty = molInd*self.rowScale*self.characterHeight
else:
self.sequenceIndex[molecule] = self.nbSequences
self.sequenceOrder.append(molecule)
self.ignoreChains[molecule] = []
self.isVisible[molecule] = True
self.userGaps[molecule] = {}# no gaps to start with
self.nbVisibleSequences += 1
posy1 = self.lasty
else:
posy1 = posy
if not hasattr(molecule, "sequenceLabels"):
labels = buildLabels(molecule)
molecule.sequenceLabels = labels
else:
labels = molecule.sequenceLabels
canvas = self.canvas
bcan = self.bcan
## now create all the canvas items
## we create letter(s) for each character in labels[1]. This is the numbers line
## we create letter(s) for each character in labels[0]. This is the sequence line
size = self.fontSize
posy2 = posy1 + self.characterHeight*.8
width = self.characterWidth
tkColFormat = '#%02X%02X%02X'
scids = self.scids
        # we add '_' in front in case the molecule name looks like an int, which is not a legal tag
molName = molecule.molTag = '_'+molecule.name
if posy is None:
# create the molecule name
mcan = self.molNamesCanvas
cid = mcan.create_text( 10, posy1+2, text=molecule.name, anchor='nw',
font=(('courier'), size, 'bold'), fill=self.activeTextColor,
tags=(molName,'activeText'))
# add chain names
posx = 20
for c in molecule.chains:
if c in self.ignoreChains[molecule]:
color = self.inactiveTextColor
tag = 'inactiveText'
else:
color = self.activeTextColor
tag = 'activeText'
chainid = c.id
if chainid==' ': chainid='?'
cid = mcan.create_text( posx, posy2+3, text=chainid, anchor='nw',
font=(('courier'), size-2, ''), fill=color,
tags=(molName ,chainid, tag))
cb1 = CallbackFunction(self.jumpToChain, c)
mcan.tag_bind(cid, "<Button-1>", cb1)
cb3 = CallbackFunction(self.showHideChain, c)
mcan.tag_bind(cid, "<Button-3>", cb3)
posx += width
# create the letter for the number line
labs, numList, obj = labels
posx = 0
for num in numList:
tid = canvas.create_text(posx, posy1, text=num, anchor='nw',
font=(('courier'), size-2, 'bold'), fill=self.activeTextColor,
tags=(molName,'number', 'activeText'))
posx += len(num)*width
# create the letters for the sequence line
posx = 0
pady = self.pady
scids = self.scids
tkIdToObj = self.tkIdToObj
resToTkid = self.resToTkid
orientId = self.orientId
for lab, res in zip(labs, obj):
# find color or residue.
if res is None or isinstance(res, Chain): # non pickable charaters, i.e. mol name etc
textcol = self.activeTextColor
tag = 'activeText'
if lab=='-' or (isinstance(res, Chain) and res in self.ignoreChains[molecule]):
textcol = self.inactiveTextColor
tag = 'inactiveText'
else:
# For amino acids we take color of lines of CA atom
# if there is no CA we use gray50
## if res.hasCA:
## ca = res.get('CA')
## if len(ca)==0: # can happen when alternate position are available
## ca = res.get('CA@.')
## if len(ca)==0: # no CA found !
## col = (.5, .5, .5)
## else:
## col = ca.colors['lines'][0]
## else:
## col = ca.colors['lines'][0]
## else:
## at = res.get('C1*')
## if at:
## col = at[0].colors['lines']
## else:
## col = (.5, .5, .5)
if res.CAatom is not None:
col = res.CAatom.colors['lines']
elif res.C1atom is not None:
col = res.C1atom.colors['lines']
else:
col = (.5, .5, .5)
# create Tkcolors
# if the residue is selected, we color the letter black and create a box behind it
# with residue color
# else:
# we color the letter the same as the residue
textcol = tkColFormat%(col[0]*255,col[1]*255, col[2]*255)
backCol = tkColFormat%(col[0]*255,col[1]*255, col[2]*255)
tag = ''
# draw residue letter
tid = canvas.create_text(posx, posy2, text=lab, anchor='nw', tags=('letter',molName,tag),
font=(('courier'), size, 'bold'), fill=textcol)
# build lookup allowing us to get a residue from the letter canvas id
tkIdToObj[tid] = res
# build lookup allowing us to the letter's canvas id from for a residue
resToTkid[res] = tid
if isinstance(res, Residue):
# draw selection polygon, but hide it until the residue is selected
bb = canvas.bbox(tid)
scid = canvas.create_rectangle( bb[0], bb[1]+pady, bb[2], bb[3]-pady, fill=backCol,
outline='', tags=('bg', molName), state='hidden')
cbres = CallbackFunction(self.postResidueMenu, res, lab)
canvas.tag_bind(tid, "<Button-3>", cbres)
scids[tid] = scid
# draw box on scroll bar canvas
y0 = 4 + (self.sequenceIndex[molecule]-1)*6
oid = bcan.create_rectangle( bb[0]*self.ratio, y0, bb[2]*self.ratio, y0+4,
fill=backCol, outline='', tags=(molName, 'feedback'),
state='hidden')
orientId[tid] = oid
elif isinstance(res, Chain):
self.chainItems[res] = tid
#cb1 = CallbackFunction(self.jumpToChain, res)
#canvas.tag_bind(tid, "<Button-1>", cb1)
cb3 = CallbackFunction(self.showHideChain, res)
canvas.tag_bind(tid, "<Button-3>", cb3)
# increment the x position
posx += width*len(lab)
self.seqLength[molecule] = canvas.bbox(molName)[2]
# lower all boxes below the labels
canvas.tag_lower('bg')
self.updateTotalWidth()
self.resizeCanvases()
# configure scroll area
#self.configureScrollRegion()
# configure scroll bar
#self.updateScrollBar()
# resize paned widget
#self.panew.setnaturalsize()
def configureScrollRegion(self):
canvas = self.canvas
self.lasty = self.nbVisibleSequences*self.rowScale*self.characterHeight
# configure the scrolling area
bb = self.canvas.bbox('all')
if bb is None: return
maxx = bb[2]
canvas.configure(scrollregion=(0,0,maxx,self.lasty))
def selectItems(self, items):
canvas = self.canvas
bcan = self.bcan
scids = self.scids
orientId = self.orientId
residues = []
for tid in items:
if tid == self.currentSelectionBox:
continue # selection box
res = self.tkIdToObj.get(tid)
if res is None:
continue # not a residue
residues.append(res)
# change letter color to black
self.canvas.itemconfig(tid, fill='black')
# make polygons behind letters visible
canvas.itemconfig(scids[tid], state='normal')
# add 'selected' tag
canvas.addtag_withtag('selected', tid)
canvas.addtag_withtag('selected', scids[tid])
bcan.addtag_withtag('selected', orientId[tid])
# make polygons in scroll bar visible
bcan.itemconfig(orientId[tid], state='normal')
self.updateScrollBar()
return residues
def deselectItems(self, items):
canvas = self.canvas
bcan = self.bcan
scids = self.scids
orientId = self.orientId
tkColFormat = '#%02X%02X%02X'
residues = []
for tid in items:
if tid == self.currentSelectionBox:
continue # selection box
res = self.tkIdToObj.get(tid)
if res is None:
continue # not a residue
residues.append(res)
## if res.hasCA:
## ca = res.get('CA')
## if len(ca)==0: # can happen when alternate position are available
## ca = res.get('CA@.')
## if len(ca)==0: # no CA found !
## col = (.5, .5, .5)
## else:
## col = ca.colors['lines'][0]
## else:
## col = ca.colors['lines'][0]
## else:
## at = res.get('C1*')
## if at:
## col = at[0].colors['lines']
## else:
## col = (.5, .5, .5)
if res.CAatom is not None:
col = res.CAatom.colors['lines']
elif res.C1atom is not None:
col = res.C1atom.colors['lines']
else:
col = (.5, .5, .5)
tkcol = tkColFormat%(col[0]*255,col[1]*255, col[2]*255)
# change letter color to residue color
canvas.itemconfig(tid, fill=tkcol)
# make polygons behind letters visible
canvas.itemconfig(scids[tid], state='hidden')
# make polygons in scroll bar visible
bcan.itemconfig(orientId[tid], state='hidden')
# remove 'selected' tag
canvas.dtag(tid, 'selected')
canvas.dtag(scids[tid], 'selected')
bcan.dtag(orientId[tid], 'selected')
self.updateScrollBar()
return residues
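    # Note (illustrative): selectItems()/deselectItems() take Tk canvas item ids
    # (e.g. from self.canvas.find_withtag('letter')), show or hide the coloured
    # backing rectangles and scroll-bar markers, and return the affected
    # Residue objects.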
def _showSequence(self, mol):
self.isVisible[mol] = True
self.nbVisibleSequences += 1
# show sequence and numbers (i.e. tag molecule but not bg
self.canvas.itemconfig("%s&&!bg"%mol.molTag, state='normal')
# show selected background poly and scroll bar items
self.canvas.itemconfig("%s&&selected"%mol.molTag, state='normal')
self.bcan.itemconfig("%s&&selected"%mol.molTag, state='normal')
# show molecule name
self.molNamesCanvas.itemconfig(mol.molTag, state='normal')
from paddle.fluid.initializer import Constant
from paddle.fluid.param_attr import ParamAttr
import paddle.fluid as fluid
class Fdresnext():
def __init__(self):
pass
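    # Illustrative usage sketch (static-graph Paddle 1.x fluid API; the input
    # name, shape and dtype below are assumptions based on the stem conv):
    #   x = fluid.data(name='x2paddle_input', shape=[-1, 3, 224, 224], dtype='float32')
    #   logits = Fdresnext().net(x)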
def net(self, x2paddle_input):
x2paddle_fdresnext_fc_bias = fluid.layers.create_parameter(dtype='float32', shape=[121],
name='x2paddle_fdresnext_fc_bias',
attr='x2paddle_fdresnext_fc_bias',
default_initializer=Constant(0.0))
x2paddle_fdresnext_fc_weight = fluid.layers.create_parameter(dtype='float32', shape=[121, 2048],
name='x2paddle_fdresnext_fc_weight',
attr='x2paddle_fdresnext_fc_weight',
default_initializer=Constant(0.0))
x2paddle_321 = fluid.layers.conv2d(x2paddle_input, num_filters=64, filter_size=[7, 7], stride=[2, 2],
padding=[3, 3], dilation=[1, 1], groups=1,
param_attr='x2paddle_fdresnext_conv1_weight', name='x2paddle_321',
bias_attr=False)
x2paddle_322 = fluid.layers.batch_norm(x2paddle_321, momentum=0.8999999761581421, epsilon=9.999999747378752e-06,
data_layout='NCHW', is_test=True,
param_attr='x2paddle_fdresnext_bn1_weight',
bias_attr='x2paddle_fdresnext_bn1_bias',
moving_mean_name='x2paddle_fdresnext_bn1_running_mean',
moving_variance_name='x2paddle_fdresnext_bn1_running_var',
use_global_stats=False, name='x2paddle_322')
x2paddle_323 = fluid.layers.relu(x2paddle_322, name='x2paddle_323')
x2paddle_324 = fluid.layers.pool2d(x2paddle_323, pool_size=[3, 3], pool_type='max', pool_stride=[2, 2],
pool_padding=[1, 1], ceil_mode=False, name='x2paddle_324', exclusive=False)
x2paddle_325 = fluid.layers.conv2d(x2paddle_324, num_filters=128, filter_size=[1, 1], stride=[1, 1],
padding=[0, 0], dilation=[1, 1], groups=1,
param_attr='x2paddle_fdresnext_layer1_0_conv1_weight', name='x2paddle_325',
bias_attr=False)
x2paddle_333 = fluid.layers.conv2d(x2paddle_324, num_filters=256, filter_size=[1, 1], stride=[1, 1],
padding=[0, 0], dilation=[1, 1], groups=1,
param_attr='x2paddle_fdresnext_layer1_0_downsample_0_weight',
name='x2paddle_333', bias_attr=False)
x2paddle_326 = fluid.layers.batch_norm(x2paddle_325, momentum=0.8999999761581421, epsilon=9.999999747378752e-06,
data_layout='NCHW', is_test=True,
param_attr='x2paddle_fdresnext_layer1_0_bn1_weight',
bias_attr='x2paddle_fdresnext_layer1_0_bn1_bias',
moving_mean_name='x2paddle_fdresnext_layer1_0_bn1_running_mean',
moving_variance_name='x2paddle_fdresnext_layer1_0_bn1_running_var',
use_global_stats=False, name='x2paddle_326')
x2paddle_334 = fluid.layers.batch_norm(x2paddle_333, momentum=0.8999999761581421, epsilon=9.999999747378752e-06,
data_layout='NCHW', is_test=True,
param_attr='x2paddle_fdresnext_layer1_0_downsample_1_weight',
bias_attr='x2paddle_fdresnext_layer1_0_downsample_1_bias',
moving_mean_name='x2paddle_fdresnext_layer1_0_downsample_1_running_mean',
moving_variance_name='x2paddle_fdresnext_layer1_0_downsample_1_running_var',
use_global_stats=False, name='x2paddle_334')
x2paddle_327 = fluid.layers.relu(x2paddle_326, name='x2paddle_327')
x2paddle_328 = fluid.layers.conv2d(x2paddle_327, num_filters=128, filter_size=[3, 3], stride=[1, 1],
padding=[1, 1], dilation=[1, 1], groups=32,
param_attr='x2paddle_fdresnext_layer1_0_conv2_weight', name='x2paddle_328',
bias_attr=False)
x2paddle_329 = fluid.layers.batch_norm(x2paddle_328, momentum=0.8999999761581421, epsilon=9.999999747378752e-06,
data_layout='NCHW', is_test=True,
param_attr='x2paddle_fdresnext_layer1_0_bn2_weight',
bias_attr='x2paddle_fdresnext_layer1_0_bn2_bias',
moving_mean_name='x2paddle_fdresnext_layer1_0_bn2_running_mean',
moving_variance_name='x2paddle_fdresnext_layer1_0_bn2_running_var',
use_global_stats=False, name='x2paddle_329')
x2paddle_330 = fluid.layers.relu(x2paddle_329, name='x2paddle_330')
x2paddle_331 = fluid.layers.conv2d(x2paddle_330, num_filters=256, filter_size=[1, 1], stride=[1, 1],
padding=[0, 0], dilation=[1, 1], groups=1,
param_attr='x2paddle_fdresnext_layer1_0_conv3_weight', name='x2paddle_331',
bias_attr=False)
x2paddle_332 = fluid.layers.batch_norm(x2paddle_331, momentum=0.8999999761581421, epsilon=9.999999747378752e-06,
data_layout='NCHW', is_test=True,
param_attr='x2paddle_fdresnext_layer1_0_bn3_weight',
bias_attr='x2paddle_fdresnext_layer1_0_bn3_bias',
moving_mean_name='x2paddle_fdresnext_layer1_0_bn3_running_mean',
moving_variance_name='x2paddle_fdresnext_layer1_0_bn3_running_var',
use_global_stats=False, name='x2paddle_332')
x2paddle_335 = fluid.layers.elementwise_add(x=x2paddle_332, y=x2paddle_334, name='x2paddle_335')
x2paddle_336 = fluid.layers.relu(x2paddle_335, name='x2paddle_336')
x2paddle_337 = fluid.layers.conv2d(x2paddle_336, num_filters=128, filter_size=[1, 1], stride=[1, 1],
padding=[0, 0], dilation=[1, 1], groups=1,
param_attr='x2paddle_fdresnext_layer1_1_conv1_weight', name='x2paddle_337',
bias_attr=False)
x2paddle_338 = fluid.layers.batch_norm(x2paddle_337, momentum=0.8999999761581421, epsilon=9.999999747378752e-06,
data_layout='NCHW', is_test=True,
param_attr='x2paddle_fdresnext_layer1_1_bn1_weight',
bias_attr='x2paddle_fdresnext_layer1_1_bn1_bias',
moving_mean_name='x2paddle_fdresnext_layer1_1_bn1_running_mean',
moving_variance_name='x2paddle_fdresnext_layer1_1_bn1_running_var',
use_global_stats=False, name='x2paddle_338')
x2paddle_339 = fluid.layers.relu(x2paddle_338, name='x2paddle_339')
x2paddle_340 = fluid.layers.conv2d(x2paddle_339, num_filters=128, filter_size=[3, 3], stride=[1, 1],
padding=[1, 1], dilation=[1, 1], groups=32,
param_attr='x2paddle_fdresnext_layer1_1_conv2_weight', name='x2paddle_340',
bias_attr=False)
x2paddle_341 = fluid.layers.batch_norm(x2paddle_340, momentum=0.8999999761581421, epsilon=9.999999747378752e-06,
data_layout='NCHW', is_test=True,
param_attr='x2paddle_fdresnext_layer1_1_bn2_weight',
bias_attr='x2paddle_fdresnext_layer1_1_bn2_bias',
moving_mean_name='x2paddle_fdresnext_layer1_1_bn2_running_mean',
moving_variance_name='x2paddle_fdresnext_layer1_1_bn2_running_var',
use_global_stats=False, name='x2paddle_341')
x2paddle_342 = fluid.layers.relu(x2paddle_341, name='x2paddle_342')
x2paddle_343 = fluid.layers.conv2d(x2paddle_342, num_filters=256, filter_size=[1, 1], stride=[1, 1],
padding=[0, 0], dilation=[1, 1], groups=1,
param_attr='x2paddle_fdresnext_layer1_1_conv3_weight', name='x2paddle_343',
bias_attr=False)
x2paddle_344 = fluid.layers.batch_norm(x2paddle_343, momentum=0.8999999761581421, epsilon=9.999999747378752e-06,
data_layout='NCHW', is_test=True,
param_attr='x2paddle_fdresnext_layer1_1_bn3_weight',
bias_attr='x2paddle_fdresnext_layer1_1_bn3_bias',
moving_mean_name='x2paddle_fdresnext_layer1_1_bn3_running_mean',
moving_variance_name='x2paddle_fdresnext_layer1_1_bn3_running_var',
use_global_stats=False, name='x2paddle_344')
x2paddle_345 = fluid.layers.elementwise_add(x=x2paddle_344, y=x2paddle_336, name='x2paddle_345')
x2paddle_346 = fluid.layers.relu(x2paddle_345, name='x2paddle_346')
x2paddle_347 = fluid.layers.conv2d(x2paddle_346, num_filters=128, filter_size=[1, 1], stride=[1, 1],
padding=[0, 0], dilation=[1, 1], groups=1,
param_attr='x2paddle_fdresnext_layer1_2_conv1_weight', name='x2paddle_347',
bias_attr=False)
x2paddle_348 = fluid.layers.batch_norm(x2paddle_347, momentum=0.8999999761581421, epsilon=9.999999747378752e-06,
data_layout='NCHW', is_test=True,
param_attr='x2paddle_fdresnext_layer1_2_bn1_weight',
bias_attr='x2paddle_fdresnext_layer1_2_bn1_bias',
moving_mean_name='x2paddle_fdresnext_layer1_2_bn1_running_mean',
moving_variance_name='x2paddle_fdresnext_layer1_2_bn1_running_var',
use_global_stats=False, name='x2paddle_348')
x2paddle_349 = fluid.layers.relu(x2paddle_348, name='x2paddle_349')
x2paddle_350 = fluid.layers.conv2d(x2paddle_349, num_filters=128, filter_size=[3, 3], stride=[1, 1],
padding=[1, 1], dilation=[1, 1], groups=32,
param_attr='x2paddle_fdresnext_layer1_2_conv2_weight', name='x2paddle_350',
bias_attr=False)
x2paddle_351 = fluid.layers.batch_norm(x2paddle_350, momentum=0.8999999761581421, epsilon=9.999999747378752e-06,
data_layout='NCHW', is_test=True,
param_attr='x2paddle_fdresnext_layer1_2_bn2_weight',
bias_attr='x2paddle_fdresnext_layer1_2_bn2_bias',
moving_mean_name='x2paddle_fdresnext_layer1_2_bn2_running_mean',
moving_variance_name='x2paddle_fdresnext_layer1_2_bn2_running_var',
use_global_stats=False, name='x2paddle_351')
x2paddle_352 = fluid.layers.relu(x2paddle_351, name='x2paddle_352')
x2paddle_353 = fluid.layers.conv2d(x2paddle_352, num_filters=256, filter_size=[1, 1], stride=[1, 1],
padding=[0, 0], dilation=[1, 1], groups=1,
param_attr='x2paddle_fdresnext_layer1_2_conv3_weight', name='x2paddle_353',
bias_attr=False)
x2paddle_354 = fluid.layers.batch_norm(x2paddle_353, momentum=0.8999999761581421, epsilon=9.999999747378752e-06,
data_layout='NCHW', is_test=True,
param_attr='x2paddle_fdresnext_layer1_2_bn3_weight',
bias_attr='x2paddle_fdresnext_layer1_2_bn3_bias',
moving_mean_name='x2paddle_fdresnext_layer1_2_bn3_running_mean',
moving_variance_name='x2paddle_fdresnext_layer1_2_bn3_running_var',
use_global_stats=False, name='x2paddle_354')
x2paddle_355 = fluid.layers.elementwise_add(x=x2paddle_354, y=x2paddle_346, name='x2paddle_355')
x2paddle_356 = fluid.layers.relu(x2paddle_355, name='x2paddle_356')
x2paddle_357 = fluid.layers.conv2d(x2paddle_356, num_filters=256, filter_size=[1, 1], stride=[1, 1],
padding=[0, 0], dilation=[1, 1], groups=1,
param_attr='x2paddle_fdresnext_layer2_0_conv1_weight', name='x2paddle_357',
bias_attr=False)
x2paddle_365 = fluid.layers.conv2d(x2paddle_356, num_filters=512, filter_size=[1, 1], stride=[2, 2],
padding=[0, 0], dilation=[1, 1], groups=1,
param_attr='x2paddle_fdresnext_layer2_0_downsample_0_weight',
name='x2paddle_365', bias_attr=False)
x2paddle_358 = fluid.layers.batch_norm(x2paddle_357, momentum=0.8999999761581421, epsilon=9.999999747378752e-06,
data_layout='NCHW', is_test=True,
param_attr='x2paddle_fdresnext_layer2_0_bn1_weight',
bias_attr='x2paddle_fdresnext_layer2_0_bn1_bias',
moving_mean_name='x2paddle_fdresnext_layer2_0_bn1_running_mean',
moving_variance_name='x2paddle_fdresnext_layer2_0_bn1_running_var',
use_global_stats=False, name='x2paddle_358')
x2paddle_366 = fluid.layers.batch_norm(x2paddle_365, momentum=0.8999999761581421, epsilon=9.999999747378752e-06,
data_layout='NCHW', is_test=True,
param_attr='x2paddle_fdresnext_layer2_0_downsample_1_weight',
bias_attr='x2paddle_fdresnext_layer2_0_downsample_1_bias',
moving_mean_name='x2paddle_fdresnext_layer2_0_downsample_1_running_mean',
moving_variance_name='x2paddle_fdresnext_layer2_0_downsample_1_running_var',
use_global_stats=False, name='x2paddle_366')
x2paddle_359 = fluid.layers.relu(x2paddle_358, name='x2paddle_359')
x2paddle_360 = fluid.layers.conv2d(x2paddle_359, num_filters=256, filter_size=[3, 3], stride=[2, 2],
padding=[1, 1], dilation=[1, 1], groups=32,
param_attr='x2paddle_fdresnext_layer2_0_conv2_weight', name='x2paddle_360',
bias_attr=False)
x2paddle_361 = fluid.layers.batch_norm(x2paddle_360, momentum=0.8999999761581421, epsilon=9.999999747378752e-06,
data_layout='NCHW', is_test=True,
param_attr='x2paddle_fdresnext_layer2_0_bn2_weight',
bias_attr='x2paddle_fdresnext_layer2_0_bn2_bias',
moving_mean_name='x2paddle_fdresnext_layer2_0_bn2_running_mean',
moving_variance_name='x2paddle_fdresnext_layer2_0_bn2_running_var',
use_global_stats=False, name='x2paddle_361')
x2paddle_362 = fluid.layers.relu(x2paddle_361, name='x2paddle_362')
x2paddle_363 = fluid.layers.conv2d(x2paddle_362, num_filters=512, filter_size=[1, 1], stride=[1, 1],
padding=[0, 0], dilation=[1, 1], groups=1,
param_attr='x2paddle_fdresnext_layer2_0_conv3_weight', name='x2paddle_363',
bias_attr=False)
x2paddle_364 = fluid.layers.batch_norm(x2paddle_363, momentum=0.8999999761581421, epsilon=9.999999747378752e-06,
data_layout='NCHW', is_test=True,
param_attr='x2paddle_fdresnext_layer2_0_bn3_weight',
bias_attr='x2paddle_fdresnext_layer2_0_bn3_bias',
moving_mean_name='x2paddle_fdresnext_layer2_0_bn3_running_mean',
moving_variance_name='x2paddle_fdresnext_layer2_0_bn3_running_var',
use_global_stats=False, name='x2paddle_364')
x2paddle_367 = fluid.layers.elementwise_add(x=x2paddle_364, y=x2paddle_366, name='x2paddle_367')
x2paddle_368 = fluid.layers.relu(x2paddle_367, name='x2paddle_368')
x2paddle_369 = fluid.layers.conv2d(x2paddle_368, num_filters=256, filter_size=[1, 1], stride=[1, 1],
padding=[0, 0], dilation=[1, 1], groups=1,
param_attr='x2paddle_fdresnext_layer2_1_conv1_weight', name='x2paddle_369',
bias_attr=False)
x2paddle_370 = fluid.layers.batch_norm(x2paddle_369, momentum=0.8999999761581421, epsilon=9.999999747378752e-06,
data_layout='NCHW', is_test=True,
param_attr='x2paddle_fdresnext_layer2_1_bn1_weight',
bias_attr='x2paddle_fdresnext_layer2_1_bn1_bias',
moving_mean_name='x2paddle_fdresnext_layer2_1_bn1_running_mean',
moving_variance_name='x2paddle_fdresnext_layer2_1_bn1_running_var',
use_global_stats=False, name='x2paddle_370')
x2paddle_371 = fluid.layers.relu(x2paddle_370, name='x2paddle_371')
x2paddle_372 = fluid.layers.conv2d(x2paddle_371, num_filters=256, filter_size=[3, 3], stride=[1, 1],
padding=[1, 1], dilation=[1, 1], groups=32,
param_attr='x2paddle_fdresnext_layer2_1_conv2_weight', name='x2paddle_372',
bias_attr=False)
x2paddle_373 = fluid.layers.batch_norm(x2paddle_372, momentum=0.8999999761581421, epsilon=9.999999747378752e-06,
data_layout='NCHW', is_test=True,
param_attr='x2paddle_fdresnext_layer2_1_bn2_weight',
bias_attr='x2paddle_fdresnext_layer2_1_bn2_bias',
moving_mean_name='x2paddle_fdresnext_layer2_1_bn2_running_mean',
moving_variance_name='x2paddle_fdresnext_layer2_1_bn2_running_var',
use_global_stats=False, name='x2paddle_373')
x2paddle_374 = fluid.layers.relu(x2paddle_373, name='x2paddle_374')
x2paddle_375 = fluid.layers.conv2d(x2paddle_374, num_filters=512, filter_size=[1, 1], stride=[1, 1],
padding=[0, 0], dilation=[1, 1], groups=1,
param_attr='x2paddle_fdresnext_layer2_1_conv3_weight', name='x2paddle_375',
bias_attr=False)
x2paddle_376 = fluid.layers.batch_norm(x2paddle_375, momentum=0.8999999761581421, epsilon=9.999999747378752e-06,
data_layout='NCHW', is_test=True,
param_attr='x2paddle_fdresnext_layer2_1_bn3_weight',
bias_attr='x2paddle_fdresnext_layer2_1_bn3_bias',
moving_mean_name='x2paddle_fdresnext_layer2_1_bn3_running_mean',
moving_variance_name='x2paddle_fdresnext_layer2_1_bn3_running_var',
use_global_stats=False, name='x2paddle_376')
x2paddle_377 = fluid.layers.elementwise_add(x=x2paddle_376, y=x2paddle_368, name='x2paddle_377')
x2paddle_378 = fluid.layers.relu(x2paddle_377, name='x2paddle_378')
x2paddle_379 = fluid.layers.conv2d(x2paddle_378, num_filters=256, filter_size=[1, 1], stride=[1, 1],
padding=[0, 0], dilation=[1, 1], groups=1,
param_attr='x2paddle_fdresnext_layer2_2_conv1_weight', name='x2paddle_379',
bias_attr=False)
x2paddle_380 = fluid.layers.batch_norm(x2paddle_379, momentum=0.8999999761581421, epsilon=9.999999747378752e-06,
data_layout='NCHW', is_test=True,
param_attr='x2paddle_fdresnext_layer2_2_bn1_weight',
bias_attr='x2paddle_fdresnext_layer2_2_bn1_bias',
moving_mean_name='x2paddle_fdresnext_layer2_2_bn1_running_mean',
moving_variance_name='x2paddle_fdresnext_layer2_2_bn1_running_var',
use_global_stats=False, name='x2paddle_380')
x2paddle_381 = fluid.layers.relu(x2paddle_380, name='x2paddle_381')
x2paddle_382 = fluid.layers.conv2d(x2paddle_381, num_filters=256, filter_size=[3, 3], stride=[1, 1],
padding=[1, 1], dilation=[1, 1], groups=32,
param_attr='x2paddle_fdresnext_layer2_2_conv2_weight', name='x2paddle_382',
bias_attr=False)
x2paddle_383 = fluid.layers.batch_norm(x2paddle_382, momentum=0.8999999761581421, epsilon=9.999999747378752e-06,
data_layout='NCHW', is_test=True,
param_attr='x2paddle_fdresnext_layer2_2_bn2_weight',
bias_attr='x2paddle_fdresnext_layer2_2_bn2_bias',
moving_mean_name='x2paddle_fdresnext_layer2_2_bn2_running_mean',
moving_variance_name='x2paddle_fdresnext_layer2_2_bn2_running_var',
use_global_stats=False, name='x2paddle_383')
x2paddle_384 = fluid.layers.relu(x2paddle_383, name='x2paddle_384')
x2paddle_385 = fluid.layers.conv2d(x2paddle_384, num_filters=512, filter_size=[1, 1], stride=[1, 1],
padding=[0, 0], dilation=[1, 1], groups=1,
param_attr='x2paddle_fdresnext_layer2_2_conv3_weight', name='x2paddle_385',
bias_attr=False)
x2paddle_386 = fluid.layers.batch_norm(x2paddle_385, momentum=0.8999999761581421, epsilon=9.999999747378752e-06,
data_layout='NCHW', is_test=True,
param_attr='x2paddle_fdresnext_layer2_2_bn3_weight',
bias_attr='x2paddle_fdresnext_layer2_2_bn3_bias',
moving_mean_name='x2paddle_fdresnext_layer2_2_bn3_running_mean',
moving_variance_name='x2paddle_fdresnext_layer2_2_bn3_running_var',
use_global_stats=False, name='x2paddle_386')
x2paddle_387 = fluid.layers.elementwise_add(x=x2paddle_386, y=x2paddle_378, name='x2paddle_387')
x2paddle_388 = fluid.layers.relu(x2paddle_387, name='x2paddle_388')
x2paddle_389 = fluid.layers.conv2d(x2paddle_388, num_filters=256, filter_size=[1, 1], stride=[1, 1],
padding=[0, 0], dilation=[1, 1], groups=1,
param_attr='x2paddle_fdresnext_layer2_3_conv1_weight', name='x2paddle_389',
bias_attr=False)
x2paddle_390 = fluid.layers.batch_norm(x2paddle_389, momentum=0.8999999761581421, epsilon=9.999999747378752e-06,
data_layout='NCHW', is_test=True,
param_attr='x2paddle_fdresnext_layer2_3_bn1_weight',
bias_attr='x2paddle_fdresnext_layer2_3_bn1_bias',
moving_mean_name='x2paddle_fdresnext_layer2_3_bn1_running_mean',
moving_variance_name='x2paddle_fdresnext_layer2_3_bn1_running_var',
use_global_stats=False, name='x2paddle_390')
x2paddle_391 = fluid.layers.relu(x2paddle_390, name='x2paddle_391')
x2paddle_392 = fluid.layers.conv2d(x2paddle_391, num_filters=256, filter_size=[3, 3], stride=[1, 1],
padding=[1, 1], dilation=[1, 1], groups=32,
param_attr='x2paddle_fdresnext_layer2_3_conv2_weight', name='x2paddle_392',
bias_attr=False)
x2paddle_393 = fluid.layers.batch_norm(x2paddle_392, momentum=0.8999999761581421, epsilon=9.999999747378752e-06,
data_layout='NCHW', is_test=True,
param_attr='x2paddle_fdresnext_layer2_3_bn2_weight',
bias_attr='x2paddle_fdresnext_layer2_3_bn2_bias',
moving_mean_name='x2paddle_fdresnext_layer2_3_bn2_running_mean',
moving_variance_name='x2paddle_fdresnext_layer2_3_bn2_running_var',
use_global_stats=False, name='x2paddle_393')
x2paddle_394 = fluid.layers.relu(x2paddle_393, name='x2paddle_394')
x2paddle_395 = fluid.layers.conv2d(x2paddle_394, num_filters=512, filter_size=[1, 1], stride=[1, 1],
padding=[0, 0], dilation=[1, 1], groups=1,
param_attr='x2paddle_fdresnext_layer2_3_conv3_weight', name='x2paddle_395',
bias_attr=False)
x2paddle_396 = fluid.layers.batch_norm(x2paddle_395, momentum=0.8999999761581421, epsilon=9.999999747378752e-06,
data_layout='NCHW', is_test=True,
param_attr='x2paddle_fdresnext_layer2_3_bn3_weight',
bias_attr='x2paddle_fdresnext_layer2_3_bn3_bias',
moving_mean_name='x2paddle_fdresnext_layer2_3_bn3_running_mean',
moving_variance_name='x2paddle_fdresnext_layer2_3_bn3_running_var',
use_global_stats=False, name='x2paddle_396')
x2paddle_397 = fluid.layers.elementwise_add(x=x2paddle_396, y=x2paddle_388, name='x2paddle_397')
x2paddle_398 = fluid.layers.relu(x2paddle_397, name='x2paddle_398')
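# layer3_0: first ResNeXt bottleneck of stage 3. x2paddle_399..405 form the main path
# (1x1 conv -> grouped 3x3 conv with stride 2 -> 1x1 conv), while x2paddle_407/408 are the
# strided 1x1 downsample branch; the two paths are summed in x2paddle_409 (residual connection).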
x2paddle_399 = fluid.layers.conv2d(x2paddle_398, num_filters=512, filter_size=[1, 1], stride=[1, 1],
padding=[0, 0], dilation=[1, 1], groups=1,
param_attr='x2paddle_fdresnext_layer3_0_conv1_weight', name='x2paddle_399',
bias_attr=False)
x2paddle_407 = fluid.layers.conv2d(x2paddle_398, num_filters=1024, filter_size=[1, 1], stride=[2, 2],
padding=[0, 0], dilation=[1, 1], groups=1,
param_attr='x2paddle_fdresnext_layer3_0_downsample_0_weight',
name='x2paddle_407', bias_attr=False)
x2paddle_400 = fluid.layers.batch_norm(x2paddle_399, momentum=0.8999999761581421, epsilon=9.999999747378752e-06,
data_layout='NCHW', is_test=True,
param_attr='x2paddle_fdresnext_layer3_0_bn1_weight',
bias_attr='x2paddle_fdresnext_layer3_0_bn1_bias',
moving_mean_name='x2paddle_fdresnext_layer3_0_bn1_running_mean',
moving_variance_name='x2paddle_fdresnext_layer3_0_bn1_running_var',
use_global_stats=False, name='x2paddle_400')
x2paddle_408 = fluid.layers.batch_norm(x2paddle_407, momentum=0.8999999761581421, epsilon=9.999999747378752e-06,
data_layout='NCHW', is_test=True,
param_attr='x2paddle_fdresnext_layer3_0_downsample_1_weight',
bias_attr='x2paddle_fdresnext_layer3_0_downsample_1_bias',
moving_mean_name='x2paddle_fdresnext_layer3_0_downsample_1_running_mean',
moving_variance_name='x2paddle_fdresnext_layer3_0_downsample_1_running_var',
use_global_stats=False, name='x2paddle_408')
x2paddle_401 = fluid.layers.relu(x2paddle_400, name='x2paddle_401')
x2paddle_402 = fluid.layers.conv2d(x2paddle_401, num_filters=512, filter_size=[3, 3], stride=[2, 2],
padding=[1, 1], dilation=[1, 1], groups=32,
param_attr='x2paddle_fdresnext_layer3_0_conv2_weight', name='x2paddle_402',
bias_attr=False)
x2paddle_403 = fluid.layers.batch_norm(x2paddle_402, momentum=0.8999999761581421, epsilon=9.999999747378752e-06,
data_layout='NCHW', is_test=True,
param_attr='x2paddle_fdresnext_layer3_0_bn2_weight',
bias_attr='x2paddle_fdresnext_layer3_0_bn2_bias',
moving_mean_name='x2paddle_fdresnext_layer3_0_bn2_running_mean',
moving_variance_name='x2paddle_fdresnext_layer3_0_bn2_running_var',
use_global_stats=False, name='x2paddle_403')
x2paddle_404 = fluid.layers.relu(x2paddle_403, name='x2paddle_404')
x2paddle_405 = fluid.layers.conv2d(x2paddle_404, num_filters=1024, filter_size=[1, 1], stride=[1, 1],
padding=[0, 0], dilation=[1, 1], groups=1,
param_attr='x2paddle_fdresnext_layer3_0_conv3_weight', name='x2paddle_405',
bias_attr=False)
x2paddle_406 = fluid.layers.batch_norm(x2paddle_405, momentum=0.8999999761581421, epsilon=9.999999747378752e-06,
data_layout='NCHW', is_test=True,
param_attr='x2paddle_fdresnext_layer3_0_bn3_weight',
bias_attr='x2paddle_fdresnext_layer3_0_bn3_bias',
moving_mean_name='x2paddle_fdresnext_layer3_0_bn3_running_mean',
moving_variance_name='x2paddle_fdresnext_layer3_0_bn3_running_var',
use_global_stats=False, name='x2paddle_406')
x2paddle_409 = fluid.layers.elementwise_add(x=x2paddle_406, y=x2paddle_408, name='x2paddle_409')
x2paddle_410 = fluid.layers.relu(x2paddle_409, name='x2paddle_410')
x2paddle_411 = fluid.layers.conv2d(x2paddle_410, num_filters=512, filter_size=[1, 1], stride=[1, 1],
padding=[0, 0], dilation=[1, 1], groups=1,
param_attr='x2paddle_fdresnext_layer3_1_conv1_weight', name='x2paddle_411',
bias_attr=False)
x2paddle_412 = fluid.layers.batch_norm(x2paddle_411, momentum=0.8999999761581421, epsilon=9.999999747378752e-06,
data_layout='NCHW', is_test=True,
param_attr='x2paddle_fdresnext_layer3_1_bn1_weight',
bias_attr='x2paddle_fdresnext_layer3_1_bn1_bias',
moving_mean_name='x2paddle_fdresnext_layer3_1_bn1_running_mean',
moving_variance_name='x2paddle_fdresnext_layer3_1_bn1_running_var',
use_global_stats=False, name='x2paddle_412')
x2paddle_413 = fluid.layers.relu(x2paddle_412, name='x2paddle_413')
x2paddle_414 = fluid.layers.conv2d(x2paddle_413, num_filters=512, filter_size=[3, 3], stride=[1, 1],
padding=[1, 1], dilation=[1, 1], groups=32,
param_attr='x2paddle_fdresnext_layer3_1_conv2_weight', name='x2paddle_414',
bias_attr=False)
x2paddle_415 = fluid.layers.batch_norm(x2paddle_414, momentum=0.8999999761581421, epsilon=9.999999747378752e-06,
data_layout='NCHW', is_test=True,
param_attr='x2paddle_fdresnext_layer3_1_bn2_weight',
bias_attr='x2paddle_fdresnext_layer3_1_bn2_bias',
moving_mean_name='x2paddle_fdresnext_layer3_1_bn2_running_mean',
moving_variance_name='x2paddle_fdresnext_layer3_1_bn2_running_var',
use_global_stats=False, name='x2paddle_415')
x2paddle_416 = fluid.layers.relu(x2paddle_415, name='x2paddle_416')
x2paddle_417 = fluid.layers.conv2d(x2paddle_416, num_filters=1024, filter_size=[1, 1], stride=[1, 1],
padding=[0, 0], dilation=[1, 1], groups=1,
param_attr='x2paddle_fdresnext_layer3_1_conv3_weight', name='x2paddle_417',
bias_attr=False)
x2paddle_418 = fluid.layers.batch_norm(x2paddle_417, momentum=0.8999999761581421, epsilon=9.999999747378752e-06,
data_layout='NCHW', is_test=True,
param_attr='x2paddle_fdresnext_layer3_1_bn3_weight',
bias_attr='x2paddle_fdresnext_layer3_1_bn3_bias',
moving_mean_name='x2paddle_fdresnext_layer3_1_bn3_running_mean',
moving_variance_name='x2paddle_fdresnext_layer3_1_bn3_running_var',
use_global_stats=False, name='x2paddle_418')
x2paddle_419 = fluid.layers.elementwise_add(x=x2paddle_418, y=x2paddle_410, name='x2paddle_419')
x2paddle_420 = fluid.layers.relu(x2paddle_419, name='x2paddle_420')
x2paddle_421 = fluid.layers.conv2d(x2paddle_420, num_filters=512, filter_size=[1, 1], stride=[1, 1],
padding=[0, 0], dilation=[1, 1], groups=1,
param_attr='x2paddle_fdresnext_layer3_2_conv1_weight', name='x2paddle_421',
bias_attr=False)
x2paddle_422 = fluid.layers.batch_norm(x2paddle_421, momentum=0.8999999761581421, epsilon=9.999999747378752e-06,
data_layout='NCHW', is_test=True,
param_attr='x2paddle_fdresnext_layer3_2_bn1_weight',
bias_attr='x2paddle_fdresnext_layer3_2_bn1_bias',
moving_mean_name='x2paddle_fdresnext_layer3_2_bn1_running_mean',
moving_variance_name='x2paddle_fdresnext_layer3_2_bn1_running_var',
use_global_stats=False, name='x2paddle_422')
x2paddle_423 = fluid.layers.relu(x2paddle_422, name='x2paddle_423')
x2paddle_424 = fluid.layers.conv2d(x2paddle_423, num_filters=512, filter_size=[3, 3], stride=[1, 1],
padding=[1, 1], dilation=[1, 1], groups=32,
param_attr='x2paddle_fdresnext_layer3_2_conv2_weight', name='x2paddle_424',
bias_attr=False)
x2paddle_425 = fluid.layers.batch_norm(x2paddle_424, momentum=0.8999999761581421, epsilon=9.999999747378752e-06,
data_layout='NCHW', is_test=True,
param_attr='x2paddle_fdresnext_layer3_2_bn2_weight',
bias_attr='x2paddle_fdresnext_layer3_2_bn2_bias',
moving_mean_name='x2paddle_fdresnext_layer3_2_bn2_running_mean',
moving_variance_name='x2paddle_fdresnext_layer3_2_bn2_running_var',
use_global_stats=False, name='x2paddle_425')
x2paddle_426 = fluid.layers.relu(x2paddle_425, name='x2paddle_426')
x2paddle_427 = fluid.layers.conv2d(x2paddle_426, num_filters=1024, filter_size=[1, 1], stride=[1, 1],
padding=[0, 0], dilation=[1, 1], groups=1,
param_attr='x2paddle_fdresnext_layer3_2_conv3_weight', name='x2paddle_427',
bias_attr=False)
x2paddle_428 = fluid.layers.batch_norm(x2paddle_427, momentum=0.8999999761581421, epsilon=9.999999747378752e-06,
data_layout='NCHW', is_test=True,
param_attr='x2paddle_fdresnext_layer3_2_bn3_weight',
bias_attr='x2paddle_fdresnext_layer3_2_bn3_bias',
moving_mean_name='x2paddle_fdresnext_layer3_2_bn3_running_mean',
moving_variance_name='x2paddle_fdresnext_layer3_2_bn3_running_var',
use_global_stats=False, name='x2paddle_428')
x2paddle_429 = fluid.layers.elementwise_add(x=x2paddle_428, y=x2paddle_420, name='x2paddle_429')
x2paddle_430 = fluid.layers.relu(x2paddle_429, name='x2paddle_430')
x2paddle_431 = fluid.layers.conv2d(x2paddle_430, num_filters=512, filter_size=[1, 1], stride=[1, 1],
padding=[0, 0], dilation=[1, 1], groups=1,
param_attr='x2paddle_fdresnext_layer3_3_conv1_weight', name='x2paddle_431',
bias_attr=False)
x2paddle_432 = fluid.layers.batch_norm(x2paddle_431, momentum=0.8999999761581421, epsilon=9.999999747378752e-06,
data_layout='NCHW', is_test=True,
param_attr='x2paddle_fdresnext_layer3_3_bn1_weight',
bias_attr='x2paddle_fdresnext_layer3_3_bn1_bias',
moving_mean_name='x2paddle_fdresnext_layer3_3_bn1_running_mean',
moving_variance_name='x2paddle_fdresnext_layer3_3_bn1_running_var',
use_global_stats=False, name='x2paddle_432')
x2paddle_433 = fluid.layers.relu(x2paddle_432, name='x2paddle_433')
x2paddle_434 = fluid.layers.conv2d(x2paddle_433, num_filters=512, filter_size=[3, 3], stride=[1, 1],
padding=[1, 1], dilation=[1, 1], groups=32,
param_attr='x2paddle_fdresnext_layer3_3_conv2_weight', name='x2paddle_434',
bias_attr=False)
x2paddle_435 = fluid.layers.batch_norm(x2paddle_434, momentum=0.8999999761581421, epsilon=9.999999747378752e-06,
data_layout='NCHW', is_test=True,
param_attr='x2paddle_fdresnext_layer3_3_bn2_weight',
bias_attr='x2paddle_fdresnext_layer3_3_bn2_bias',
moving_mean_name='x2paddle_fdresnext_layer3_3_bn2_running_mean',
moving_variance_name='x2paddle_fdresnext_layer3_3_bn2_running_var',
use_global_stats=False, name='x2paddle_435')
x2paddle_436 = fluid.layers.relu(x2paddle_435, name='x2paddle_436')
x2paddle_437 = fluid.layers.conv2d(x2paddle_436, num_filters=1024, filter_size=[1, 1], stride=[1, 1],
padding=[0, | |
********** PROBLEM SOLVING ****************
U.P.E.R.
1. UNDERSTAND:
Before doing anything else, make sure you understand precisely what the problem is asking. A helpful starting point is to restate the technical description of the problem in your own words.
Questions
Here is a list of starter questions that might come up during this step:
What are the inputs your code receives?
What is the range of the input?
How big can the input be (how much data)?
What are the outputs your code produces?
What is the range of the output?
How big can the output be (how much data)?
How performant must the code be?
Is there anything missing from the task description that needs to be clarified?
What assumptions are you making?
Does anyone else on the team need to validate these assumptions?
2. PLAN:
This step is where you will ask, "what steps will I take to solve the problem?" You will take your description of the problem and transform it into a complete, actionable plan to solve that problem. If you realize you still don't truly understand the problem while planning, return to Understand until you resolve the ambiguity. If you have not yet completed Understand, you will end up planning to solve the wrong problem! When interviewing, you must do this step aloud!
Remember, you aren't coding during this step unless it's a small piece of throwaway code to test a hypothesis. You should, however, sketch your plan in pseudocode during this step; a short illustrative sketch follows the questions below.
Questions
Do you know the answer to a similar problem that has comparable inputs and outputs?
Does this problem remind you of anything else?
Can you bring that knowledge to bear here?
Does my plan meet the performance requirements?
What’s the time complexity?
What’s the space complexity?
How big can my input data be?
Can sorting the input data ahead of time lead to any improvements in time complexity?
Does recursion help?
Is the problem made up of identical subproblems?
Can you state the problem with itself in its definition?
Think like a villain. Does your plan cover the edge cases?
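For example, a plan for "return the first value that appears twice in a list" could be sketched roughly like the snippet below before any real code is written. The sketch is purely illustrative; the function name and approach are placeholders rather than part of the original text.
def first_duplicate(values):
    seen = set()               # values visited so far
    for v in values:
        if v in seen:          # already seen, so this is the first duplicate
            return v
        seen.add(v)
    return None                # no duplicate found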
3. EXECUTE:
This step is where you take your plan and convert it to actual working code. This step isn’t easy, but it’s much easier if you've done an excellent job with the "Understand" and "Plan" steps above. If you find shortcomings in your plan while implementing the solution, return to the "Plan" phase until you resolve the ambiguity. If you have not yet completed the "Plan" step, you will spend far longer on the "Execute" step than you have to.
Questions
Think like a villain. Does your implementation handle all inputs?
What is the best way to split this code into separate functions or classes?
Does this functionality already exist?
Are there built-in libraries I can leverage?
Are there third-party libraries I can leverage?
4. REFLECT:
The primary question you are dealing with during this step is: "Is this implementation as good as I can make it? Would I be proud to show this code to another programmer?"
Questions
Does your solution work in all cases?
Main case?
Edge cases?
Is the solution performant enough?
Is the code documented?
In retrospect, what would you do differently? What will you do differently next time?
What went right?
What went wrong?
-------------------------------------
********** ASKING FOR HELP **********
Overview
The most important thing to do when asking a question is to imagine that you are the one trying to answer it. Because you are deep in the weeds of your problem, it's easy to forget that the person who answers your question does not have all the context. Keep this general rule in mind; it's the overarching rule for all the details we go into next.
Search and Research
Before you do anything else, search for a solution to your problem on your own. One thing you should start doing is keeping track of all your research when solving a problem. One easy way to do this is to have a browser window represent a specific search for a solution, and each open tab represents an attempt at solving it. Keeping track of your research is vital because it's helpful to provide examples of similar questions or similar problems and explain why those didn't answer your specific problem or question. It also helps the person answering your question avoid pointing you toward resources you've already explored, and lets them know that you've already put in the work.
Introduce the Problem
The first thing you do when you ask a question is to introduce the problem. The first paragraph of your written question should serve as an executive summary of the problem. All the following paragraphs should fill in the details of the problem.
An important thing to include in your problem introduction is a precise explanation of how you encountered the problem. Write about the difficulties that kept you from solving it. Describe what you already tried and include the results of the research you've done.
You should also provide as much detail about the context as possible. For instance, include the language version, the platform version, the operating system, the database type, specific IDE, and any web server information. You should also include your particular constraints. For example, you may not be allowed to use feature A or B that would provide an obvious solution. If you have an odd constraint, it may also help explain why you have that constraint.
Help Others Reproduce the Problem
One thing to remember is that not all questions benefit from including code. However, if you do include code, definitely do not just copy in your entire program! Including irrelevant details makes your question much harder to answer and decreases the chances of someone helping you.
Here are some guidelines for when to include code in your question.
Minimal
Include just enough code to allow others to reproduce your specific problem. One way to do this is to restart from scratch. Do not include snippets of your entire program. Instead, create a new program, but only add what's necessary to recreate the problem.
If you aren't exactly sure where the problem code is, one way to find it is by removing code chunks one at a time until the problem disappears — then add back the last part. This way, you can deduce that the last piece of code you added back is likely the source of your problem.
Be careful not to remove too much code, either. Keep your question brief, but maintain enough context for clarity.
Complete
Make sure you include all the portions of the code needed to reproduce the problem. It would be best if you assumed that the person who is answering your question would not write any code to reproduce your issue. Again, remember, do not use images of code—those trying to help you need direct access to the code you include in your question.
Reproducible
When you include your code, it's also important to tell the reader exactly what you expect the behavior to be. Be sure to show the reader the exact wording of the error message you encountered (if there was one). It's also crucial to double-check that your included example reproduces the problem.
One other thing you can do is create a live example on a site like sqlfiddle.com or jsbin.com. If you do, make sure you also include a copy of your code in your question. Not everyone will utilize the link to the live example.
And to reiterate, do not post images of any code, data, or error messages—reserve images for things like rendering bugs—things that are impossible to describe accurately with just text.
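To make the guidelines above concrete, a minimal, complete, reproducible snippet for a question about an unexpected ZeroDivisionError could look like the sketch below; the function and input are invented purely for illustration.
def average(values):
    return sum(values) / len(values)

print(average([]))   # ZeroDivisionError: division by zero
Alongside the snippet, state the expected behavior (for example, returning 0 or raising a clearer error for an empty list) and the exact error message you observed.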
Proofread
Don't send a question you haven't proofread. When you post your question, you should have already read and reread it, taking care to follow all the best practices and making sure your question makes sense. It would be best if you imagined that you're coming to your question fresh, with no other context but the question itself. You want to make your question as easy for someone to answer as possible. Remember, the reader | |
will paginate through responses from :py:meth:`LexModelBuildingService.Client.get_bots`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBots>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
nameContains='string',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'bots': [
{
'name': 'string',
'description': 'string',
'status': 'BUILDING'|'READY'|'READY_BASIC_TESTING'|'FAILED'|'NOT_BUILT',
'lastUpdatedDate': datetime(2015, 1, 1),
'createdDate': datetime(2015, 1, 1),
'version': 'string'
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **bots** *(list) --*
An array of ``botMetadata`` objects, with one entry for each bot.
- *(dict) --*
Provides information about a bot.
- **name** *(string) --*
The name of the bot.
- **description** *(string) --*
A description of the bot.
- **status** *(string) --*
The status of the bot.
- **lastUpdatedDate** *(datetime) --*
The date that the bot was updated. When you create a bot, the creation date and last updated date are the same.
- **createdDate** *(datetime) --*
The date that the bot was created.
- **version** *(string) --*
The version of the bot. For a new bot, the version is always ``$LATEST`` .
- **NextToken** *(string) --*
A token to resume pagination.
:type nameContains: string
:param nameContains:
Substring to match in bot names. A bot will be returned if any part of its name matches the substring. For example, \"xyz\" matches both \"xyzabc\" and \"abcxyz.\"
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
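        **Example** (illustrative usage sketch; assumes a standard boto3 client and is not part of the generated stub)::
          import boto3
          client = boto3.client('lex-models')
          paginator = client.get_paginator('get_bots')
          for page in paginator.paginate(nameContains='Order'):
              for bot in page.get('bots', []):
                  print(bot['name'], bot['status'])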
"""
pass
class GetBuiltinIntents(Paginator):
def paginate(self, locale: str = None, signatureContains: str = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`LexModelBuildingService.Client.get_builtin_intents`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBuiltinIntents>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
locale='en-US'|'en-GB'|'de-DE',
signatureContains='string',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'intents': [
{
'signature': 'string',
'supportedLocales': [
'en-US'|'en-GB'|'de-DE',
]
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **intents** *(list) --*
An array of ``builtinIntentMetadata`` objects, one for each intent in the response.
- *(dict) --*
Provides metadata for a built-in intent.
- **signature** *(string) --*
A unique identifier for the built-in intent. To find the signature for an intent, see `Standard Built-in Intents <https://developer.amazon.com/public/solutions/alexa/alexa-skills-kit/docs/built-in-intent-ref/standard-intents>`__ in the *Alexa Skills Kit* .
- **supportedLocales** *(list) --*
A list of identifiers for the locales that the intent supports.
- *(string) --*
- **NextToken** *(string) --*
A token to resume pagination.
:type locale: string
:param locale:
A list of locales that the intent supports.
:type signatureContains: string
:param signatureContains:
Substring to match in built-in intent signatures. An intent will be returned if any part of its signature matches the substring. For example, \"xyz\" matches both \"xyzabc\" and \"abcxyz.\" To find the signature for an intent, see `Standard Built-in Intents <https://developer.amazon.com/public/solutions/alexa/alexa-skills-kit/docs/built-in-intent-ref/standard-intents>`__ in the *Alexa Skills Kit* .
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class GetBuiltinSlotTypes(Paginator):
def paginate(self, locale: str = None, signatureContains: str = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`LexModelBuildingService.Client.get_builtin_slot_types`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetBuiltinSlotTypes>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
locale='en-US'|'en-GB'|'de-DE',
signatureContains='string',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'slotTypes': [
{
'signature': 'string',
'supportedLocales': [
'en-US'|'en-GB'|'de-DE',
]
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **slotTypes** *(list) --*
An array of ``BuiltInSlotTypeMetadata`` objects, one entry for each slot type returned.
- *(dict) --*
Provides information about a built-in slot type.
- **signature** *(string) --*
A unique identifier for the built-in slot type. To find the signature for a slot type, see `Slot Type Reference <https://developer.amazon.com/public/solutions/alexa/alexa-skills-kit/docs/built-in-intent-ref/slot-type-reference>`__ in the *Alexa Skills Kit* .
- **supportedLocales** *(list) --*
A list of target locales for the slot.
- *(string) --*
- **NextToken** *(string) --*
A token to resume pagination.
:type locale: string
:param locale:
A list of locales that the slot type supports.
:type signatureContains: string
:param signatureContains:
Substring to match in built-in slot type signatures. A slot type will be returned if any part of its signature matches the substring. For example, \"xyz\" matches both \"xyzabc\" and \"abcxyz.\"
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class GetIntentVersions(Paginator):
def paginate(self, name: str, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`LexModelBuildingService.Client.get_intent_versions`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetIntentVersions>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
name='string',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'intents': [
{
'name': 'string',
'description': 'string',
'lastUpdatedDate': datetime(2015, 1, 1),
'createdDate': datetime(2015, 1, 1),
'version': 'string'
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **intents** *(list) --*
An array of ``IntentMetadata`` objects, one for each numbered version of the intent plus one for the ``$LATEST`` version.
- *(dict) --*
Provides information about an intent.
- **name** *(string) --*
The name of the intent.
- **description** *(string) --*
A description of the intent.
- **lastUpdatedDate** *(datetime) --*
The date that the intent was updated. When you create an intent, the creation date and last updated date are the same.
- **createdDate** *(datetime) --*
The date that the intent was created.
- **version** *(string) --*
The version of the intent.
- **NextToken** *(string) --*
A token to resume pagination.
:type name: string
:param name: **[REQUIRED]**
The name of the intent for which versions should be returned.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class GetIntents(Paginator):
def paginate(self, nameContains: str = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`LexModelBuildingService.Client.get_intents`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/lex-models-2017-04-19/GetIntents>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
nameContains='string',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
| |
import torch
from torch import nn
from torch import optim
from torch.nn import functional as F
from torch.utils.data import TensorDataset, DataLoader
import numpy as np
from copy import deepcopy
import pdb
class Learner(nn.Module):
"""
"""
def __init__(self, config):
"""
:param config: network config file, type:list of (string, list)
"""
super(Learner, self).__init__()
self.config = config
        # this list contains all tensors that need to be optimized
        self.vars = nn.ParameterList()
        # running_mean/running_var buffers for the batch-norm layers; forward() reads them, so the
        # list must be initialized here as well
        self.vars_bn = nn.ParameterList()
for i, (name, param) in enumerate(self.config):
            if name == 'conv2d':
# [ch_out, ch_in, kernelsz, kernelsz]
w = nn.Parameter(torch.ones(*param[:4]))
# gain=1 according to cbfin's implementation
torch.nn.init.kaiming_normal_(w)
self.vars.append(w)
# [ch_out]
self.vars.append(nn.Parameter(torch.zeros(param[0])))
            elif name == 'convt2d':
# [ch_in, ch_out, kernelsz, kernelsz, stride, padding]
w = nn.Parameter(torch.ones(*param[:4]))
# gain=1 according to cbfin's implementation
torch.nn.init.kaiming_normal_(w)
self.vars.append(w)
# [ch_in, ch_out]
self.vars.append(nn.Parameter(torch.zeros(param[1])))
            elif name == 'linear':
# [ch_out, ch_in]
w = nn.Parameter(torch.ones(*param))
# gain=1 according to cbfinn's implementation
torch.nn.init.kaiming_normal_(w)
self.vars.append(w)
# [ch_out]
self.vars.append(nn.Parameter(torch.zeros(param[0])))
            elif name == 'bn':
# [ch_out]
w = nn.Parameter(torch.ones(param[0]))
self.vars.append(w)
# [ch_out]
self.vars.append(nn.Parameter(torch.zeros(param[0])))
# must set requires_grad=False
running_mean = nn.Parameter(torch.zeros(param[0]), requires_grad=False)
running_var = nn.Parameter(torch.ones(param[0]), requires_grad=False)
self.vars_bn.extend([running_mean, running_var])
elif name in ['tanh', 'relu', 'upsample', 'avg_pool2d', 'max_pool2d',
'flatten', 'reshape', 'leakyrelu', 'sigmoid']:
continue
else:
raise NotImplementedError
def extra_repr(self):
info = ''
for name, param in self.config:
            if name == 'conv2d':
tmp = 'conv2d:(ch_in:%d, ch_out:%d, k:%dx%d, stride:%d, padding:%d)'\
%(param[1], param[0], param[2], param[3], param[4], param[5],)
info += tmp + '\n'
            elif name == 'convt2d':
tmp = 'convTranspose2d:(ch_in:%d, ch_out:%d, k:%dx%d, stride:%d, padding:%d)'\
%(param[0], param[1], param[2], param[3], param[4], param[5],)
info += tmp + '\n'
            elif name == 'linear':
tmp = 'linear:(in:%d, out:%d)'%(param[1], param[0])
info += tmp + '\n'
            elif name == 'leakyrelu':
tmp = 'leakyrelu:(slope:%f)'%(param[0])
info += tmp + '\n'
            elif name == 'avg_pool2d':
tmp = 'avg_pool2d:(k:%d, stride:%d, padding:%d)'%(param[0], param[1], param[2])
info += tmp + '\n'
            elif name == 'max_pool2d':
tmp = 'max_pool2d:(k:%d, stride:%d, padding:%d)'%(param[0], param[1], param[2])
info += tmp + '\n'
elif name in ['flatten', 'tanh', 'relu', 'upsample', 'reshape', 'sigmoid', 'use_logits', 'bn']:
tmp = name + ':' + str(tuple(param))
info += tmp + '\n'
else:
raise NotImplementedError
return info
def forward(self, x, vars=None, bn_training=True):
"""
        This function can also be called during fine-tuning; in that case we do not want to update
        running_mean/running_var. Although the batch-norm weight/bias are updated, they are kept
        separate from the initial parameters via fast_weights. To leave the running statistics
        untouched, call forward() with bn_training=False; weight/bias are still updated through
        fast_weights and therefore do not dirty the initial theta parameters.
        :param x: [b, 1, 28, 28]
        :param vars:
        :param bn_training: set False to freeze the batch-norm running statistics
        :return: x
"""
if vars is None:
vars = self.vars
idx = 0
bn_idx = 0
for name, param in self.config:
            if name == 'conv2d':
                w, b = vars[idx], vars[idx + 1]
                # remember to keep forward_encoder and forward_decoder synchronized!
                x = F.conv2d(x, w, b, stride=param[4], padding=param[5])
idx += 2
# print(name, param, '\tout:', x.shape)
            elif name == 'convt2d':
                w, b = vars[idx], vars[idx + 1]
                # remember to keep forward_encoder and forward_decoder synchronized!
                x = F.conv_transpose2d(x, w, b, stride=param[4], padding=param[5])
idx += 2
# print(name, param, '\tout:', x.shape)
            elif name == 'linear':
w, b = vars[idx], vars[idx + 1]
x = F.linear(x, w, b)
idx += 2
# print('forward:', idx, x.norm().item())
            elif name == 'bn':
w, b = vars[idx], vars[idx + 1]
running_mean, running_var = self.vars_bn[bn_idx], self.vars_bn[bn_idx+1]
x = F.batch_norm(x, running_mean, running_var, weight=w, bias=b, training=bn_training)
idx += 2
bn_idx += 2
            elif name == 'flatten':
                # print(x.shape)
                x = x.view(x.size(0), -1)
            elif name == 'reshape':
                # [b, 8] => [b, 2, 2, 2]
                x = x.view(x.size(0), *param)
            elif name == 'relu':
                x = F.relu(x, inplace=param[0])
            elif name == 'leakyrelu':
                x = F.leaky_relu(x, negative_slope=param[0], inplace=param[1])
            elif name == 'tanh':
                x = F.tanh(x)
            elif name == 'sigmoid':
                x = torch.sigmoid(x)
            elif name == 'upsample':
                x = F.upsample_nearest(x, scale_factor=param[0])
            elif name == 'max_pool2d':
                x = F.max_pool2d(x, param[0], param[1], param[2])
            elif name == 'avg_pool2d':
                x = F.avg_pool2d(x, param[0], param[1], param[2])
else:
raise NotImplementedError
# make sure variable is used properly
assert idx == len(vars)
return x
def zero_grad(self, vars=None):
"""
:param vars:
:return:
"""
with torch.no_grad():
if vars is None:
for p in self.vars:
if p.grad is not None:
p.grad.zero_()
else:
for p in vars:
if p.grad is not None:
p.grad.zero_()
def parameters(self):
"""
        Override nn.Module.parameters(): the default implementation returns a generator, while here we return the ParameterList directly.
:return:
"""
return self.vars
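# Illustrative usage sketch (assumption, not part of the original file): a minimal config for a
# tiny conv net using the (name, param) conventions handled by Learner above. The layer choices
# and shapes are made up purely for demonstration.
if __name__ == '__main__':
    example_config = [
        ('conv2d', [32, 1, 3, 3, 1, 1]),       # [ch_out, ch_in, k, k, stride, padding]
        ('relu', [True]),
        ('bn', [32]),
        ('flatten', []),
        ('linear', [5, 32 * 28 * 28]),         # [ch_out, ch_in]
    ]
    net = Learner(example_config)
    out = net(torch.randn(4, 1, 28, 28))       # expected shape: torch.Size([4, 5])
    print(net)
    print(out.shape)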
class Meta(nn.Module):
"""
Meta Learner
"""
def __init__(self, args, config):
"""
:param args:
"""
super(Meta, self).__init__()
self.update_lr = args.update_lr
self.meta_lr = args.meta_lr
self.m_coef = args.m_coef
self.k_spt = args.k_spt
self.mu = args.mu
self.task_num = args.task_num
self.update_step = args.update_step
self.update_step_test = args.update_step_test
self.opt = args.opt
self.mult_state = args.mult_state
self.net = Learner(config)
if self.mult_state:
self.momentum_weight = [None] * 25
else:
self.momentum_weight = None
if self.opt == 'sgd':
self.meta_optim = optim.SGD(self.net.parameters(), lr=self.meta_lr)
elif self.opt == 'momentum':
self.meta_optim = optim.SGD(self.net.parameters(), lr=self.meta_lr, momentum=self.mu)
else:
self.meta_optim = optim.Adam(self.net.parameters(), lr=self.meta_lr)
def clip_grad_by_norm_(self, grad, max_norm):
"""
in-place gradient clipping.
:param grad: list of gradients
:param max_norm: maximum norm allowable
:return:
"""
total_norm = 0
counter = 0
for g in grad:
param_norm = g.data.norm(2)
total_norm += param_norm.item() ** 2
counter += 1
total_norm = total_norm ** (1. / 2)
clip_coef = max_norm / (total_norm + 1e-6)
if clip_coef < 1:
for g in grad:
g.data.mul_(clip_coef)
return total_norm/counter
def forward(self, x_spt, y_spt, x_qry, y_qry, task_code=None):
"""
:param x_spt: [b, setsz, c_, h, w]
:param y_spt: [b, setsz]
:param x_qry: [b, querysz, c_, h, w]
:param y_qry: [b, querysz]
:return:
"""
# pdb.set_trace()
task_num, setsz, _ = x_spt.size()
querysz = x_qry.size(1)
assert np.max(task_code) <= 24
losses_q = [0 for _ in range(2)] # losses_q[i] is the loss on step i
# this is the loss and accuracy before first update
# tmp_weights = [torch.zeros_like(p) for p in self.net.parameters()]
if self.mult_state:
            # build 25 independent per-task state lists (plain list multiplication would alias one inner list)
            tmp_state = [[torch.zeros_like(p) for p in self.net.parameters()] for _ in range(25)]
            tmp_count = [0] * 25
else:
tmp_state = [torch.zeros_like(p) for p in self.net.parameters()]
tmp_grad = [torch.zeros_like(p) for p in self.net.parameters()]
for i in range(task_num):
with torch.no_grad():
# [setsz, nway]
logits_q = self.net(x_qry[i], self.net.parameters(), bn_training=True)
loss_q = F.mse_loss(logits_q, y_qry[i])/querysz
losses_q[0] += loss_q
            fast_weights = list(self.net.parameters())
# pdb.set_trace()
for k in range(self.update_step):
# 1. run the i-th task and compute loss for k=1~K-1
logits = self.net(x_spt[i], fast_weights, bn_training=True)
loss = F.mse_loss(logits, y_spt[i])/setsz
# print(k,loss)
# 2. compute grad on theta_pi
# if k == self.update_step - 1:
# total_weight = torch.sum(torch.cat([torch.norm((f_p - p.detach().clone())**2).view(1,-1) for f_p, p in zip(fast_weights, self.net.parameters())]))
# # print(total_weight)
# loss = loss + 1e-2 * total_weight
grad = torch.autograd.grad(loss, fast_weights, create_graph=True)
# 3. theta_pi = theta_pi - train_lr * grad
fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, fast_weights)))
# tmp_weights = [tmp_w + fast_w/task_num for tmp_w, fast_w in zip(tmp_weights, fast_weights)]
# pdb.set_trace()
if self.mult_state:
if self.momentum_weight[task_code[i]] is None:
u_state = [u.detach().clone().requires_grad_() for u in fast_weights]
else:
u_state = list(map(lambda p: (1 - self.m_coef) * p[0] + self.m_coef * p[1].detach().clone(), \
zip(self.momentum_weight[task_code[i]], fast_weights)))
u_state = [u.detach().clone().requires_grad_() for u in u_state]
else:
if self.momentum_weight is None:
u_state = [u.detach().clone().requires_grad_() for u in fast_weights]
else:
u_state = list(map(lambda p: (1 - self.m_coef) * p[0] + self.m_coef * p[1].detach().clone(), \
zip(self.momentum_weight, fast_weights)))
u_state = [u.detach().clone().requires_grad_() for u in u_state]
logits_q = self.net(x_qry[i], u_state, bn_training=True)
loss_q = F.mse_loss(logits_q, y_qry[i])/querysz; losses_q[1] += loss_q.detach().clone()
grad_q = torch.autograd.grad(loss_q, u_state)
grad = torch.autograd.grad(fast_weights, self.net.parameters(), grad_outputs=grad_q)
tmp_grad = [tmp_g + fast_g/task_num for tmp_g, fast_g in zip(tmp_grad, grad)]
if self.mult_state:
tmp_state[task_code[i]] = [tmp_st + state_cur for tmp_st, state_cur in zip(tmp_state[task_code[i]], u_state)]
tmp_count[task_code[i]] += 1
else:
tmp_state = [tmp_st + state_cur/task_num for tmp_st, state_cur in zip(tmp_state, u_state)]
# tmp_grad = [torch.zeros_like(p) for p in self.net.parameters()]
# for i in range(task_num):
# logits_q = self.net(x_qry[i], tmp_state, bn_training=True)
# loss_q = F.mse_loss(logits_q, y_qry[i]); losses_q[1] += loss_q.detach().clone()
# grad_q = torch.autograd.grad(loss_q, tmp_state)
# tmp_grad = [tmp_g + fast_g/task_num for tmp_g, fast_g in zip(tmp_grad, grad_q)]
# grad = torch.autograd.grad(tmp_weights, self.net.parameters(), grad_outputs=tmp_grad)
# optimize theta parameters
# print(grad[-1])
# self.momentum_weight = [u.detach().clone() for u in tmp_state]
if | |
<reponame>dhyon/go-spacemesh
import base64
from datetime import datetime
import pytest
from pytest_testconfig import config as testconfig
import re
import random
from random import choice
from string import ascii_lowercase
import time
# noinspection PyUnresolvedReferences
import tests.config as cnf
from tests.context import ES
from tests.convenience import sleep_print_backwards
from tests.queries import query_message, poll_query_message
from tests.setup_utils import add_multi_clients
from tests.utils import get_conf, api_call, get_curr_ind
current_index = get_curr_ind()
timeout_factor = 1
# For purposes of these tests, we override the PoetProof protocol
gossip_message_query_fields = {'M': 'api_test_gossip: got test gossip message'}
def query_bootstrap_es(namespace, bootstrap_po_name):
hits = poll_query_message(current_index, namespace, bootstrap_po_name, {"M": "local node identity"}, expected=1)
for h in hits:
if getattr(h, 'identity', None):
return h.identity
return None
# ==============================================================================
# Fixtures
# ==============================================================================
# The following fixture should not be used if you wish to add many clients during test.
@pytest.fixture()
def add_client(request, setup_bootstrap, setup_clients):
global client_name
def _add_single_client():
global client_name
if not setup_bootstrap.pods:
raise Exception("Could not find bootstrap node")
bs_info = setup_bootstrap.pods[0]
cspec = get_conf(bs_info, testconfig['client'], testconfig['genesis_delta'])
client_name = add_multi_clients(testconfig, setup_bootstrap.deployment_id, cspec, 1)[0]
return client_name
return _add_single_client()
@pytest.fixture()
def add_clients(setup_bootstrap, setup_clients):
"""
add_clients returns a function for the user to run in order to add more clients
:param setup_bootstrap: DeploymentInfo, bootstrap info
:param setup_clients: DeploymentInfo, client info
:return: function, _add_client
"""
def _add_clients(num_of_clients, version=None, version_separator=''):
# TODO make a generic function that _add_clients can use
"""
adds a clients to namespace
:param num_of_clients: int, number of replicas
:param version: string, the wanted client version
:param version_separator: string, separator to separate between client key and client version
:return: list, all created client pods
"""
if version and not isinstance(version, str):
raise ValueError("version must be type string")
if not setup_bootstrap.pods:
raise Exception("Could not find bootstrap node")
bs_info = setup_bootstrap.pods[0]
client_key = 'client'
if version:
client_key += f'{version_separator}{version}'
cspec = get_conf(bs_info, testconfig[client_key], testconfig['genesis_delta'])
pods_names = add_multi_clients(testconfig, setup_bootstrap.deployment_id, cspec, size=num_of_clients)
return pods_names
return _add_clients
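# Illustrative usage sketch (assumption): inside a test, the add_clients fixture above could be
# used roughly like this; 'client-v0.1.0' stands for a hypothetical testconfig entry.
#
#   def test_scale_up(setup_bootstrap, setup_clients, add_clients):
#       new_pods = add_clients(3)                                             # three default-version clients
#       versioned = add_clients(2, version='v0.1.0', version_separator='-')   # uses testconfig['client-v0.1.0']
#       assert len(new_pods) == 3 and len(versioned) == 2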
# ==============================================================================
# TESTS
# ==============================================================================
def test_bootstrap(init_session, add_elk, add_node_pool, add_curl, setup_bootstrap):
print("running test_bootstrap")
sleep_print_backwards(10 * timeout_factor, "wait for the bootstrap logs to be available in ElasticSearch")
bs_id = query_bootstrap_es(testconfig['namespace'], setup_bootstrap.pods[0]['name'])
ass_err = f"setup_bootstrap.pods[0]['identity'] = {setup_bootstrap.pods[0]['identity']}, bootstrap returned ID: {bs_id}"
assert setup_bootstrap.pods[0]['identity'] == bs_id, ass_err
def test_client(init_session, add_elk, add_node_pool, add_curl, setup_clients, save_log_on_exit):
fields = {'M': 'discovery_bootstrap'}
timetowait = len(setup_clients.pods) * timeout_factor
print(f"Sleeping {str(timetowait)} before checking bootstrap results")
time.sleep(timetowait)
peers = poll_query_message(indx=current_index,
namespace=testconfig['namespace'],
client_po_name=setup_clients.deployment_name,
fields=fields,
findFails=False,
expected=len(setup_clients.pods))
assert len(peers) == len(setup_clients.pods)
def test_add_client(add_client):
# Sleep a while before checking the node is bootstrapped
time.sleep(20 * timeout_factor)
fields = {'M': 'discovery_bootstrap'}
hits = poll_query_message(indx=current_index,
namespace=testconfig['namespace'],
client_po_name=add_client,
fields=fields,
findFails=True,
expected=1)
assert len(hits) == 1, "Could not find new Client bootstrap message pod:{0}".format(add_client)
def test_add_many_clients(init_session, add_elk, add_node_pool, setup_bootstrap, setup_clients):
bs_info = setup_bootstrap.pods[0]
cspec = get_conf(bs_info, testconfig['client'], testconfig['genesis_delta'])
pods = add_multi_clients(testconfig, setup_bootstrap.deployment_id, cspec, size=4)
time.sleep(40 * timeout_factor) # wait for the new clients to finish bootstrap and for logs to get to elasticsearch
fields = {'M': 'discovery_bootstrap'}
for p in pods:
hits = poll_query_message(indx=current_index,
namespace=testconfig['namespace'],
client_po_name=p,
fields=fields,
findFails=True,
expected=1)
assert len(hits) == 1, "Could not find new Client bootstrap message pod:{0}".format(p)
def test_gossip(init_session, add_elk, add_node_pool, setup_clients, add_curl):
initial = len(query_message(
current_index, testconfig['namespace'], setup_clients.deployment_name, gossip_message_query_fields))
# *note*: this already waits for bootstrap so we can send the msg right away.
# send message to client via rpc
client_ip = setup_clients.pods[0]['pod_ip']
podname = setup_clients.pods[0]['name']
print("Sending gossip from client ip: {0}/{1}".format(podname, client_ip))
# todo: take out broadcast and rpcs to helper methods.
api = 'v1/gateway/broadcastpoet'
# this is messy: this gets passed to curl as a command, so it needs to be a string
# grpc expects binary data as base64
data = '{"data":"%s"}' % base64.b64encode(b"foo").decode('utf-8')
out = api_call(client_ip, data, api, testconfig['namespace'])
assert "{'status': {}}" in out
# Need to sleep for a while in order to enable the propagation of the gossip message - 0.5 sec for each node
# TODO: check frequently before timeout so we might be able to finish earlier.
gossip_propagation_sleep = 3 * timeout_factor # currently we expect short propagation times.
print('sleep for {0} sec to enable gossip propagation'.format(gossip_propagation_sleep))
time.sleep(gossip_propagation_sleep)
total_expected_gossip = initial + len(setup_clients.pods)
after = poll_query_message(indx=current_index,
namespace=testconfig['namespace'],
client_po_name=setup_clients.deployment_name,
fields=gossip_message_query_fields,
findFails=False,
expected=total_expected_gossip)
assert total_expected_gossip <= len(after), "test_gossip: Total gossip messages in ES is not as expected"
def test_many_gossip_messages(setup_clients, add_elk, add_node_pool, add_curl):
initial = len(query_message(
current_index, testconfig['namespace'], setup_clients.deployment_name, gossip_message_query_fields))
# *note*: this already waits for bootstrap so we can send the msg right away.
# send message to client via rpc
test_messages = 10
for i in range(test_messages):
rnd = random.randint(0, len(setup_clients.pods) - 1)
client_ip = setup_clients.pods[rnd]['pod_ip']
podname = setup_clients.pods[rnd]['name']
print("Sending gossip from client ip: {0}/{1}".format(podname, client_ip))
# todo: take out broadcast and rpcs to helper methods.
api = 'v1/gateway/broadcastpoet'
# this is messy: this gets passed to curl as a command, so it needs to be a string
# grpc expects binary data as base64
# it doesn't matter what the data contains as long as each is unique
data = '{"data":"%s"}' % base64.b64encode(i.to_bytes(1, byteorder='big')).decode('utf-8')
out = api_call(client_ip, data, api, testconfig['namespace'])
assert "{'status': {}}" in out
# Need to sleep for a while in order to enable the propagation of the gossip message - 0.5 sec for each node
# TODO: check frequently before timeout so we might be able to finish earlier.
gossip_propagation_sleep = 3 * timeout_factor # currently we expect short propagation times.
print('sleep for {0} sec to enable gossip propagation'.format(gossip_propagation_sleep))
time.sleep(gossip_propagation_sleep)
total_expected_gossip = initial + len(setup_clients.pods) * (i + 1)
after = poll_query_message(indx=current_index,
namespace=testconfig['namespace'],
client_po_name=setup_clients.deployment_name,
fields=gossip_message_query_fields,
findFails=False,
expected=total_expected_gossip)
assertion_msg = "test_many_gossip_messages: Total gossip messages in ES is not as expected"
assert total_expected_gossip <= len(after), assertion_msg
def send_msgs(setup_clients, api, headers, total_expected_gossip, msg_size=10000, prop_sleep_time=20, num_of_msg=100,
expected_ret="{'status': {}}", msg_field="data"):
"""
sends a protocol message to a random node and asserts its propagation
:param setup_clients: DeploymentInfo, clients info
:param api: string, api path
:param headers: string, protocol header fields
:param total_expected_gossip: int, expected number of hits result
:param msg_size: int, message size in bits
:param prop_sleep_time: int, time to sleep before propagation is done
:param num_of_msg: int
:param expected_ret: string, expected query return status
:param msg_field: string, message field
currently this argument gets only one value but in the future for a more
generic function we'll get a list of strings (10.11.19)
"""
# in our case each pod contains one node
pods_num = len(setup_clients.pods)
print("Sending {0} gossip messages".format(num_of_msg))
for i in range(num_of_msg):
rnd = random.randint(0, pods_num - 1)
client_ip = setup_clients.pods[rnd]['pod_ip']
pod_name = setup_clients.pods[rnd]['name']
print("Sending gossip {0} from client ip: {1}/{2}".format(i+1, pod_name, client_ip))
# todo: take out broadcast and rpcs to helper methods.
msg = "".join(choice(ascii_lowercase) for _ in range(msg_size))
# TODO in the future this may be changed for a more generic function
data = '{{"{msg_field}": "{msg}"}}'.format(msg_field=msg_field, msg=msg)
out = api_call(client_ip, data, api, testconfig['namespace'])
ass_err = f"test_invalid_msg: expected \"{expected_ret}\" and got \"{out}\""
assert expected_ret in out, ass_err
# we expect short propagation times
gossip_propagation_sleep = prop_sleep_time * timeout_factor
print('sleep for {0} sec to enable gossip propagation'.format(gossip_propagation_sleep))
time.sleep(gossip_propagation_sleep)
# run one global query (for results from all pods), then run one query per pod to figure out which ones failed
results = poll_query_message(indx=current_index,
namespace=testconfig['namespace'],
client_po_name=setup_clients.deployment_name,
fields=headers,
findFails=False,
expected=total_expected_gossip)
# run some additional queries to make debugging easier
if total_expected_gossip > len(results):
for pod in setup_clients.pods:
# query_fields = headers.copy()
# query_fields['kubernetes.pod_name'] = pod['name']
results_pod = query_message(current_index, testconfig['namespace'], pod['name'], headers)
print("Count of results for pod {0}: {1}".format(pod['name'], len(results_pod)))
err_msg = "msg_testing: Total gossip messages in ES is not as expected"
err_msg += f"\nexpected {total_expected_gossip}, got {len(results)}"
assert total_expected_gossip <= len(results), err_msg
# Deploy X peers
# Wait for bootstrap
# Broadcast Y messages (make sure that all of them are live simultaneously)
# Validate that all nodes got exactly Y messages (X*Y messages)
# Sample few nodes and validate that they got all 5 messages
def test_many_gossip_sim(setup_clients, add_elk, add_node_pool, add_curl):
api = 'v1/gateway/broadcastpoet'
msg_size = 10000 # 1kb TODO: increase up to 2mb
test_messages = 100
pods_num = len(setup_clients.pods)
prev_num_of_msg = len(query_message(
current_index, testconfig['namespace'], setup_clients.deployment_name, gossip_message_query_fields))
# if msg is valid we should see the message at each node msg * pods(nodes)
total_expected_gossip = prev_num_of_msg + test_messages * pods_num
send_msgs(setup_clients, api, gossip_message_query_fields, total_expected_gossip, num_of_msg=test_messages)
# NOTE: this test is run in the end because it affects | |
= self.get_id(tableDesc)
if not tableId:
#try to find the table from the first node
#table not found, create it
tableId = self.create_node_from_path(tableDesc,properties={"type":"table"})
if tableId:
columnsId = self.create_node(parent=tableId, name="columns", properties={"type": "referencer"})
variablesId = self.create_node(parent=tableId, name="variables", properties={"type": "folder"})
else:
self.logger.error(f"cant create table {tableDesc}")
return False
else:
columnsId = self.get_child(tableId,"columns")
variablesId = self.get_child(tableId, "variables")
#now we know the tableId, columnsId, variablesId
# iterate over all blobs and find the ids of the names in the blobs, if not found, create it
# exchange the descriptors to ids
        tableVars = self.get_leaves(columnsId)
        desc2Id = {dic["name"]: dic["id"] for dic in tableVars}  # key: descriptor from the input blob, value: id in the model (preloaded with the column names)
#convert all to ids
newBlobs=[]
idsInBlobs=[]
for blob in blobs:
newBlob={}
for k,v in blob.items():
if k=="__time":
newBlob[k]=v
else:
#does this id already exist?
if k in desc2Id:
id = desc2Id[k]
else:
id = None
#try to find
for var in tableVars:
if var["name"] == k:
id = v["id"]
break
if not id:
#still not found, we need to create it
id = self.create_node(parent=variablesId,name=k,properties={"type": "timeseries"})
if not id:
self.logger.error(f"cant find or create {name}")
continue
else:
self.add_forward_refs(columnsId,[id])
desc2Id[k]=id #remember to speed up next time
newBlob[id] = v
idsInBlobs.append(id)
newBlobs.append(newBlob)
self.logger.debug(f"inserting blobs {len(newBlobs)}")
self.__notify_observers(idsInBlobs, "value")
result = self.ts.insert_blobs(newBlobs)
return result
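    # Illustrative blob format (assumption): the method above expects a list of dicts in which
    # "__time" holds the epoch timestamp and every other key is the name of a column/variable of
    # the target table, e.g.
    #   blobs = [
    #       {"__time": 1581483065.0, "temperature": 21.5, "pressure": 1.01},
    #       {"__time": 1581483066.0, "temperature": 21.7, "pressure": 1.02},
    #   ]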
# ########################################
# event series api
def event_series_create(self,desc,map={}):
id = self.get_id(desc)
if "eventMap" in self.model[id]:
self.model[id]["eventMap"].update(map)
else:
self.model[id]["eventMap"]=map.copy()
return self.ts.create(id)
def event_series_get_new_number_entry(self,id):
eventMap = self.model[id]["eventMap"]
numbers = [v for k, v in eventMap.items()]
newNumber = max(numbers)+1
while newNumber in numbers:
newNumber = newNumber+1
return newNumber
def event_series_get_event_number(self, desc, event, autoCreate=True):
id = self.get_id(desc)
if not id:
return None
with self.lock:
eventMap = self.model[id]["eventMap"] # a dict like {"starting":1, "machineStop":2,...}
if type(event) in [str,numpy.str_]:
if event not in [k for k,v in eventMap.items()]:
if not autoCreate:
return None
# we must put a new eventString
if eventMap == {}:
newEventNumber = 1
else:
newEventNumber = self.event_series_get_new_number_entry(id)
self.model[id]["eventMap"][event] = newEventNumber
return newEventNumber
else:
#is a known event string, get the number
return eventMap[event]
else:
#this is a number already, check if it is in the map
eventNumbers = [v for k,v in eventMap.items()]
if event in eventNumbers:
return event
else:
if not autoCreate:
return None
#must create a new entry
try:
#to make sure we have only numbers there
newEventString = "event_"+str(int(event))
self.model[id]["eventMap"][newEventString]=int(event)
except:
self.log_error()
return None
return event
def event_series_insert(self, desc, values=None, times=None, allowEventDuplicates = False):
"""
Args:
    values: list of events, where each event is either an eventString or an event number;
        if values is a scalar, we assume that the same event will be inserted for all given times
    times: list of time stamps (seconds since epoch or iso strings), one per event
    allowEventDuplicates: setting this to True allows the same event to appear multiple times at the same time;
        different events at the same time are always allowed
"""
id = self.get_id(desc)
if not id in self.model:
return None
if type(values)==type(None) or type(times)==type(None):
return None
if not(type(values) is list or type(values) is numpy.ndarray):
values = [values]*len(times)
#convert the values to numbers and create new map entry if needed
numbers = numpy.asarray([self.event_series_get_event_number(id,event) for event in values],dtype=int)
#convert the times to epoch if not already done
epochs = numpy.asarray([t if type(t) is not str else date2secs(t) for t in times ],dtype=numpy.float64)
if not allowEventDuplicates:
# we must delete the events which exist already at the same time with the same event
data = self.event_series_get(desc)
takeIndices = numpy.full(len(times),True)
for idx,tim in enumerate(times):
duplicates = numpy.where(data["__time"]==tim)[0]
for pos in duplicates:
if numbers[idx] == data["values"][pos]:
takeIndices[idx] = False
numbers = numbers[takeIndices]
epochs = epochs[takeIndices]
with self.lock:
#on the TimeSeries class the allowDuplicates means that the same time can appear multiple times
# such that different or the same events can happen at the same time and thus produce the same
# time stamp in the time series
result = self.ts.insert(id,numbers, epochs, allowDuplicates=True)# we allow 2 events to appear on the same time!
self.__notify_observers(id, "value")
return result
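# Hedged usage sketch (instance name and node path are illustrative):
#
#   m.event_series_insert("root.events.machine",
#                         values=["startMachine", "stopMachine"],
#                         times=[1546437120.2, "2019-01-02T13:52:01+00:00"])
#
# Event strings are mapped to numbers via the node's eventMap (created on demand)
# and iso time strings are converted to epoch seconds before insertion.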
def event_series_set(self,desc,values=None,times=None):
id = self.get_id(desc)
if not id in self.model:
return None
with self.lock:
# now "refresh" the event map
#self.model[id]["eventMap"]={}
numbers = [self.event_series_get_event_number(id, event) for event in values]
result = self.ts.set(id,values=numbers,times=times)
self.__notify_observers(id, "value")
return result
def event_series_get(self,desc, start=None,end=None,format="default",eventFilter=None):
"""
get events from an event series
Args:
    desc: node descriptor
    start, end [float]:
        the start and end time of the query given as seconds since epoch
        we also allow the special case of endTime = 0 and startTime = -interval
        we also allow the special case of startTime given and endTime = 0
    format: [enum] "default", "iso" or "events", controls the formatting of the result
    eventFilter: [string] a list of eventStrings used as a positive match filter
examples (the instance name m and the node paths are illustrative):
    - get all events of a series
        data = m.event_series_get("root.events.machineState")
    - get selected events inside a time window
        data = m.event_series_get("root.events.machineState", start=1581483065.323, end=1581483080.323, eventFilter=["startMachine"])
Returns(dict)
    formatting depends on the "format" option
    "default": returns {"values":[...], "__time":[...], "eventStrings":[...], "eventMap":{"myevent":1, "anotherevent":2}}
    "iso": like "default", but the "__time" entries are converted to iso strings
    "events": returns {"eventMap":{...}, "events":{eventString:[times...]}} instead of the raw values
"""
id = self.get_id(desc)
if not id:
return None
data = self.ts.get_table([id], start=start, end=end)
if data == {}:
#this variable is not in the store
data = {id:{"values":numpy.asarray([]),"__time":numpy.asarray([])}}
eventMap = self.model[id]["eventMap"].copy()
reverseMap = {v:k for k,v in eventMap.items()}
values = data[id]["values"].astype(numpy.int)
times = data[id]["__time"]
#now filter
if eventFilter:
filter = []
if type(eventFilter) is not list:
eventFilter = [eventFilter]
for evString in eventFilter:
if evString in eventMap:
filter.append(eventMap[evString])
indices = [idx for idx,val in enumerate(values) if val in filter]
values = values[indices]
times = times[indices]
result = {
"values":values,
"__time":times,
"eventMap":eventMap,
"eventStrings":[reverseMap[v] for v in values]
}
if format == "iso":
#convert the timestamps to iso
result["__time"]=[epochToIsoString(t) for t in result["__time"]]
if format == "events":
existingEvents = set(result["values"])
events = {reverseMap[ev]:[] for ev in existingEvents}
for ev,ti in zip(result["values"],result["__time"]):
events[reverseMap[ev]].append(ti)
result["events"]=events
del result["values"]
del result["__time"]
del result["eventStrings"]
return result
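# Hedged usage sketch (instance name and node path are illustrative):
#
#   res = m.event_series_get("root.events.machine", format="events",
#                            eventFilter=["startMachine"])
#   # res ~ {"eventMap": {"startMachine": 1, ...},
#   #        "events": {"startMachine": [1546437120.2, ...]}}
#
# With the default format the raw "values", "__time", "eventStrings" and
# "eventMap" entries are returned instead.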
def event_series_insert_blob(self,blob):
"""
insert events in various blob syntax
Args:
    the node descriptor is taken from the "node" entry of the blob
    blob: a dictionary in various styles
    a) {
        "node": nodedescriptor,
        "events": "startMachine",
        "__time": ["2018.01.01T00:10:08.445+02:00", 1546437120.2, 1546437121.2, 1546437122.2]  # allows iso or epoch
       }
    b) {
        "node": nodedescriptor,
        "events": ["startMachine", "stopMachine", "startMachine", "startMachine"],
        "__time": ["2018.01.01T00:10:08.445+02:00", 1546437120.2, 1546437121.2, 1546437122.2]  # allows iso or epoch
       }
    c) {
        "node": nodedescriptor,
        "events": [
            {"event": "startMachine", "__time": "2018.01.01T00:10:08.445+02:00"},
            {"event": "stopMachine", "__time": "2018.01.01T00:10:08.445+02:00"}
        ]
       }
Returns
true/false for success
"""
if type(blob["events"]) is not list:
#style a)
events = blob["events"]
times = blob["__time"]
else:
#events is a list
if type(blob["events"][0]) is dict:
#style c)
events = []
times = []
for d in blob["events"]:
events.append(d["event"])
times.append(d["__time"])
else:
#style b)
events = blob["events"]
times = blob["__time"]
return self.event_series_insert(blob["node"],events,times)
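# Hedged usage sketch for the blob styles documented above (node path is illustrative):
#
#   m.event_series_insert_blob({
#       "node": "root.events.machine",
#       "events": [
#           {"event": "startMachine", "__time": "2019-01-02T13:52:01+00:00"},
#           {"event": "stopMachine", "__time": 1546437125.0},
#       ],
#   })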
def event_series_delete(self,desc,start=None, end = None, eventsToDelete=[]):
id = self.get_id(desc)
if not id:
return None
if start == None and end == None and eventsToDelete == []:
#delete all
with self.lock:
self.model[id]["eventMap"]={}
result = self.ts.set(id, values=[], times=[])
else:
#delete some events
with self.lock:
data = self.ts.get_table([id])
if not start:
start = 0
if not end:
end = numpy.inf
times = data[id]["__time"]
values = data[id]["values"]
over = times>=start
under = times<=end
deleteMaskTime = over & under
if eventsToDelete == []:
deleteMaskValues = numpy.full(len(deleteMaskTime),True)
else:
deleteMaskValues = numpy.full(len(deleteMaskTime),False)
for ev in eventsToDelete:
evNumber = self.model[id]["eventMap"][ev]
mask = values == evNumber
deleteMaskValues = deleteMaskValues | mask
deleteMask = deleteMaskTime & deleteMaskValues
times = times[~deleteMask]
values = values[~deleteMask]
self.event_series_set(id,values,times)
print(data)
def get_object(self,desc):
id = self.get_id(desc)
if not id:
return False
with self.lock:
if not self.model[id]["type"] == "object":
return None
if "object" not in self.model[id]:
return None
return self.model[id]["object"]
def instantiate_object(self,desc,writeToModel=True):
id = self.get_id(desc)
if not id:
return False
with self.lock:
if not self.model[id]["type"] == "object":
return False
try:
className = self.model[id]["class"]
if "autoReload" in self.model[id] and self.model[id]["autoReload"]==True and self.global_auto_reload_enabled():
# must reload the module
module = importlib.reload(self.objectClasses[className]["module"])
classDefinition = getattr(module, className.split('.', 1).pop())
# now update | |
self.get_dataset_LL(
mod_params,
self.obs[ii],
cur_pred,
self.eval_mask[ii],
CL_offset)
dataset_avg_nLLs.append(cur_nLL)
hp_offset += LFC_mats[ii].shape[0]
CL_offset += LFC_mats[ii].shape[1]
self.avg_effect_loss = tf.add_n(dataset_avg_nLLs)
#calc R2
self.R2 = 1 - self.nLL / tot_SS
#calc regularization penalty
self.CL_l2_lambda = tf.Variable(reg_params['CL_l2_lambda'], dtype = self.float)
self.hairpin_l2_lambda = tf.Variable(reg_params['hairpin_l2_lambda'], dtype = self.float)
self.hp_unpred_l2_lambda = tf.Variable(reg_params['hp_unpred_l2_lambda'], dtype = self.float)
self.rel_gene_l2_lambda = tf.Variable(reg_params['rel_gene_l2_lambda'], dtype = self.float)
self.rel_seed_l2_lambda = tf.Variable(reg_params['rel_seed_l2_lambda'], dtype = self.float)
self.gene_l2_lambda = tf.Variable(reg_params['gene_l2_lambda'], dtype = self.float)
self.seed_l2_lambda = tf.Variable(reg_params['seed_l2_lambda'], dtype = self.float)
self.pen = 0
self.pen += 0.5 * tf.reduce_sum(tf.pow(self.CL_offset, 2)) * self.CL_l2_lambda
self.pen += 0.5 * tf.reduce_sum(tf.pow(self.hairpin_offset, 2)) * self.hairpin_l2_lambda
self.pen += 0.5 * tf.reduce_sum(tf.pow(self.hairpin_unpred, 2)) * self.hp_unpred_l2_lambda
self.pen += 0.5 * tf.reduce_sum(tf.pow(self.gene_score, 2)) * self.rel_gene_l2_lambda
self.pen += 0.5 * tf.reduce_sum(tf.pow(self.seed_score, 2)) * self.rel_seed_l2_lambda
self.pen += 0.5 * tf.reduce_sum(tf.pow(self.gene_score_avgs, 2)) * self.gene_l2_lambda
self.pen += 0.5 * tf.reduce_sum(tf.pow(self.seed_score_avgs, 2)) * self.seed_l2_lambda
#get total loss as likelihood plus penalty
self.loss = self.nLL + self.pen
self.avg_effect_loss += self.pen
#make optimizer op for score estimation
score_var_list = [self.gene_score, self.gene_score_avgs, self.seed_score, self.seed_score_avgs,
self.hairpin_offset, self.hairpin_unpred, self.CL_offset]
self.score_optim = ScipyOptimizerInterface(self.loss,
options = self.optim_params,
var_list = score_var_list,
method = 'L-BFGS-B')
#make optimizer op for estimating guide efficacies
guide_var_list = [self.hairpin_offset, self.hairpin_unpred, self.CL_offset, self.guide_Geff, self.guide_Seff]
n_uncon = self.n_hp_batches + self.n_hairpins + self.n_CL_batches
n_bcon = self.n_Geffs + self.n_Seffs
bound_constraints = np.concatenate([
np.tile([None, None], [n_uncon, 1]),
np.tile([0, 1], [n_bcon, 1])],
axis = 0)
self.guide_optim = ScipyOptimizerInterface(self.loss,
options = self.optim_params,
var_list = guide_var_list,
method = 'L-BFGS-B',
bounds = bound_constraints)
#make optimizer ops for estimating gene slopes
gene_slope_var_list = [self.hairpin_offset, self.hairpin_unpred, self.CL_offset, self.gene_slope]
n_uncon = self.n_hp_batches + self.n_hairpins + self.n_CL_batches
n_pcon = self.n_CLs
bound_constraints = np.concatenate([
np.tile([None, None], [n_uncon, 1]),
np.tile([0, None], [n_pcon, 1])],
axis = 0)
self.gene_slope_optim = ScipyOptimizerInterface(self.avg_effect_loss,
options = self.optim_params,
var_list = gene_slope_var_list,
method = 'L-BFGS-B',
bounds = bound_constraints)
#make optimizer ops for estimating CL slopes
ov_slope_var_list = [self.hairpin_offset, self.CL_offset, self.CL_slope]
n_uncon = self.n_hp_batches + self.n_CL_batches
n_pcon = self.n_CL_batches
bound_constraints = np.concatenate([
np.tile([None, None], [n_uncon, 1]),
np.tile([0, None], [n_pcon, 1])],
axis = 0)
self.ov_slope_optim = ScipyOptimizerInterface(self.avg_effect_loss,
options = self.optim_params,
var_list = ov_slope_var_list,
method = 'L-BFGS-B',
bounds = bound_constraints)
init = tf.global_variables_initializer()
self.sess.run(init)
if self._log_file is not None:
self._log_file.close()
def write(self, data, silent = False):
'''Internal method to print to stdout and logfile
INPUTS:
data: string to print
silent: if True, do not print to the terminal (the log file is still written)
'''
if not silent:
print(data)
if self._log_file is not None:
if self._log_file.closed:
self._log_file = open(self.log_file, 'a')
self._log_file.write(data + '\n')
def predict(self):
'''Get model prediction'''
return(self.sess.run(self.pred))
def get_SSMD(self, use_test = False):
'''Calculate SSMD for a set of gene avgs, given sets of positive and negative controls'''
gene_score, gene_score_avgs, CL_noise_vars = \
self.sess.run([self.gene_score, self.gene_score_avgs, self.CL_noise_vars])
gene_scores = gene_score + gene_score_avgs
noise_vars_per_CL = pd.DataFrame({'noise_vars': CL_noise_vars.flatten(), 'CL_name': self.all_CL_names}) \
.groupby('CL_name').mean().loc[self.data_names['CLs'],:].values
weights = 1 / noise_vars_per_CL.reshape(-1,1)
weights = weights / np.nanmean(weights)
weight_avg = np.nanmean(gene_scores * weights, axis = 0)
if use_test: #if using cross-val on set of control genes
pop1 = weight_avg[np.in1d(self.data_names['genes'], self.gene_sets['neg_test'])]
pop2 = weight_avg[np.in1d(self.data_names['genes'], self.gene_sets['pos_test'])]
else:
pop1 = weight_avg[np.in1d(self.data_names['genes'], self.gene_sets['neg'])]
pop2 = weight_avg[np.in1d(self.data_names['genes'], self.gene_sets['pos'])]
return(SSMD(pop1, pop2))
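# Note (added for clarity): the SSMD helper used above is assumed to follow the
# standard strictly-standardized-mean-difference definition for two control
# populations, roughly:
#
#   def SSMD(pop1, pop2):
#       # separation between negative (pop1) and positive (pop2) control scores
#       return (np.nanmean(pop1) - np.nanmean(pop2)) / \
#              np.sqrt(np.nanvar(pop1) + np.nanvar(pop2))
#
# The actual SSMD function imported by this module may differ in detail.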
def fit(self, LFC_mats, fit_params = 'scores', ignore_test = False):
'''
Train subset of model parameters
INPUTS:
LFC_mats: List of [n_hairpins, n_CLs] training data sets of measured hairpin-level LFCs
fit_params: model parameter set to estimate ['scores', 'guide_effs', 'gene_slopes', 'ov_slopes', 'noise_vars', 'gene_slopes_ML', 'ov_slopes_ML']
ignore_test: optional fit to all data even if test_inds are defined
'''
poss_fit_params = ['scores', 'guide_effs', 'gene_slopes', 'ov_slopes', 'noise_vars', 'gene_slopes_ML', 'ov_slopes_ML']
assert(fit_params in poss_fit_params)
if self.log_file is not None:
self._log_file = open(self.log_file, 'a')
if self.test_inds is not None and not ignore_test:
train_eval_masks = make_eval_masks(LFC_mats, self.test_inds)
test_eval_masks = make_eval_masks(LFC_mats, self.test_inds, inverse = True)
else:
train_eval_masks = make_eval_masks(LFC_mats, None)
LFC_mats_no_na = []
for LFC_mat in LFC_mats:
cur = LFC_mat.copy()
cur[np.isnan(cur)] = 0
LFC_mats_no_na.append(cur)
feed_dict = {i: d for i, d in zip(self.obs, LFC_mats_no_na)}
train_mask_dict = {i: d for i, d in zip(self.eval_mask, train_eval_masks)}
train_dict = merge_dicts(feed_dict, train_mask_dict)
if self.test_inds is not None and not ignore_test:
test_mask_dict = {i: d for i, d in zip(self.eval_mask, test_eval_masks)}
test_dict = merge_dicts(feed_dict, test_mask_dict)
if fit_params == 'scores':
R2_evals = self.sess.run(self.R2, feed_dict = train_dict)
self.write('Init R2: {}'.format(R2_evals))
if self.test_inds and not ignore_test:
R2_evals = self.sess.run(self.R2, feed_dict = test_dict)
self.write('Init Test R2: {}'.format(R2_evals))
t0 = time.time()
if fit_params == 'scores':
self.write('Fitting model scores')
optim_res = self._fit_scores(train_dict)
elif fit_params == 'guide_effs':
self.write('Fitting guide efficacies')
optim_res = self._fit_guide_efficacies(train_dict)
elif fit_params == 'gene_slopes':
self._fit_gene_slopes()
elif fit_params == 'ov_slopes':
self._fit_ov_slopes(LFC_mats, ignore_test = ignore_test)
elif fit_params == 'gene_slopes_ML':
optim_res = self._fit_gene_slopes_ML(train_dict)
elif fit_params == 'ov_slopes_ML':
optim_res = self._fit_ov_slopes_ML(train_dict)
elif fit_params == 'noise_vars':
self._fit_noise_vars(LFC_mats, ignore_test = ignore_test)
elif fit_params == 'slopes':
self._fit_slopes(train_dict)
if fit_params in ['scores', 'guide_effs', 'gene_slopes_ML', 'ov_slopes_ML']:
self.write(optim_res['message'].decode('utf-8'))
self.write('Optimization finished after: {} sec, {} iter, {} fevals'.format(int(time.time() - t0),
optim_res['nit'],optim_res['nfev']))
if fit_params == 'scores':
R2_evals = self.sess.run(self.R2, feed_dict = train_dict)
self.R2_vals['train'].append(R2_evals)
self.write('New R2: {}'.format(R2_evals))
if self.test_inds and not ignore_test:
R2_evals = self.sess.run(self.R2, feed_dict = test_dict)
self.R2_vals['test'].append(R2_evals)
self.write('New Test R2: {}'.format(R2_evals))
self.SSMD['train'].append(self.get_SSMD(use_test = False))
self.write('Train SSMD: {}'.format(self.SSMD['train'][-1]))
if ('pos_test' in self.gene_sets) and (len(self.gene_sets['pos_test']) > 0):
self.SSMD['test'].append(self.get_SSMD(use_test = True))
self.write('Test SSMD: {}'.format(self.SSMD['test'][-1]))
self.loss_evals.append(self.sess.run(self.loss, feed_dict = train_dict))
if self._log_file is not None:
self._log_file.close()
def _fit_scores(self, feed_dict):
'''
Fit scores + intercepts using BFGS
INPUTS:
feed_dict: input data
optim_params: dict of optimization parameters
'''
optim_res = run_sklearn_optim(self.score_optim, feed_dict, self.sess, self.loss,
print_freq = self.optim_params['print_freq'])
return(optim_res)
def _fit_gene_slopes_ML(self, feed_dict):
'''
Fit slopes + intercepts using BFGS
INPUTS:
feed_dict: input data
optim_params: dict of optimization parameters
'''
init_gene_slopes = self.sess.run([self.gene_slope])
optim_res = run_sklearn_optim(self.gene_slope_optim, feed_dict, self.sess, self.avg_effect_loss,
print_freq = self.optim_params['print_freq'])
new_gene_slopes = self.sess.run(self.gene_slope)
self.write('init gene slopes avg: {}, new gene slope avg: {}'.format(np.mean(init_gene_slopes), np.mean(new_gene_slopes)))
new_gene_slopes[new_gene_slopes < self.min_slope] = self.min_slope #constrain to be non negative
# new_gene_slopes = euclidean_proj_simplex(new_gene_slopes.flatten(), s=self.n_CLs).reshape(1,-1)
new_gene_slopes = new_gene_slopes / np.nanmean(new_gene_slopes)
_=self.sess.run(self.gene_slope.assign(new_gene_slopes.reshape(-1,1)))
return(optim_res)
def _fit_ov_slopes_ML(self, feed_dict):
'''
Fit slopes + intercepts using BFGS
INPUTS:
feed_dict: input data
optim_params: dict of optimization parameters
'''
init_CL_slopes = self.sess.run([self.CL_slope])
optim_res = run_sklearn_optim(self.ov_slope_optim, feed_dict, self.sess, self.avg_effect_loss,
print_freq = self.optim_params['print_freq'])
new_CL_slopes = self.sess.run(self.CL_slope)
self.write('init ov slopes avg: {}, new ov slope avg: {}'.format(np.mean(init_CL_slopes), np.mean(new_CL_slopes)))
new_CL_slopes[new_CL_slopes < self.min_slope] = self.min_slope
# new_CL_slopes = euclidean_proj_simplex(new_CL_slopes.flatten(), s=self.n_CLs).reshape(1,-1)
new_CL_slopes = new_CL_slopes / np.nanmean(new_CL_slopes)
_=self.sess.run(self.CL_slope.assign(new_CL_slopes))
return(optim_res)
def _fit_gene_slopes(self):
'''Re-estimate gene score slope terms using pos/neg control gene set median separation'''
init_gene_slopes, init_gene_effects = self.sess.run([self.gene_slope, self.ind_gene_effects])
# NA out gene scores for cell lines where we dont have targeting guides
init_gene_effects[self.n_used_hairpins_per_gene.transpose() < self.min_hairpins_per] = np.nan
#estimate centers of positive and negative gene set distributions
pos_med = np.nanmedian(init_gene_effects[:, np.in1d(self.data_names['genes'], self.gene_sets['pos'])], axis = 1)
neg_med = np.nanmedian(init_gene_effects[:, np.in1d(self.data_names['genes'], self.gene_sets['neg'])], axis = 1)
new_gene_slopes = neg_med - pos_med
self.write('negative gene slopes: {}/{}'.format(np.sum(new_gene_slopes < 0), self.n_CLs))
self.write('init gene slopes avg: {}, new gene slope avg: {}'.format(np.mean(init_gene_slopes), np.mean(new_gene_slopes)))
new_gene_slopes = new_gene_slopes / np.nanmean(new_gene_slopes) #normalize to have mean 1
_=self.sess.run(self.gene_slope.assign(new_gene_slopes.reshape(-1,1)))
def _fit_guide_efficacies(self, feed_dict):
'''
Fit guide_efficacies + intercepts using BFGS
INPUTS:
feed_dict: input data
optim_params: dict of optimization parameters
'''
init_guide_Geffs, init_guide_Seffs = self.sess.run([self.guide_Geff, self.guide_Seff])
optim_res = run_sklearn_optim(self.guide_optim, feed_dict, self.sess, self.loss,
print_freq = self.optim_params['print_freq'])
new_guide_Geffs, new_guide_Seffs = self.sess.run([self.guide_Geff, self.guide_Seff])
self.write('init avg Geff: {} Seff: {}, new avg Geff: {} Seff: {}'.format(np.mean(init_guide_Geffs),
np.mean(init_guide_Seffs), np.mean(new_guide_Geffs), np.mean(new_guide_Seffs)))
return(optim_res)
def _fit_noise_vars(self, LFC_mats, ignore_test = False):
'''Estimate noise variance per CL'''
tot_SSE = np.zeros(self.n_CL_batches)
tot_used_hps = np.zeros(self.n_CL_batches)
batch_offset = 0
for batch_ii, (LFC_mat, pred_mat) in enumerate(zip(LFC_mats, self.predict())):
cur_CL_inds = np.arange(LFC_mat.shape[1]) + batch_offset
cur_d = LFC_mat.values.copy()
if not ignore_test and self.test_inds is not None:
cur_d[self.test_inds[batch_ii]] = np.nan
tot_SSE[cur_CL_inds] += np.nansum((pred_mat - cur_d)**2, axis = 0)
tot_used_hps[cur_CL_inds] += np.sum(~np.isnan(cur_d), axis = 0)
batch_offset += LFC_mat.shape[1]
# dof = tot_used_hps - self.n_targeted_genes - self.n_targeted_seeds - 1 #dof per CL (approximate)
dof = tot_used_hps #dof per CL (gives biased estimate)
per_CL_noise_var = tot_SSE / np.max(np.concatenate([dof.reshape(-1,1), np.ones((self.n_CL_batches, 1))], axis = 1), axis = 1)
self.sess.run(self.CL_noise_vars.assign(per_CL_noise_var.reshape(-1,1).astype(np.float32)))
def compute_R2_stats(self, LFC_mats):
'''
Computes R2 values per CL, and per hairpin
'''
self.CL_R2_df = pd.DataFrame()
self.hp_R2_df = pd.DataFrame()
for batch_id, (LFC_data, | |
<filename>RPSNetwork.py<gh_stars>0
"""
Copyright (c) 2016 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import socket, struct, threading, datetime
from thread import *
#
# Broadcast message of the client.
#
REQ_HELLO = 'HELLO_SERVER'
#
# Answer message of the server for a udp request.
#
ANS_HELLO = 'RPS_SERVER'
#
# Message to request graphs
#
GRAPHS_NEED = 'NEED_GRAPHS'
#
# Message to initiate graph sending
#
GRAPHS_SEND_START = 'START_SENDING_GRAPHS'
#
# Message to stop graph sending
#
GRAPHS_SEND_END = 'END_SENDING_GRAPHS'
#
# Message to request a turn
#
TURN_NEED = 'NEED_TURN'
#
# Message to inform the client, that the next message contains a graph
#
TURN_SEND = 'SEND_TURN'
#
Request of a client when they want a rematch.
#
PLAY_AGAIN_TRUE = 'PL_TRUE'
#
Request of a client when they don't want to play again.
#
PLAY_AGAIN_FALSE = 'PL_FALSE'
#
# Helper functions
#
def send_msg(sock, msg):
"""
Sends the given message with the given socket. This function prepends four bytes to msg.
These four bytes contain the length of the original msg.
:param sock: Socket object to send.
:param msg: String message.
"""
# Stores the length of message in big-endian order
msg = struct.pack('>I', len(msg)) + msg
# Writes all bytes to the stream
sock.sendall(msg)
def recv_msg(sock):
"""
Receives a message from the given socket. This function reads the first four bytes from the stream
to determine the byte-length of the message. Afterwards it reads exactly that many bytes from
the stream.
:param sock: Socket object to receive.
"""
# Reads message length and unpack it into an integer
raw_msglen = recvall(sock, 4)
if not raw_msglen:
return None
msglen = struct.unpack('>I', raw_msglen)[0]
# Reads the whole message
return recvall(sock, msglen)
def recvall(sock, n):
"""
Reads bytes from the given socket stream as long as the received data length is lower than n.
:param sock: Socket object to receive data.
:param n: The target length of the message.
"""
data = ''
while len(data) < n:
packet = sock.recv(n - len(data))
if not packet:
return None
data += packet
return data
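# Hedged example of the framing used by send_msg/recv_msg: a message 'HELLO' is
# sent as a 4-byte big-endian length prefix followed by the payload, e.g.
#
#   struct.pack('>I', len('HELLO')) + 'HELLO'   # -> '\x00\x00\x00\x05HELLO'
#
# recv_msg reads those first four bytes, unpacks the length and then reads
# exactly that many bytes via recvall.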
def out(msg):
"""
Prints out the given message with a timestamp. This function is for debugging purposes.
:param msg:
:return:
"""
now = datetime.datetime.now()
#print (now.__str__() + ' - ' + msg)
class RPSServer:
"""
This class represents the server side of the RPS network.
"""
def __init__(self):
"""
Creates a new RPSServer object.
"""
self.running = False
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock_udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.p1 = None
self.p2 = None
def udp_listener(self):
"""
Listens for messages on the server's UDP port and sends an answer for each incoming request.
"""
while self.running:
m = self.sock_udp.recvfrom(1024)
self.sock_udp.sendto(ANS_HELLO, m[1])
def start(self, tcp_port, udp_port):
"""
Starts listening on udp and tcp sockets.
:param tcp_port: The port to listen with the tcp socket.
:param udp_port: The port to listen with the udp socket.
"""
self.running = True
try:
self.sock.bind(('', tcp_port))
self.sock_udp.bind(('', udp_port))
self.sock.listen(2)
start_new_thread(self.udp_listener, ())
out('Server listen on port '+str(tcp_port)+'/tcp')
out('Server listen on port '+str(udp_port)+'/udp')
except socket.error, msg:
self.sock.close()
self.sock = None
self.running = False
print 'Bind failed. Error Code : ' + str(msg[0]) + ' Message ' + msg[1]
if self.sock is not None:
gt = None
while self.running:
conn, addr = self.sock.accept()
if self.p1 is None:
self.p1 = conn
out('Player 1 is connected')
elif self.p2 is None:
self.p2 = conn
out('Player 2 is connected')
else:
conn.close()
out('rps is already running')
if gt is None and self.p1 is not None and self.p2 is not None:
# If both players are connected, start the game
gt = RPSThread(self.p1, self.p2)
gt.start()
class RPSThread(threading.Thread):
"""
This class represents a thread to handle a game.
"""
def __init__(self, p1, p2):
"""
Creates a new RPSThread object.
:param p1: Socket of player 1.
:param p2: Socket of player 2.
"""
threading.Thread.__init__(self)
self.p1 = p1
self.p2 = p2
def run(self):
is_over = False
proto = RPSProtocol()
out('New rps game started')
while not is_over:
# Loop to process the protocol.
is_over = proto.next_step(self.p1, self.p2)
out('The current rps is over')
self.p1.close()
self.p2.close()
class RPSClient:
"""
This class represents the client side of the RPS network
"""
def __init__(self, timeout=5):
"""
Creates a new RPSClient object.
:param timeout: The timeout of the diagram socket in seconds. The default value are 5 seconds.
"""
self.sock = None
self.timeout = timeout
def discover(self, srv_port):
"""
Sends a broadcast to look for an RPSServer on the given port.
:param srv_port: The port number of the RPSServer.
:return: The address tuple of the RPSServer or None, if it can't find a server.
"""
addr = None
answ = None
# Creates a new datagram socket to broadcast
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
s.settimeout(self.timeout)
s.sendto(REQ_HELLO, ('255.255.255.255', srv_port))
# Wait for a server answer
try:
answ = s.recvfrom(1024)
except socket.timeout:
print 'Timeout exceeded...'
# Close the diagram socket.
s.close()
if answ is not None and answ[0] == ANS_HELLO:
# Saves the address if the server answer was correct.
addr = answ[1]
return addr
def connect(self, addr):
"""
Connects the tcp socket to the given address.
:param addr: Address tuple of the server.
:return: Whether the connect was successfully or not.
"""
result = False
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.sock.connect(addr)
result = True
except socket.error, msg:
self.sock = None
print 'Connect failed. Error Code : ' + str(msg[0]) + ' Message ' + msg[1]
return result
def send(self, msg):
"""
Sends the given message with the tcp socket of self. This function invokes the
send_msg function of that module.
:param msg: String message to send.
"""
if self.sock is not None:
try:
send_msg(self.sock, msg)
except socket.error, msg:
self.sock = None
print 'Send failed. Error Code : ' + str(msg[0]) + ' Message ' + msg[1]
def receive(self):
"""
Receives a message and returns it. This function invokes the recv_msg function
of that module.
:return: The received message or None if the socket of self is None.
"""
if self.sock is not None:
return recv_msg(self.sock)
return None
def prepare(p1, p2):
"""
First protocol step. Exchanges the player names.
:param p1: Socket of player 1.
:param p2: Socket of player 2.
:return:
"""
n1 = recv_msg(p1)
n2 = recv_msg(p2)
out('The name of player 1 is ' + n1)
out('The name of player 2 is ' + n2)
send_msg(p1, n2)
send_msg(p2, n1)
return False
def share_graphs(p1, p2):
"""
Second protocol step. Initializes the graphs and sends them to both players.
:param p1: Socket of player 1.
:param p2: Socket of player 2.
:return:
"""
# Send a message to player 1, so that this player creates the graphs and send
# them to this server.
send_msg(p1, GRAPHS_NEED)
e1 = recv_msg(p1)
e2 = recv_msg(p1)
e3 = recv_msg(p1)
send_msg(p2, GRAPHS_SEND_START)
send_msg(p2, e1)
send_msg(p2, e2)
send_msg(p2, e3)
send_msg(p2, GRAPHS_SEND_END)
return False
def turn(p1, p2):
"""
Third protocol step. This function handles a turn.
:param p1: Socket of player 1.
:param p2: Socket of player 2.
:return: Whether the game is over or not.
"""
send_msg(p1, TURN_NEED)
send_msg(p2, TURN_SEND)
t1 = recv_msg(p1)
send_msg(p2, t1)
t2 = recv_msg(p2)
send_msg(p1, t2)
t3 = recv_msg(p1)
send_msg(p2, t3)
res_p1 = int(recv_msg(p1))
res_p2 = int(recv_msg(p2))
if res_p1 == res_p2:
# If the results aren't equals, the game | |
def objective_hyperopt(space):
"""
Objective function, taking a set of hyperparameters and returning the mean validation RMSE from k-fold cross-validation.
"""
xgb_ordinal_hyperopt = xgb.XGBRegressor(**space, objective="reg:squarederror", verbosity=0, nthread=-1, seed=1)
xgb_ordinal_hyperopt.fit(trainX_ordinal, trainy)
cv_score = cross_val_score(xgb_ordinal_hyperopt, trainX_ordinal, trainy, scoring="neg_root_mean_squared_error", cv=10)
return (-sum(cv_score)/len(cv_score))
# Time duration of HYPEROPT. Give HYPEROPT only 100 evaluations (compared to grid/random search's 8748) to demonstrate the superior performance due to Bayesian Optimisation.
rstate = np.random.default_rng(1) # Reproducibility.
t5 = time()
# return_argmin=False is essential for obtaining the correct best hyperparameters: https://github.com/hyperopt/hyperopt/issues/530
params_hyperopt_best = fmin(fn = objective_hyperopt, space = params_hyperopt, algo = tpe.suggest, max_evals = 100, rstate=rstate, return_argmin=False)
t6 = time()
print("Time elapsed in training: %f" % (t6-t5))
# 100% ... 100/100 [09:17<00:00, 5.57s/trial, best loss: 12.183767804749547]
# Time elapsed in training: 557.220675
# Create XGB using HYPEROPT's best found hyperparameters.
xgb_ordinal_hyperopt_best = xgb.XGBRegressor(**params_hyperopt_best, objective="reg:squarederror", verbosity=0, nthread=-1, seed=1)
xgb_ordinal_hyperopt_best.fit(trainX_ordinal, trainy)
trainPreds = xgb_ordinal_hyperopt_best.predict(trainX_ordinal)
testPreds = xgb_ordinal_hyperopt_best.predict(testX_ordinal)
print("Best HYPEROPT hyperparameters: ")
print(params_hyperopt_best)
# {'colsample_bytree': 0.6597818584110863, 'gamma': 1.7700638017993198, 'learning_rate': 0.16348928243486646, 'max_depth': 6,
# 'min_child_weight': 9, 'n_estimators': 407, 'reg_alpha': 0.1535235589281927, 'subsample': 0.5063560817838623}
print("Train RMSE: %f" % (np.sqrt(mean_squared_error(trainy, trainPreds))))
print("Test RMSE: %f" % (np.sqrt(mean_squared_error(testy, testPreds))))
print("Train Accuracy: %f" % (r2_score(trainy, trainPreds)))
print("Test Accuracy: %f" % (r2_score(testy, testPreds)))
# Train RMSE: 10.567621
# Test RMSE: 11.801092
# Train Accuracy: 0.962829
# Test Accuracy: 0.954932
xgb_ordinal_hyperopt_best.save_model("./results/models/trained/XGB_ord_hyperopt.json")
print("=====================================================================================================================================================================")
print("(4)")
print("XGB - HYPEROPT + TPE Bayesian Optimisation, More Hyperparameters")
print("")
# Hyper-parameters for HYPEROPT Bayesian Optimisation (with TPE).
params_hyperopt_more={
"max_depth": hp.choice("max_depth", np.arange(3, 18+1, dtype=int)),
"gamma": hp.uniform("gamma", 0.05, 20),
"reg_alpha": hp.uniform("reg_alpha", 0, 20),
"reg_lambda": hp.uniform("reg_lambda", 0, 20),
"subsample": hp.uniform("subsample", 0.1, 1),
"colsample_bytree": hp.uniform("colsample_bytree", 0.1, 1),
"colsample_bylevel": hp.uniform("colsample_bylevel", 0.1, 1),
"colsample_bynode": hp.uniform("colsample_bynode", 0.1, 1),
"min_child_weight": hp.choice("min_child_weight", np.arange(0, 10+1, dtype=int)),
"learning_rate": hp.uniform ("learning_rate", 0.005, 1),
"n_estimators": hp.choice("n_estimators", np.arange(10, 1000+1, dtype=int))
}
def objective_hyperopt_more(space):
"""
Objective function, taking a set of hyperparameters and returning the mean validation RMSE from k-fold cross-validation.
"""
xgb_ordinal_hyperopt_more = xgb.XGBRegressor(**space, objective="reg:squarederror", verbosity=0, nthread=-1, seed=1)
xgb_ordinal_hyperopt_more.fit(trainX_ordinal, trainy)
cv_score = cross_val_score(xgb_ordinal_hyperopt_more, trainX_ordinal, trainy, scoring="neg_root_mean_squared_error", cv=10)
return (-sum(cv_score)/len(cv_score))
# Time duration of HYPEROPT.
rstate = np.random.default_rng(1) # Reproducibility.
t5 = time()
# return_argmin=False is essential for obtaining the correct best hyperparameters: https://github.com/hyperopt/hyperopt/issues/530
params_hyperopt_more_best = fmin(fn = objective_hyperopt_more, space = params_hyperopt_more, algo = tpe.suggest, max_evals = 300, rstate=rstate, return_argmin=False)
t6 = time()
print("Time elapsed in training: %f" % (t6-t5))
# 100% ... 300/300 [38:03<00:00, 7.61s/trial, best loss: 12.154993299522264]
# Time elapsed in training: 2283.569938
# Create XGB using HYPEROPTS's best found hyperparameters.
xgb_ordinal_hyperopt_more_best = xgb.XGBRegressor(**params_hyperopt_more_best, objective="reg:squarederror", verbosity=0, nthread=-1, seed=1)
xgb_ordinal_hyperopt_more_best.fit(trainX_ordinal, trainy)
trainPreds = xgb_ordinal_hyperopt_more_best.predict(trainX_ordinal)
testPreds = xgb_ordinal_hyperopt_more_best.predict(testX_ordinal)
print("Best HYPEROPT hyperparameters: ")
print(params_hyperopt_more_best)
# Best HYPEROPT hyperparameters:
# {'colsample_bylevel': 0.7959522883412514, 'colsample_bynode': 0.5887365734644787, 'colsample_bytree': 0.5206615408214966, 'gamma': 4.2522350513885865,
# 'learning_rate': 0.24702299052479343, 'max_depth': 18, 'min_child_weight': 5, 'n_estimators': 854, 'reg_alpha': 5.139231471392408,
# 'reg_lambda': 9.537270700292027, 'subsample': 0.8959962452852309}
print("Train RMSE: %f" % (np.sqrt(mean_squared_error(trainy, trainPreds))))
print("Test RMSE: %f" % (np.sqrt(mean_squared_error(testy, testPreds))))
print("Train Accuracy: %f" % (r2_score(trainy, trainPreds)))
print("Test Accuracy: %f" % (r2_score(testy, testPreds)))
# Train RMSE: 10.285699
# Test RMSE: 11.579282
# Train Accuracy: 0.964786
# Test Accuracy: 0.956611
xgb_ordinal_hyperopt_more_best.save_model("./results/models/trained/XGB_ord_hyperopt_more.json")
print("=====================================================================================================================================================================")
print("(5)")
print("XGB - Trieste + Gaussian Process (GPFlow) Bayesian Optimisation, Same Hyperparameters as Grid/Random Search")
print("")
def objective_trieste(space):
"""
Objective function, taking a set of hyperparameters and returning the mean validation RMSE from k-fold cross-validation.
"""
cv_scores = []
for i in range(0,space.numpy().shape[0]):
# Hyper-parameters for Trieste Bayesian Optimisation (with GP).
# Use same hyperparameters and ranges as for grid/random search for fairness of comparison.
xgb_ordinal_trieste = xgb.XGBRegressor(
**{"gamma": space.numpy()[i][0],
"reg_alpha": space.numpy()[i][1],
"subsample": space.numpy()[i][2],
"colsample_bytree": space.numpy()[i][3],
"learning_rate": space.numpy()[i][4],
"max_depth": space.numpy()[i][5].astype(int),
"min_child_weight": space.numpy()[i][6].astype(int),
"n_estimators": space.numpy()[i][7].astype(int),
},
objective="reg:squarederror", verbosity=0, nthread=-1, seed=1)
xgb_ordinal_trieste.fit(trainX_ordinal, trainy)
cv_k_scores = cross_val_score(xgb_ordinal_trieste, trainX_ordinal, trainy, scoring="neg_root_mean_squared_error", cv=10)
cv_scores.append( [-sum(cv_k_scores)/len(cv_k_scores)] )
return tf.convert_to_tensor(cv_scores, dtype=tf.float64, dtype_hint=None, name=None)
# Time duration of Trieste. Give Trieste only 100 evaluations (compared to grid/random search's 8748) to demonstrate the superior performance due to Bayesian Optimisation.
t5 = time()
observer = mk_observer(objective=objective_trieste)
# Continuous search space/box for [gamma, reg_alpha, subsample, colsample_bytree, learning_rate]
# Discrete hyperparameters are max_depth, min_child_weight, n_estimators.
search_space = space.TaggedProductSearchSpace(
[space.Box([0.5, 0, 0.4, 0.4, 0.01], [5, 5, 1, 1, 0.5]),
space.DiscreteSearchSpace(tf.constant(np.arange(3,7+1, dtype=float).reshape(-1,1))),
space.DiscreteSearchSpace(tf.constant(np.arange(1,10+1, dtype=float).reshape(-1,1))),
space.DiscreteSearchSpace(tf.constant(np.arange(100,600+1, dtype=float).reshape(-1,1)))]
)
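# Note (added for clarity): TaggedProductSearchSpace concatenates the sub-spaces,
# so each query point is an 8-vector whose indices match the unpacking in
# objective_trieste above; a single sampled point could be inspected with:
#
#   pt = search_space.sample(1).numpy()[0]
#   # pt[0:5] -> gamma, reg_alpha, subsample, colsample_bytree, learning_rate
#   # pt[5:8] -> max_depth, min_child_weight, n_estimators (floats, cast to int later)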
# Sample initial objective function points.
num_initial_points = 20
initial_query_points = search_space.sample(num_initial_points)
initial_data = observer(initial_query_points)
# Construct GPFlow model.
def build_model(data):
variance = tf.math.reduce_variance(data.observations)
# Using lengthscales approximately 20% of max possible hyperparameter values appears reasonable.
kernel = gpflow.kernels.Matern52(variance=variance, lengthscales=[1, 1, 0.2, 0.2, 0.1, 1, 2, 100])
prior_scale = tf.cast(1.0, dtype=tf.float64)
kernel.variance.prior = tfp.distributions.LogNormal(
tf.cast(-2.0, dtype=tf.float64), prior_scale
)
kernel.lengthscales.prior = tfp.distributions.LogNormal(
tf.math.log(kernel.lengthscales), prior_scale
)
gpr = gpflow.models.GPR(data.astuple(), kernel, noise_variance=1e-5)
gpflow.set_trainable(gpr.likelihood, False)
return GaussianProcessRegression(gpr, num_kernel_samples=100)
model = build_model(initial_data)
# Define Trieste optimizer and optimize hyperparameters.
trieste_bo = bayesian_optimizer.BayesianOptimizer(observer=observer, search_space=search_space)
num_steps = 80
result = trieste_bo.optimize(num_steps, initial_data, model)
dataset = result.try_get_final_dataset()
# Retrieve best hyperparameters.
query_points = dataset.query_points.numpy()
observations = dataset.observations.numpy()
arg_min_idx = tf.squeeze(tf.argmin(observations, axis=0))
best_point = query_points[arg_min_idx, :]
params_trieste_best = {"gamma": best_point[0],
"reg_alpha": best_point[1],
"subsample": best_point[2],
"colsample_bytree": best_point[3],
"learning_rate": best_point[4],
"max_depth": best_point[5].astype(int),
"min_child_weight": best_point[6].astype(int),
"n_estimators": best_point[7].astype(int),
}
t6 = time()
print("Time elapsed in training: %f" % (t6-t5))
# Time elapsed in training: 786.772372
# Create XGB using Trieste's best found hyperparameters.
xgb_ordinal_trieste_best = xgb.XGBRegressor(**params_trieste_best, objective="reg:squarederror", verbosity=0, nthread=-1, seed=1)
xgb_ordinal_trieste_best.fit(trainX_ordinal, trainy)
trainPreds = xgb_ordinal_trieste_best.predict(trainX_ordinal)
testPreds = xgb_ordinal_trieste_best.predict(testX_ordinal)
print("Best Trieste hyperparameters: ")
print(params_trieste_best)
# Best Trieste hyperparameters:
# {'gamma': 0.5, 'reg_alpha': 5.0, 'subsample': 1.0, 'colsample_bytree': 0.4, 'learning_rate': 0.5,
# 'max_depth': 7, 'min_child_weight': 10, 'n_estimators': 581}
print("Train RMSE: %f" % (np.sqrt(mean_squared_error(trainy, trainPreds))))
print("Test RMSE: %f" % (np.sqrt(mean_squared_error(testy, testPreds))))
print("Train Accuracy: %f" % (r2_score(trainy, trainPreds)))
print("Test Accuracy: %f" % (r2_score(testy, testPreds)))
# Train RMSE: 10.341544
# Test RMSE: 11.626880
# Train Accuracy: 0.964402
# Test Accuracy: 0.956253
xgb_ordinal_trieste_best.save_model("./results/models/trained/XGB_ord_trieste.json")
print("=====================================================================================================================================================================")
print("(6)")
print("XGB - Trieste + Gaussian Process (GPFlow) Bayesian Optimisation, More Hyperparameters")
print("")
def objective_trieste_more(space):
"""
Objective function, taking a set of hyperparameters and returning the mean validation RMSE from k-fold cross-validation.
"""
cv_scores = []
for i in range(0,space.numpy().shape[0]):
# Hyper-parameters for Trieste Bayesian Optimisation (with GP).
xgb_ordinal_trieste_more = xgb.XGBRegressor(
**{"gamma": space.numpy()[i][0],
"reg_alpha": space.numpy()[i][1],
"reg_lambda": space.numpy()[i][2],
"subsample": space.numpy()[i][3],
"colsample_bytree": space.numpy()[i][4],
"colsample_bylevel": space.numpy()[i][5],
"colsample_bynode": space.numpy()[i][6],
"learning_rate": space.numpy()[i][7],
"max_depth": space.numpy()[i][8].astype(int),
"min_child_weight": space.numpy()[i][9].astype(int),
"n_estimators": space.numpy()[i][10].astype(int),
},
objective="reg:squarederror", verbosity=0, nthread=-1, seed=1)
xgb_ordinal_trieste_more.fit(trainX_ordinal, trainy)
cv_k_scores = cross_val_score(xgb_ordinal_trieste_more, trainX_ordinal, trainy, scoring="neg_root_mean_squared_error", cv=10)
cv_scores.append( [-sum(cv_k_scores)/len(cv_k_scores)] )
return tf.convert_to_tensor(cv_scores, dtype=tf.float64, dtype_hint=None, name=None)
# Time duration of Trieste.
t5 = time()
observer = mk_observer(objective=objective_trieste_more)
# Continuous search space/box for [gamma, reg_alpha, reg_lambda, subsample, colsample_bytree, colsample_bylevel, colsample_bynode, learning_rate]
# Discrete hyperparameters are max_depth, min_child_weight, n_estimators.
search_space = space.TaggedProductSearchSpace(
[space.Box([0.05, 0, 0, 0.1, 0.1, 0.1, 0.1, 0.005], [20, 20, 20, 1, 1, 1, 1, 1]),
space.DiscreteSearchSpace(tf.constant(np.arange(3,18+1, dtype=float).reshape(-1,1))),
space.DiscreteSearchSpace(tf.constant(np.arange(0,10+1, dtype=float).reshape(-1,1))),
space.DiscreteSearchSpace(tf.constant(np.arange(10,1000+1, dtype=float).reshape(-1,1)))]
)
# Sample initial objective function points.
num_initial_points = 20
initial_query_points = search_space.sample(num_initial_points)
initial_data = observer(initial_query_points)
# Construct GPFlow model.
def build_model_more(data):
variance = tf.math.reduce_variance(data.observations)
# Using lengthscales approximately 20% of max possible hyperparameter values appears reasonable.
kernel = gpflow.kernels.Matern52(variance=variance, lengthscales=[4, 4, 4, 0.2, 0.2, 0.2, 0.2, 0.2, 3, 2, 200])
prior_scale = tf.cast(1.0, dtype=tf.float64)
kernel.variance.prior = tfp.distributions.LogNormal(
tf.cast(-2.0, dtype=tf.float64), prior_scale
)
kernel.lengthscales.prior = tfp.distributions.LogNormal(
tf.math.log(kernel.lengthscales), prior_scale
)
gpr = gpflow.models.GPR(data.astuple(), kernel, noise_variance=1e-5)
gpflow.set_trainable(gpr.likelihood, False)
return GaussianProcessRegression(gpr, num_kernel_samples=100)
model = build_model_more(initial_data)
# Define Trieste optimizer and optimize hyperparameters.
trieste_bo = bayesian_optimizer.BayesianOptimizer(observer=observer, search_space=search_space)
num_steps = 280
result = trieste_bo.optimize(num_steps, initial_data, model)
dataset = result.try_get_final_dataset()
# Retrieve best hyperparameters.
query_points = dataset.query_points.numpy()
observations = dataset.observations.numpy()
arg_min_idx = tf.squeeze(tf.argmin(observations, axis=0))
best_point_more = query_points[arg_min_idx, :]
params_trieste_more_best = {"gamma": best_point_more[0],
"reg_alpha": best_point_more[1],
"reg_lambda": best_point_more[2],
"subsample": best_point_more[3],
"colsample_bytree": best_point_more[4],
"colsample_bylevel": best_point_more[5],
"colsample_bynode": best_point_more[6],
"learning_rate": best_point_more[7],
"max_depth": best_point_more[8].astype(int),
"min_child_weight": best_point_more[9].astype(int),
"n_estimators": best_point_more[10].astype(int),
}
t6 = time()
print("Time elapsed in training: %f" % (t6-t5))
# Time elapsed in training: 3120.666984
# Create XGB using Trieste's best found hyperparameters.
xgb_ordinal_trieste_more_best = xgb.XGBRegressor(**params_trieste_more_best, objective="reg:squarederror", verbosity=0, nthread=-1, seed=1)
xgb_ordinal_trieste_more_best.fit(trainX_ordinal, trainy)
trainPreds = xgb_ordinal_trieste_more_best.predict(trainX_ordinal)
testPreds = xgb_ordinal_trieste_more_best.predict(testX_ordinal)
print("Best Trieste hyperparameters: ")
print(params_trieste_more_best)
# Best Trieste hyperparameters:
# {'gamma': 1.385229919602636, 'reg_alpha': 17.33424059817828, 'reg_lambda': 18.28910938536196, 'subsample': 0.885860728296831,
# 'colsample_bytree': 0.980718423322313, 'colsample_bylevel': 0.9448228957956979, 'colsample_bynode': 0.2456709938043513,
# 'learning_rate': 0.24339192476833982, 'max_depth': 16, 'min_child_weight': 6, 'n_estimators': 884}
print("Train RMSE: %f" % (np.sqrt(mean_squared_error(trainy, trainPreds))))
print("Test RMSE: %f" | |
self._table = {'data': None}
# build list of currently added Species.
self._species = None
self._species_list = ReturnTable.objects.filter(ret=a_return)
# self._species_list.append(_species.name)
# if (_species.has_rows()):
# self._species_saved.append(_species.name)
# self._species = _species.name
self.get_species_list()
logger.debug('ReturnSheet.__init__() - end')
# @staticmethod
# def set_licence_species(the_return):
# """
# Sets the species from the licence for the current Running Sheet.
# :return:
# """
# # TODO: create default entries for each species on the licence.
# # TODO: Each species has a defaulted Stock Activity (0 Totals).
# # TODO: Call _set_activity_from_previous to carry over Stock totals
# # for Licence reissues.
# '''
# _data = []
# new_sheet = the_return.sheet
# for species in the_return.licence.species_list:
# try:
# _data = {''}
# table_rows = new_sheet._get_table_rows(_data)
# self._return.save_return_table(species, table_rows, request)
# except AttributeError:
# continue
# '''
# pass
@property
def table(self):
"""
Running Sheet Table of data for Species. Defaults to a Species on the
Return if exists.
:return: formatted data.
"""
return self._get_activity(self._species)['data']
@property
def species(self):
"""
Species type associated with this Running Sheet of Activities.
:return:
"""
return self._species
@property
def species_list(self):
'''
Property list of species available on this return sheet.
'''
# return self.get_species_list()
logger.debug('ReturnSheet.species_list() - count {}'.format(
len(self._species_list)
))
return self.get_species_list()
@property
def activity_list(self):
"""
List of stock movement activities applicable for Running Sheet.
Format: "SA01": {
"label": "Stock",
"auto": "false",
"licence": "false",
"pay": "false",
"outward": "SA04"}
Label: Activity Description.
Auto: Flag indicating automated activity.
Licence: Flag indicating licence required for activity.
Pay: Flag indicating payment required for activity.
Inward/Outward: Transfer type with Activity Type for outward transfer.
:return: List of Activities applicable for Running Sheet.
"""
return self.ACTIVITY_OPTIONS
# todo: more generic method name for payment transfer
def process_transfer_fee_payment(self, request):
'''
Process transfer fees.
NOTE: redundant.
'''
from ledger.payments.models import BpointToken
# if self.return_ee_paid:
# return True
application = self.application
applicant = application.proxy_applicant \
if application.proxy_applicant else application.submitter
card_owner_id = applicant.id
card_token = BpointToken.objects.filter(
user_id=card_owner_id).order_by('-id').first()
if not card_token:
logger.error("No card token found for user: %s" % card_owner_id)
return False
product_lines = []
return_submission = u'Transfer of stock for {} Return {}'.format(
u'{} {}'.format(applicant.first_name, applicant.last_name),
application.lodgement_number)
oracle_code = self._return.return_type.oracle_account_code
product_lines.append({
'ledger_description': '{}'.format(self._return.id),
'quantity': 1,
'price_incl_tax': str(self._return.return_fee),
'price_excl_tax': str(calculate_excl_gst(self.licence_fee)),
'oracle_code': oracle_code
})
checkout(
request, application, lines=product_lines,
invoice_text=return_submission,
internal=True,
add_checkout_params={
'basket_owner': request.user.id,
'payment_method': 'card',
'checkout_token': card_<PASSWORD>,
}
)
try:
invoice_ref = request.session['checkout_invoice']
except KeyError:
ID = self.licence_activity_id
logger.error(
"No invoice reference generated for Activity ID: %s" % ID)
return False
ReturnInvoice.objects.get_or_create(
invoice_return=self,
invoice_reference=invoice_ref
)
flush_checkout_session(request.session)
# return self.licence_fee_paid and
# send_activity_invoice_email_notification(
# application, self, invoice_ref, request)
return self.licence_fee_paid
def store(self, request):
"""
Save the current state of this Return Sheet.
:param request:
:return:
"""
for species in self.species_list:
_data = request.data.get(species)
if not _data:
continue
try:
# _data = request.data.get(species).encode('utf-8')
_data = ast.literal_eval(_data) # ast should convert to tuple.
table_rows = self._get_table_rows(_data)
self._return.save_return_table(species, table_rows, request)
except AttributeError as e:
logger.info('ReturnSheet.store() ID: {0} {1} - {2}'.format(
self._return.id, species, e
))
continue
self._add_transfer_activity(request)
def set_species(self, _species):
"""
Sets the species for the current Running Sheet.
:param _species:
:return:
"""
self._species = _species
# self._species_list.add(_species)
def get_species(self):
"""
Gets the species for the current Running Sheet.
:return:
"""
return self._species
def get_species_saved(self):
'''
Getter for saved species on the Return for this sheet.
{
'S000001': 'Western Grey Kangaroo', 'S000002': 'Western Red Kangaroo',
'S000003': 'Blue Banded Bee', 'S000004': 'Orange-Browed Resin Bee'
}
:return: list of species saved on the return.
'''
logger.debug('ReturnSheet.get_species_saved() - start')
new_list = {}
util = ReturnSpeciesUtility(self._return)
ordered = self._species_list.order_by('name')
for _species in ordered:
if (_species.has_rows()):
name_str = util.get_species_name_from_id(_species.name)
new_list[_species.name] = name_str
self._species_saved.append(new_list)
logger.debug('ReturnSheet.get_species_saved() - end')
return new_list
def get_species_list(self):
'''
Getter for species available on the Return for this sheet.
{
'S000001': 'Western Grey Kangaroo', 'S000002': 'Western Red Kangaroo',
'S000003': 'Blue Banded Bee', 'S000004': 'Orange-Browed Resin Bee'
}
:return: list of species available on the return.
'''
logger.debug('ReturnSheet.get_species_list() - start')
available_list = {}
util = ReturnSpeciesUtility(self._return)
for _species in self._species_list:
self._species = _species.name
if (_species.has_rows()):
self._species = _species.name
break
name_str = util.get_species_name_from_id(self._species)
available_list[self._species] = name_str
for _species in self._species_list:
name_str = util.get_species_name_from_id(_species.name)
available_list[_species.name] = name_str
# if _species.name != self._species:
# name_str = util.get_species_name_from_id(_species.name)
# available_list[_species.name] = name_str
logger.debug('ReturnSheet.get_species_list() - end')
return available_list
def get_species_list_from_tsc(self):
'''
Getter for species available on the Return for this sheet. The species
are taken from TSC server (redundant).
{
'S000001': 'Western Grey Kangaroo', 'S000002': 'Western Red Kangaroo',
'S000003': 'Blue Banded Bee', 'S000004': 'Orange-Browed Resin Bee'
}
:return: List of Species.
'''
from wildlifecompliance.components.licences.models import (
LicenceSpecies
)
new_list = {}
for _species in ReturnTable.objects.filter(ret=self._return):
lic_specie = LicenceSpecies.objects.filter(
specie_id=int(_species.name)
)
lic_specie_data = lic_specie[0].data
lic_specie_name = lic_specie_data[0]['vernacular_names']
_species_detail = ReturnRow.objects.filter(return_table=_species)
if _species_detail.exists():
value = lic_specie_name
new_list[_species.name] = value
self._species = _species.name
self._species_list.append(new_list)
return new_list
def is_valid_transfer(self, req):
"""
Validate transfer request details.
:param request:
:return:
"""
is_valid = True # applying fuzzy logic.
if not req.data.get('transfer'):
return False
# _data = req.data.get('transfer').encode('utf-8')
_data = req.data.get('transfer')
_transfers = ast.literal_eval(_data)
_lic = _transfers['licence']
is_valid = \
False if not is_valid else self._is_valid_transfer_licence(_lic)
is_valid = \
False if not is_valid else self._is_valid_transfer_quantity(req)
return is_valid
def _get_activity(self, _species_id):
"""
Get Running Sheet activity for the movement of Species stock.
:return:
formatted data {
'name': 'speciesId',
'data': [{'date': '2019/01/23', 'activity': 'SA01', ..., }]}
"""
self._species = _species_id
for resource in self._return.return_type.resources:
_resource_name = _species_id
_schema = Schema(resource.get('schema'))
try:
_r_table = self._return.returntable_set.get(
name=_resource_name)
rows = [
_r_row.data for _r_row in _r_table.returnrow_set.all()
]
_schema.set_field_for(rows) # Add missing schema fields.
_schema.rows_validator(rows)
self._table['data'] = rows
self._table['echo'] = 1
self._table['totalRecords'] = str(rows.__len__())
self._table['totalDisplayRecords'] = str(rows.__len__())
except ReturnTable.DoesNotExist:
self._table = self._NO_ACTIVITY
return self._table
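# Hedged example of the structure returned above (row contents are illustrative):
#
#   {"data": [{"date": "2019/01/23", "activity": "SA01", "qty": "2", "rowId": "0"}],
#    "echo": 1, "totalRecords": "1", "totalDisplayRecords": "1"}
#
# When no ReturnTable exists for the requested species, self._NO_ACTIVITY is
# returned unchanged.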
def _get_table_rows(self, _data):
"""
Gets the formatted row of data from Species data.
:param _data:
:return:
"""
by_column = dict([])
# by_column is of format {'col_header':[row1_val, row2_val,...],...}
key_values = []
num_rows = 0
# if isinstance(_data, tuple):
# for key in _data[0].keys():
# for cnt in range(_data.__len__()):
# key_values.append(_data[cnt][key])
# by_column[key] = key_values
# key_values = []
# num_rows = len(list(by_column.values())[0])\
# if len(by_column.values()) > 0 else 0
# else:
# for key in _data[0].keys():
# by_column[key] = _data[0][key]
# num_rows = num_rows + 1
for key in _data[0].keys():
for cnt in range(_data.__len__()):
key_values.append(_data[cnt][key])
by_column[key] = key_values
key_values = []
num_rows = len(list(by_column.values())[0])\
if len(by_column.values()) > 0 else 0
rows = []
for row_num in range(num_rows):
row_data = {}
# if num_rows > 1:
# for key, value in by_column.items():
# row_data[key] = value[row_num]
# else:
# row_data = by_column
for key, value in by_column.items():
row_data[key] = value[row_num]
# filter empty rows.
is_empty = True
for value in row_data.values():
if value and len(value.strip()) > 0:
is_empty = False
break
if not is_empty:
row_data['rowId'] = str(row_num)
rows.append(row_data)
return rows
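# Hedged example of the transformation above (field names are illustrative):
#
#   _data = ({"date": "2019/01/23", "activity": "SA01", "qty": "5"},
#            {"date": "2019/01/24", "activity": "SA04", "qty": "2"})
#   # _get_table_rows(_data) ->
#   # [{"date": "2019/01/23", "activity": "SA01", "qty": "5", "rowId": "0"},
#   #  {"date": "2019/01/24", "activity": "SA04", "qty": "2", "rowId": "1"}]
#
# Rows whose values are all blank are filtered out before a rowId is assigned.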
def _get_licence_return(self, licence_no):
"""
Method to retrieve Return with Running Sheet from a Licence No.
:param licence_no:
:return: a Return object.
"""
TYPE = ReturnType.FORMAT_SHEET
try:
return Return.objects.filter(licence__licence_number=licence_no,
return_type__data_format=TYPE
).first()
except Return.DoesNotExist:
raise ValidationError({'error': 'Error exception.'})
def _add_transfer_activity(self, request):
"""
Add transfer activity to a validated receiving licence return.
:param request:
:return:
"""
if not request.data.get('transfer'):
return False
# _data = request.data.get('transfer').encode('utf-8')
_data = request.data.get('transfer')
_transfers = ast.literal_eval(_data)
if isinstance(_transfers, tuple):
for transfer in _transfers:
a_transfer = ReturnActivity.factory(transfer)
a_transfer.store_transfer_activity(
transfer['species_id'], request, self._return)
else:
a_transfer = ReturnActivity.factory(_transfers)
a_transfer.store_transfer_activity(
_transfers['species_id'], request, self._return)
def _is_valid_transfer_licence(self, _licence):
"""
Method to check if licence is current.
:return: boolean
"""
return True if self._get_licence_return(_licence) else False
def _is_valid_transfer_quantity(self, request):
"""
Method to check transfer transfer quantity does not exceed total.
:param request:
:return: boolean
"""
# TODO: This validation is not completed.
if not request.data.get('transfer'):
return False
# data = request.data.get('transfer').encode('utf-8')
data = request.data.get('transfer')
ast.literal_eval(data)
# quantity = transfers['qty']
# species_id = transfers['transfer']
'''
return_table = ReturnTable.objects.get(
name=species, ret=to_return)[0]
rows = ReturnRow.objects.filter(return_table=return_table)
# optimistic load of rows.
table_rows = []
r_exists = False
total = 0
for r in rows: # update total and status for accepted activity.
if r.data[self._ACTIVITY_DATE] == self.date:
r_exists = True
in the entire bot",
cog_name="developer"
)
@is_developer()
async def disable_command(self, ctx, cmds: Greedy[CommandConverter]=None):
"""Allows a developer to disable a command in the whole bot
:param ctx: The context of where the message was sent
        :param cmds: The command(s) to disable
"""
# Check if there is no command to enable
if not cmds:
await ctx.send(
embed=get_error_message("You need to specify the command(s) to diable.")
)
# There is a command to enable
else:
# Check that it's a valid command in the bot
messages = []
for cmd in cmds:
if isinstance(cmd, str):
messages.append(f"**{cmd} is not a valid command!**")
# The command is valid, enable it if possible
else:
disabled = await database.bot.disable_command(cmd.qualified_name)
if not disabled:
messages.append(f"__`{cmd.qualified_name}` is already disabled!__")
else:
messages.append(f"*`{cmd.qualified_name}` has been disabled*")
await ctx.send(embed = Embed(
title = "Globally Disable Commands",
description = "\n".join(messages),
colour = await get_embed_color(ctx.author)
))
@command(
name="globallyEnableCog",
aliases=["geco", "gEnableC"],
description="Enables a specified cog in the whole bot",
cog_name="developer"
)
@is_developer()
async def enable_cog(self, ctx, cog_name=None):
"""Enables a specified cog in the whole bot
:param cog_name: The cog to enable
"""
        # Check if there is no cog to enable
if not cog_name:
await ctx.send(
embed=get_error_message("You need to specify the cog to disable.")
)
        # There is a cog to enable
else:
            # Check that it's a valid cog in the bot
cog = self.bot.get_cog(cog_name)
if not cog:
await ctx.send(
embed=get_error_message("That cog does not exist!")
)
            # The cog is valid, enable it if possible
else:
enabled = await database.bot.enable_cog(cog.qualified_name)
if not enabled:
await ctx.send(
embed=get_error_message("That cog is already enabled!")
)
else:
await ctx.send(
embed=Embed(
title="Cog enabled",
description="`{}` has been enabled".format(cog.qualified_name),
colour=await get_embed_color(ctx.author)
)
)
@command(
name="globallyDisableCog",
aliases=["gdco", "gDisableC"],
description="Disables a specified cog in the entire bot",
cog_name="developer"
)
@is_developer()
async def disable_cog(self, ctx, cog_name=None):
"""Allows a developer to disable a cog in the whole bot
:param ctx: The context of where the message was sent
        :param cog_name: The cog to disable
"""
# Check if there is no cog to disable
if not cog_name:
await ctx.send(
embed=get_error_message("You need to specify the cog to disable.")
)
# There is a cog to disable
else:
# Check that it's a valid cog in the bot
cog = self.bot.get_cog(cog_name)
if not cog:
await ctx.send(
embed=get_error_message("That cog does not exist!")
)
# The cog is valid, disable it if possible
else:
disabled = await database.bot.disable_cog(cog.qualified_name)
if not disabled:
await ctx.send(
embed=get_error_message("That cog is already disabled!")
)
else:
await ctx.send(
embed=Embed(
title="Cog disabled",
description="`{}` has been disabled".format(cog.qualified_name),
colour=await get_embed_color(ctx.author)
)
)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
async def view_cases(self, ctx, specific=None, *, bugs=True, unseen_only=True):
"""Let's a user view and scroll through given case numbers pertaining to either
bug reports or suggestions
:param ctx: The context of where the message was sent
:param specific: The specific case to look at first (Default: 1)
:param bugs: Whether or not the cases passed through are bug cases
        :param unseen_only: Whether or not to view unseen cases only
"""
# Check if the cases are from bug reports or suggestions
case_id = "bug" if bugs else "suggestion"
if bugs:
cases = await database.case_numbers.get_bug_cases()
else:
cases = await database.case_numbers.get_suggestion_cases()
cases = cases["cases"]
# Get a list of all unseen cases in the bot
cases = {
case: cases[case]
for case in cases
if not unseen_only or not cases[case]["seen"]
}
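        # e.g. with unseen_only=True and hypothetical cases like
        #   {"1": {"seen": False, ...}, "2": {"seen": "dev_id", ...}}
        # only case "1" survives the filter above (data shown for illustration only).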
# Check if there are any unseen bugs
if len(cases) > 0:
# Keep track of the case numbers, the current case, and
# create a lambda function to get the current case with ease
case_numbers = list(cases.keys())
current_case = 0
def get_case(specific_case):
return cases[case_numbers[specific_case]]
# Check if the user wants to start at a specific case number
# if the case number is not found, the current case number still starts
# at the lowest one
if specific is not None:
if specific in case_numbers:
current_case = case_numbers.index(specific)
else:
current_case = -1
await ctx.send(embed=get_error_message("There are no cases with the specified case number."))
# Setup an embed to view the current unseen bug case
if current_case != -1:
author = self.bot.get_user(int(get_case(current_case)["author"]))
developer = self.bot.get_user(int(get_case(current_case)["seen"])) if get_case(current_case)[
"seen"] else None
def create_bug_embed(color, *, without_reactions=True):
embed = Embed(
title="Bug (#{})".format(str(case_numbers[current_case])),
description="_ _",
colour=color,
timestamp=dict_to_datetime(get_case(current_case)["time"]),
url = GITHUB_ISSUE_URL.format(get_case(current_case)["github_issue"])
).add_field(
name="Source Type",
value=get_case(current_case)["source_type"]
).add_field(
name="Source",
value=get_case(current_case)["source"]
).add_field(
name="Bug",
value=get_case(current_case)["bug"],
inline=False
).add_field(
name="Seen?",
value="No" if developer is None else "Yes, by {}".format(str(developer))
).add_field(
name="Fixed?",
value="No" if not get_case(current_case)["fixed"] else "Yes"
)
if not without_reactions:
embed.add_field(
name = "_ _",
value = (
"React with {} to view the bug\n" +
"React with {} to mark the bug as fixed\n"
).format(CHECK_MARK, CONSIDER),
inline = False
)
return embed
def create_suggestion_embed(color, *, without_reactions=True):
embed = Embed(
title="Suggestion (#{})".format(str(case_numbers[current_case])),
description="_ _",
colour=color,
timestamp=dict_to_datetime(get_case(current_case)["time"]),
url = GITHUB_ISSUE_URL.format(get_case(current_case)["github_issue"])
).add_field(
name="Suggestion",
value=get_case(current_case)["suggestion"],
inline=False
).add_field(
name="Seen?",
value="No" if developer is None else "Yes, by {}".format(str(developer))
).add_field(
name="Considered?",
value="Not Yet" if get_case(current_case)["consideration"] is None else (
"No\n**Reason**: {}".format(get_case(current_case)["consideration"]["reason"])
if not get_case(current_case)["consideration"]["considered"] else "Yes"
)
)
if not without_reactions:
embed.add_field(
name = "_ _",
value = (
"React with {} to mark the suggestion as seen\n" +
"React with {} to consider the suggestion\n" +
"React with {} to not consider the suggestion\n"
).format(CHECK_MARK, CONSIDER, NOT_CONSIDER),
inline = False
)
return embed
if bugs:
embed = create_bug_embed(await get_embed_color(author))
embed_copy = create_bug_embed(await get_embed_color(author), without_reactions = False)
# Update the message in the Bug channel
channel = self.bot.get_channel(int(environ["BUG_CHANNEL"]))
bug_message = await channel.fetch_message(get_case(current_case)["message_id"])
else:
embed = create_suggestion_embed(await get_embed_color(author))
embed_copy = create_suggestion_embed(await get_embed_color(author), without_reactions = False)
# Update the message in the Suggestion channel
channel = self.bot.get_channel(int(environ["SUGGESTION_CHANNEL"]))
suggestion_message = await channel.fetch_message(get_case(current_case)["message_id"])
# Let the user view all the bug reports in a scrolling embed
message = await ctx.send(embed=embed_copy)
await add_scroll_reactions(message, cases)
await message.add_reaction(CHECK_MARK)
await message.add_reaction(CONSIDER)
if not bugs:
await message.add_reaction(NOT_CONSIDER)
while True:
# Wait for the user to react with what reaction they want to do
# i.e. left arrow moves to the case to the left (if any)
def check_reaction(r, u):
return (
r.message.id == message.id and
u.id == ctx.author.id and
str(r) in (SCROLL_REACTIONS + [CHECK_MARK, CONSIDER, NOT_CONSIDER])
)
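                    # race reaction_add against reaction_remove so paging works whether
                    # the user adds or removes a reaction; the unfinished waiter is
                    # cancelled right below.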
done, pending = await wait([
self.bot.wait_for("reaction_add", check=check_reaction),
self.bot.wait_for("reaction_remove", check=check_reaction)
], return_when=FIRST_COMPLETED)
reaction, user = done.pop().result()
for future in pending:
future.cancel()
# Check if the reaction is the first page
if str(reaction) == FIRST_PAGE:
current_case = 0
# The reaction is the last page
elif str(reaction) == LAST_PAGE:
current_case = len(case_numbers) - 1
# The reaction is the previous page
elif str(reaction) == PREVIOUS_PAGE:
current_case -= 1 if current_case > 0 else 0
# The reaction is the next page
elif str(reaction) == NEXT_PAGE:
current_case += 1 if current_case < len(case_numbers) - 1 else 0
# The reaction is the check mark (mark case number as seen)
elif (str(reaction) == CHECK_MARK and not get_case(current_case)["seen"]) or str(reaction) in [
CONSIDER, NOT_CONSIDER]:
# Mark the case as seen
if bugs:
# Mark the bug as fixed
if str(reaction) == CONSIDER and not get_case(current_case)["fixed"]:
await fix_issue(get_case(current_case)["github_issue"])
await database.case_numbers.fix_bug(case_numbers[current_case])
await database.case_numbers.mark_bug_seen(case_numbers[current_case], ctx.author)
else:
# Consider or Don't Consider the suggestion
if str(reaction) in [CONSIDER, NOT_CONSIDER]:
# Ask the developer for the reason why the suggestion was not considered
reason = None
if str(reaction) == NOT_CONSIDER:
ask_for_reason_message = await ctx.send(embed=Embed(
title="Give a reason",
description="Specify the reason why the suggestion was not considered.",
colour=await get_embed_color(ctx.author)
))
reason_message = await self.bot.wait_for("message", check=lambda m: (
m.author.id == ctx.author.id and
m.channel.id == ctx.channel.id
))
reason = reason_message.content
await ask_for_reason_message.delete()
await reason_message.delete()
# Consider or Don't Consider the suggestion
await fix_issue(
get_case(current_case)["github_issue"],
                                        reason = reason if
try:
from urllib.parse import quote_plus
except ImportError:
from urllib import quote_plus
import processout
import json
from processout.networking.request import Request
from processout.networking.response import Response
# The content of this file was automatically generated
class Card(object):
def __init__(self, client, prefill=None):
self._client = client
self._id = None
self._project = None
self._project_id = None
self._token = None
self._scheme = None
self._co_scheme = None
self._preferred_scheme = None
self._type = None
self._bank_name = None
self._brand = None
self._category = None
self._iin = None
self._last_4_digits = None
self._exp_month = None
self._exp_year = None
self._cvc_check = None
self._avs_check = None
self._name = None
self._address1 = None
self._address2 = None
self._city = None
self._state = None
self._zip = None
self._country_code = None
self._ip_address = None
self._fingerprint = None
self._token_type = None
self._metadata = None
self._expires_soon = None
self._sandbox = None
self._created_at = None
if prefill is not None:
self.fill_with_data(prefill)
@property
def id(self):
"""Get id"""
return self._id
@id.setter
def id(self, val):
"""Set id
Keyword argument:
val -- New id value"""
self._id = val
return self
@property
def project(self):
"""Get project"""
return self._project
@project.setter
def project(self, val):
"""Set project
Keyword argument:
val -- New project value"""
if val is None:
self._project = val
return self
if isinstance(val, dict):
obj = processout.Project(self._client)
obj.fill_with_data(val)
self._project = obj
else:
self._project = val
return self
@property
def project_id(self):
"""Get project_id"""
return self._project_id
@project_id.setter
def project_id(self, val):
"""Set project_id
Keyword argument:
val -- New project_id value"""
self._project_id = val
return self
@property
def token(self):
"""Get token"""
return self._token
@token.setter
def token(self, val):
"""Set token
Keyword argument:
val -- New token value"""
if val is None:
self._token = val
return self
if isinstance(val, dict):
obj = processout.Token(self._client)
obj.fill_with_data(val)
self._token = obj
else:
self._token = val
return self
@property
def scheme(self):
"""Get scheme"""
return self._scheme
@scheme.setter
def scheme(self, val):
"""Set scheme
Keyword argument:
val -- New scheme value"""
self._scheme = val
return self
@property
def co_scheme(self):
"""Get co_scheme"""
return self._co_scheme
@co_scheme.setter
def co_scheme(self, val):
"""Set co_scheme
Keyword argument:
val -- New co_scheme value"""
self._co_scheme = val
return self
@property
def preferred_scheme(self):
"""Get preferred_scheme"""
return self._preferred_scheme
@preferred_scheme.setter
def preferred_scheme(self, val):
"""Set preferred_scheme
Keyword argument:
val -- New preferred_scheme value"""
self._preferred_scheme = val
return self
@property
def type(self):
"""Get type"""
return self._type
@type.setter
def type(self, val):
"""Set type
Keyword argument:
val -- New type value"""
self._type = val
return self
@property
def bank_name(self):
"""Get bank_name"""
return self._bank_name
@bank_name.setter
def bank_name(self, val):
"""Set bank_name
Keyword argument:
val -- New bank_name value"""
self._bank_name = val
return self
@property
def brand(self):
"""Get brand"""
return self._brand
@brand.setter
def brand(self, val):
"""Set brand
Keyword argument:
val -- New brand value"""
self._brand = val
return self
@property
def category(self):
"""Get category"""
return self._category
@category.setter
def category(self, val):
"""Set category
Keyword argument:
val -- New category value"""
self._category = val
return self
@property
def iin(self):
"""Get iin"""
return self._iin
@iin.setter
def iin(self, val):
"""Set iin
Keyword argument:
val -- New iin value"""
self._iin = val
return self
@property
def last_4_digits(self):
"""Get last_4_digits"""
return self._last_4_digits
@last_4_digits.setter
def last_4_digits(self, val):
"""Set last_4_digits
Keyword argument:
val -- New last_4_digits value"""
self._last_4_digits = val
return self
@property
def exp_month(self):
"""Get exp_month"""
return self._exp_month
@exp_month.setter
def exp_month(self, val):
"""Set exp_month
Keyword argument:
val -- New exp_month value"""
self._exp_month = val
return self
@property
def exp_year(self):
"""Get exp_year"""
return self._exp_year
@exp_year.setter
def exp_year(self, val):
"""Set exp_year
Keyword argument:
val -- New exp_year value"""
self._exp_year = val
return self
@property
def cvc_check(self):
"""Get cvc_check"""
return self._cvc_check
@cvc_check.setter
def cvc_check(self, val):
"""Set cvc_check
Keyword argument:
val -- New cvc_check value"""
self._cvc_check = val
return self
@property
def avs_check(self):
"""Get avs_check"""
return self._avs_check
@avs_check.setter
def avs_check(self, val):
"""Set avs_check
Keyword argument:
val -- New avs_check value"""
self._avs_check = val
return self
@property
def name(self):
"""Get name"""
return self._name
@name.setter
def name(self, val):
"""Set name
Keyword argument:
val -- New name value"""
self._name = val
return self
@property
def address1(self):
"""Get address1"""
return self._address1
@address1.setter
def address1(self, val):
"""Set address1
Keyword argument:
val -- New address1 value"""
self._address1 = val
return self
@property
def address2(self):
"""Get address2"""
return self._address2
@address2.setter
def address2(self, val):
"""Set address2
Keyword argument:
val -- New address2 value"""
self._address2 = val
return self
@property
def city(self):
"""Get city"""
return self._city
@city.setter
def city(self, val):
"""Set city
Keyword argument:
val -- New city value"""
self._city = val
return self
@property
def state(self):
"""Get state"""
return self._state
@state.setter
def state(self, val):
"""Set state
Keyword argument:
val -- New state value"""
self._state = val
return self
@property
def zip(self):
"""Get zip"""
return self._zip
@zip.setter
def zip(self, val):
"""Set zip
Keyword argument:
val -- New zip value"""
self._zip = val
return self
@property
def country_code(self):
"""Get country_code"""
return self._country_code
@country_code.setter
def country_code(self, val):
"""Set country_code
Keyword argument:
val -- New country_code value"""
self._country_code = val
return self
@property
def ip_address(self):
"""Get ip_address"""
return self._ip_address
@ip_address.setter
def ip_address(self, val):
"""Set ip_address
Keyword argument:
val -- New ip_address value"""
self._ip_address = val
return self
@property
def fingerprint(self):
"""Get fingerprint"""
return self._fingerprint
@fingerprint.setter
def fingerprint(self, val):
"""Set fingerprint
Keyword argument:
val -- New fingerprint value"""
self._fingerprint = val
return self
@property
def token_type(self):
"""Get token_type"""
return self._token_type
@token_type.setter
def token_type(self, val):
"""Set token_type
Keyword argument:
val -- New token_type value"""
self._token_type = val
return self
@property
def metadata(self):
"""Get metadata"""
return self._metadata
@metadata.setter
def metadata(self, val):
"""Set metadata
Keyword argument:
val -- New metadata value"""
self._metadata = val
return self
@property
def expires_soon(self):
"""Get expires_soon"""
return self._expires_soon
@expires_soon.setter
def expires_soon(self, val):
"""Set expires_soon
Keyword argument:
val -- New expires_soon value"""
self._expires_soon = val
return self
@property
def sandbox(self):
"""Get sandbox"""
return self._sandbox
@sandbox.setter
def sandbox(self, val):
"""Set sandbox
Keyword argument:
val -- New sandbox value"""
self._sandbox = val
return self
@property
def created_at(self):
"""Get created_at"""
return self._created_at
@created_at.setter
def created_at(self, val):
"""Set created_at
Keyword argument:
val -- New created_at value"""
self._created_at = val
return self
def fill_with_data(self, data):
"""Fill the current object with the new values pulled from data
Keyword argument:
data -- The data from which to pull the new values"""
if "id" in data.keys():
self.id = data["id"]
if "project" in data.keys():
self.project = data["project"]
if "project_id" in data.keys():
self.project_id = data["project_id"]
if "token" in data.keys():
self.token = data["token"]
if "scheme" in data.keys():
self.scheme = data["scheme"]
if "co_scheme" in data.keys():
self.co_scheme = data["co_scheme"]
if "preferred_scheme" in data.keys():
self.preferred_scheme = data["preferred_scheme"]
if "type" in data.keys():
self.type = data["type"]
if "bank_name" in data.keys():
self.bank_name = data["bank_name"]
if "brand" in data.keys():
self.brand = data["brand"]
if "category" in data.keys():
self.category = data["category"]
if "iin" in data.keys():
self.iin = data["iin"]
if "last_4_digits" in data.keys():
self.last_4_digits = data["last_4_digits"]
if "exp_month" in data.keys():
self.exp_month = data["exp_month"]
if "exp_year" in data.keys():
self.exp_year = data["exp_year"]
if "cvc_check" in data.keys():
self.cvc_check = data["cvc_check"]
if "avs_check" in data.keys():
self.avs_check = data["avs_check"]
if "name" in data.keys():
self.name = data["name"]
if "address1" in data.keys():
self.address1 = data["address1"]
if "address2" in data.keys():
self.address2 = data["address2"]
if "city" in data.keys():
self.city = data["city"]
if "state" in data.keys():
self.state = data["state"]
if "zip" in data.keys():
self.zip = data["zip"]
if "country_code" in data.keys():
self.country_code = data["country_code"]
if "ip_address" in data.keys():
self.ip_address = data["ip_address"]
if "fingerprint" in data.keys():
self.fingerprint = data["fingerprint"]
if "token_type" in data.keys():
self.token_type = data["token_type"]
if "metadata" in data.keys():
self.metadata = data["metadata"]
if "expires_soon" in data.keys():
self.expires_soon = data["expires_soon"]
if "sandbox" in data.keys():
self.sandbox = data["sandbox"]
if "created_at" in data.keys():
self.created_at = data["created_at"]
return self
def to_json(self):
return {
"id": self.id,
"project": self.project,
"project_id": self.project_id,
"token": self.token,
"scheme": self.scheme,
"co_scheme": self.co_scheme,
"preferred_scheme": self.preferred_scheme,
"type": self.type,
"bank_name": self.bank_name,
"brand": self.brand,
"category": self.category,
"iin": self.iin,
"last_4_digits": self.last_4_digits,
"exp_month": self.exp_month,
"exp_year": self.exp_year,
"cvc_check": self.cvc_check,
"avs_check": self.avs_check,
"name": self.name,
"address1": self.address1,
"address2": self.address2,
"city": self.city,
"state": self.state,
"zip": self.zip,
"country_code": self.country_code,
"ip_address": self.ip_address,
"fingerprint": self.fingerprint,
"token_type": self.token_type,
"metadata": self.metadata,
"expires_soon": self.expires_soon,
"sandbox": self.sandbox,
"created_at": self.created_at,
}
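    # Minimal usage sketch (illustrative only; `client` is assumed to be a
    # configured ProcessOut client, and the field values are made up):
    #   card = Card(client, prefill={"iin": "424242", "last_4_digits": "4242",
    #                                "exp_month": 12, "exp_year": 2030})
    #   card.to_json()["last_4_digits"]  # -> "4242"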
def all(self, options={}):
"""Get all the cards.
Keyword argument:
options -- Options for the request"""
self.fill_with_data(options)
request = Request(self._client)
| |
<filename>micropsi_core/tests/test_node_netapi.py
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
"""
Tests for netapi, i.e. the interface native modules will be developed against
"""
import pytest
from micropsi_core import runtime as micropsi
def prepare(fixed_nodenet):
nodenet = micropsi.get_nodenet(fixed_nodenet)
netapi = nodenet.netapi
source = netapi.create_node("Register", None, "Source")
netapi.link(source, "gen", source, "gen")
source.activation = 1
nodenet.step()
return nodenet, netapi, source
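# Note on prepare(): the self-linked "Source" register (gen -> gen) with activation 1
# keeps its own activation alive across net steps, so the tests below can treat it
# as a constant activation source without re-activating it each step.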
def test_node_netapi_create_register_node(fixed_nodenet):
# test register node creation
net, netapi, source = prepare(fixed_nodenet)
node = netapi.create_node("Register", None, "TestName")
# basic logic tests
assert node is not None
root_ns = netapi.get_nodespace(None)
assert node.parent_nodespace == root_ns.uid
assert node.type == "Register"
assert node.uid is not None
assert len(node.get_gate('gen').get_links()) == 0
assert len(node.get_gate('gen').activations) == 1
# frontend/persistency-oriented data dictionary test
data = node.get_data()
assert data['uid'] == node.uid
assert data['name'] == node.name
assert data['type'] == node.type
node = netapi.create_node("Register", None)
    # TODO: the weirdness, server-internally, we return uids as names, clients don't see this, confusion ensues
# assert data['name'] == node.name
def test_node_netapi_create_pipe_node(fixed_nodenet):
# test concept node generation
net, netapi, source = prepare(fixed_nodenet)
node = netapi.create_node("Pipe", None, "TestName")
# basic logic tests
assert node is not None
assert node.parent_nodespace == netapi.get_nodespace(None).uid
assert node.type == "Pipe"
assert node.uid is not None
assert len(node.get_gate('gen').get_links()) == 0
assert len(node.get_gate('gen').activations) == 1
assert len(node.get_gate('sub').get_links()) == 0
assert len(node.get_gate('sub').activations) == 1
assert len(node.get_gate('sur').get_links()) == 0
assert len(node.get_gate('sur').activations) == 1
assert len(node.get_gate('por').get_links()) == 0
assert len(node.get_gate('por').activations) == 1
assert len(node.get_gate('ret').get_links()) == 0
assert len(node.get_gate('ret').activations) == 1
assert len(node.get_gate('cat').get_links()) == 0
assert len(node.get_gate('cat').activations) == 1
assert len(node.get_gate('exp').get_links()) == 0
assert len(node.get_gate('exp').activations) == 1
# frontend/persistency-oriented data dictionary test
data = node.get_data()
assert data['uid'] == node.uid
for key in node.get_gate_types():
assert key not in data['gate_parameters']
for parameter, value in node.nodetype.gate_defaults[key].items():
assert node.get_gate(key).get_parameter(parameter) == value
assert data['name'] == node.name
assert data['type'] == node.type
node = netapi.create_node("Pipe", None)
    # TODO: the weirdness, server-internally, we return uids as names, clients don't see this, confusion ensues
# assert data['name'] == node.name
@pytest.mark.engine("dict_engine")
def test_node_netapi_create_concept_node(fixed_nodenet):
# test concept node generation
net, netapi, source = prepare(fixed_nodenet)
node = netapi.create_node("Concept", None, "TestName")
# basic logic tests
assert node is not None
assert node.parent_nodespace == netapi.get_nodespace(None).uid
assert node.type == "Concept"
assert node.uid is not None
assert len(node.get_gate('gen').get_links()) == 0
assert len(node.get_gate('gen').activations) == 1
assert len(node.get_gate('sub').get_links()) == 0
assert len(node.get_gate('sub').activations) == 1
assert len(node.get_gate('sur').get_links()) == 0
assert len(node.get_gate('sur').activations) == 1
assert len(node.get_gate('por').get_links()) == 0
assert len(node.get_gate('por').activations) == 1
assert len(node.get_gate('ret').get_links()) == 0
assert len(node.get_gate('ret').activations) == 1
assert len(node.get_gate('cat').get_links()) == 0
assert len(node.get_gate('cat').activations) == 1
assert len(node.get_gate('exp').get_links()) == 0
assert len(node.get_gate('exp').activations) == 1
assert len(node.get_gate('sym').get_links()) == 0
assert len(node.get_gate('sym').activations) == 1
assert len(node.get_gate('ref').get_links()) == 0
assert len(node.get_gate('ref').activations) == 1
# frontend/persistency-oriented data dictionary test
data = node.get_data()
assert data['uid'] == node.uid
assert data['name'] == node.name
assert data['type'] == node.type
node = netapi.create_node("Pipe", None)
    # TODO: the weirdness, server-internally, we return uids as names, clients don't see this, confusion ensues
# assert data['name'] == node.name
def test_node_netapi_create_node_in_nodespace(fixed_nodenet):
# test register node in nodespace creation
net, netapi, source = prepare(fixed_nodenet)
nodespace = netapi.create_nodespace(None, "NestedNodespace")
node = netapi.create_node("Register", nodespace.uid, "TestName")
assert node.parent_nodespace == nodespace.uid
assert node.get_data()['parent_nodespace'] == nodespace.uid
def test_node_netapi_get_nodespace_one(fixed_nodenet):
# test single nodespace querying
net, netapi, source = prepare(fixed_nodenet)
nodespace = netapi.create_nodespace(None, "TestName")
queried_nodespace = netapi.get_nodespace(nodespace.uid)
assert queried_nodespace.uid == nodespace.uid
assert queried_nodespace.name == nodespace.name
def test_node_netapi_get_nodespace_multi(fixed_nodenet):
# test nodespace listing
net, netapi, source = prepare(fixed_nodenet)
nodespace1 = netapi.create_nodespace(None, "TestName1")
nodespace2 = netapi.create_nodespace(None, "TestName2")
nodespace3 = netapi.create_nodespace(nodespace2.uid, "TestName3")
root_ns = netapi.get_nodespace(None)
queried_nodespaces = netapi.get_nodespaces(root_ns.uid)
assert len(queried_nodespaces) == 2
assert nodespace1.uid in [x.uid for x in queried_nodespaces]
assert nodespace2.uid in [x.uid for x in queried_nodespaces]
assert nodespace3.uid not in [x.uid for x in queried_nodespaces]
def test_node_netapi_get_node(fixed_nodenet):
# test register node creation
net, netapi, source = prepare(fixed_nodenet)
node = netapi.create_node("Register", None, "TestName")
queried_node = netapi.get_node(node.uid)
assert queried_node.uid == node.uid
assert queried_node.name == node.name
assert queried_node.get_data() == node.get_data()
assert queried_node.type == node.type
def test_node_netapi_get_nodes(fixed_nodenet):
# test get_nodes plain
net, netapi, source = prepare(fixed_nodenet)
node1 = netapi.create_node("Register", None, "TestName1")
node2 = netapi.create_node("Register", None, "TestName2")
nodes = netapi.get_nodes(netapi.get_nodespace(None).uid)
assert node1.uid in [n.uid for n in nodes]
assert node2.uid in [n.uid for n in nodes]
def test_node_netapi_get_nodes_by_name(fixed_nodenet):
# test get_nodes by name
net, netapi, source = prepare(fixed_nodenet)
node1 = netapi.create_node("Register", None, "TestName1")
node2 = netapi.create_node("Register", None, "TestName2")
nodes = netapi.get_nodes(netapi.get_nodespace(None).uid, node_name_prefix="TestName")
assert len(nodes) == 2
assert node1.uid in [n.uid for n in nodes]
assert node2.uid in [n.uid for n in nodes]
def test_node_netapi_get_nodes_by_nodespace(fixed_nodenet):
# test get_nodes by name and nodespace
net, netapi, source = prepare(fixed_nodenet)
nodespace = netapi.create_nodespace(None, "NestedNodespace")
node1 = netapi.create_node("Register", nodespace.uid, "TestName1")
node2 = netapi.create_node("Register", nodespace.uid, "TestName2")
nodes = netapi.get_nodes(nodespace.uid)
assert len(nodes) == 2
assert node1.uid in [n.uid for n in nodes]
assert node2.uid in [n.uid for n in nodes]
def test_node_netapi_get_nodes_by_nodetype(fixed_nodenet):
# test get_nodes by name and nodespace
net, netapi, source = prepare(fixed_nodenet)
nodespace = netapi.create_nodespace(None, "NestedNodespace")
node1 = netapi.create_node("Pipe", nodespace.uid, "TestName1")
node2 = netapi.create_node("Register", nodespace.uid, "TestName2")
nodes = netapi.get_nodes(nodetype="Register")
assert len(nodes) == 2
uids = [n.uid for n in nodes]
assert node1.uid not in uids
assert node2.uid in uids
assert source.uid in uids
def test_node_netapi_get_nodes_by_name_and_nodespace(fixed_nodenet):
# test get_nodes by name and nodespace
net, netapi, source = prepare(fixed_nodenet)
nodespace = netapi.create_nodespace(None, "NestedNodespace")
node1 = netapi.create_node("Register", None, "TestName1")
node2 = netapi.create_node("Register", nodespace.uid, "TestName2")
nodes = netapi.get_nodes(nodespace.uid, "TestName")
assert len(nodes) == 1
assert node2.uid in [n.uid for n in nodes]
def test_node_netapi_get_nodes_in_gate_field(fixed_nodenet):
# test get_nodes_in_gate_field
net, netapi, source = prepare(fixed_nodenet)
node1 = netapi.create_node("Pipe", None, "TestName1")
node2 = netapi.create_node("Pipe", None, "TestName2")
node3 = netapi.create_node("Pipe", None, "TestName3")
node4 = netapi.create_node("Pipe", None, "TestName4")
netapi.link_with_reciprocal(node1, node2, "subsur")
netapi.link_with_reciprocal(node1, node3, "subsur")
netapi.link_with_reciprocal(node1, node4, "subsur")
netapi.link_with_reciprocal(node2, node3, "porret")
nodes = netapi.get_nodes_in_gate_field(node1, "sub")
assert len(nodes) == 3
assert node2.uid in [n.uid for n in nodes]
assert node3.uid in [n.uid for n in nodes]
assert node4.uid in [n.uid for n in nodes]
def test_node_netapi_get_nodes_in_gate_field_all_links(fixed_nodenet):
# test get_nodes_in_gate_field without specifying a gate parameter
net, netapi, source = prepare(fixed_nodenet)
node1 = netapi.create_node("Pipe", None, "TestName1")
node2 = netapi.create_node("Pipe", None, "TestName2")
node3 = netapi.create_node("Pipe", None, "TestName3")
node4 = netapi.create_node("Pipe", None, "TestName4")
netapi.link_with_reciprocal(node1, node2, "subsur")
netapi.link_with_reciprocal(node1, node3, "subsur")
netapi.link_with_reciprocal(node1, node4, "subsur")
netapi.link_with_reciprocal(node2, node3, "porret")
nodes = netapi.get_nodes_in_gate_field(node2)
assert len(nodes) == 2
assert node1.uid in [n.uid for n in nodes]
assert node3.uid in [n.uid for n in nodes]
def test_node_netapi_get_nodes_in_gate_field_with_limitations(fixed_nodenet):
# test get_nodes_in_gate_field with limitations: no por links
net, netapi, source = prepare(fixed_nodenet)
node1 = netapi.create_node("Pipe", None, "TestName1")
node2 = netapi.create_node("Pipe", None, "TestName2")
node3 = netapi.create_node("Pipe", None, "TestName3")
node4 = netapi.create_node("Pipe", None, "TestName4")
netapi.link_with_reciprocal(node1, node2, "subsur")
netapi.link_with_reciprocal(node1, node3, "subsur")
netapi.link_with_reciprocal(node1, node4, "subsur")
netapi.link_with_reciprocal(node2, node3, "porret")
nodes = netapi.get_nodes_in_gate_field(node1, "sub", ["por"])
assert len(nodes) == 2
assert node3.uid in [n.uid for n in nodes]
assert node4.uid in [n.uid for n in nodes]
def test_node_netapi_get_nodes_in_gate_field_with_limitations_and_nodespace(fixed_nodenet):
# test get_nodes_in_gate_field with limitations: no por links
net, netapi, source = prepare(fixed_nodenet)
nodespace = netapi.create_nodespace(None, "NestedNodespace")
node1 = netapi.create_node("Pipe", None, "TestName1")
node2 = netapi.create_node("Pipe", None, "TestName2")
node3 = netapi.create_node("Pipe", None, "TestName3")
node4 = netapi.create_node("Pipe", nodespace.uid, "TestName4")
netapi.link_with_reciprocal(node1, node2, "subsur")
netapi.link_with_reciprocal(node1, node3, "subsur")
netapi.link_with_reciprocal(node1, node4, "subsur")
netapi.link_with_reciprocal(node2, node3, "porret")
nodes = netapi.get_nodes_in_gate_field(node1, "sub", ["por"], netapi.get_nodespace(None).uid)
assert len(nodes) == 1
assert node3.uid in [n.uid for n in nodes]
def test_node_netapi_get_nodes_in_slot_field(fixed_nodenet):
# test get_nodes_in_slot_field
net, netapi, source = prepare(fixed_nodenet)
node1 = netapi.create_node("Register", None, "TestName1")
node2 = netapi.create_node("Register", None, "TestName2")
node3 = netapi.create_node("Register", None, "TestName3")
node4 = netapi.create_node("Register", None, "TestName4")
netapi.link(node2, "gen", node1, "gen")
netapi.link(node3, "gen", node1, "gen")
netapi.link(node3, "gen", node1, "gen")
netapi.link(node4, "gen", node1, "gen")
nodes = netapi.get_nodes_in_slot_field(node1, "gen")
assert len(nodes) == 3
assert node2.uid in [n.uid for n in nodes]
assert node3.uid in [n.uid for n in nodes]
assert node4.uid in [n.uid for n in nodes]
def test_node_netapi_get_nodes_in_slot_field_all_links(fixed_nodenet):
# test get_nodes_in_slot_field without a gate parameter
net, netapi, source = prepare(fixed_nodenet)
net, netapi, source = prepare(fixed_nodenet)
node1 = netapi.create_node("Pipe", None, "TestName1")
node2 = netapi.create_node("Pipe", None, "TestName2")
node3 = netapi.create_node("Pipe", None, "TestName3")
node4 = netapi.create_node("Pipe", None, "TestName4")
netapi.link_with_reciprocal(node1, node2, "subsur")
netapi.link_with_reciprocal(node1, node3, "subsur")
netapi.link_with_reciprocal(node1, node4, "subsur")
netapi.link_with_reciprocal(node2, node3, "porret")
nodes = netapi.get_nodes_in_slot_field(node1)
assert len(nodes) == 3
assert node2.uid in [n.uid for n in nodes]
assert node3.uid in [n.uid for n in nodes]
assert node4.uid in [n.uid for n in nodes]
def test_node_netapi_get_nodes_with_nodespace_limitation(fixed_nodenet):
# test get_nodes_feed with nodespace limitation
    net, netapi, source = prepare(fixed_nodenet)
    logger.info('finished reading clusters from %s', h5_path)
return result_clusters, result_annotations
def delete_clusters(h5_path, label=DEFAULT_ALGO_NAME):
"""
delete the clustering results with the given label from the h5 file.
:param str h5_path: h5_path to the h5 file
    :param str label: the name/label of the clustering result, for example 'my_clusterer_run_3'.
The always present symlink 'latest' is updated to the next latest clustering result.
"""
with SharedH5File(h5_path, "r+") as file:
if 'clustering' in file:
for name, group in file['clustering'].items():
if label is None or name==label:
for sap_nr, sap_dict in group['saps'].items():
if 'clusters' in sap_dict:
logger.info('deleting clusters for sap %s in %s', sap_nr, h5_path)
del sap_dict['clusters']
_delete_clustering_group_if_empty(h5_path, label)
def _add_annotation_to_group(annotations__parent_group, annotation, user=None, **kwargs):
"""
    add an annotation under the given parent group (a sap/cluster group, a
    clustering-results group, or the file root), creating its 'annotations'
    subgroup when needed.
    :param annotations__parent_group: the h5 group under which the 'annotations' group lives (or is created)
    :param str annotation: the annotation text (can be any free form text)
    :param str user: an optional user name
    :param kwargs: extra attributes (for example cluster_nr) stored on the annotation dataset
"""
if 'annotations' in annotations__parent_group:
annotations_group = annotations__parent_group['annotations']
else:
annotations_group = annotations__parent_group.create_group('annotations')
annotations_group.attrs['description'] = 'annotations on this cluster'
for seq_nr, ds in annotations_group.items():
if ds[0] == annotation:
if not 'cluster_nr' in kwargs or ('cluster_nr' in kwargs and ds.attrs['cluster_nr'] == kwargs['cluster_nr']):
raise ValueError('annotation "%s" already exists' % (annotation,))
seq_nr = max([int(x) for x in annotations_group.keys()])+1 if annotations_group.keys() else 0
ds = annotations_group.create_dataset(str(seq_nr), (1,), h5py.special_dtype(vlen=str), annotation)
ds.attrs['user'] = user if user else 'anonymous'
ds.attrs['timestamp'] = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
for key, value in kwargs.items():
ds.attrs[key] = value
def annotate_cluster(h5_path, label, sap_nr, cluster_nr, annotation, user=None):
"""
add an annotation to the cluster in the file at h5_path, given by the clustering label, sap_nr, cluster_nr.
:param str h5_path: h5_path to the h5 file
:param str label: the label of the clustering results group
    :param int sap_nr: the sap number within the clustering results group
    :param int cluster_nr: the cluster number within the sap within the clustering results group
:param str annotation: the annotation for this cluster (can be any free form text)
:param str user: an optional user name
"""
with SharedH5File(h5_path, "r+") as file:
if 'clustering' in file:
clustering_group = file['clustering']
if label in clustering_group:
algo_group = clustering_group[label]
saps_group = algo_group['saps']
if str(sap_nr) in saps_group:
sap_group = saps_group[str(sap_nr)]
_add_annotation_to_group(sap_group, annotation, user, cluster_nr=cluster_nr)
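# Illustrative usage sketch (the file path and annotation text are made up):
#   annotate_cluster('/data/scintillation/L123456.h5', 'my_clusterer_run_3',
#                    sap_nr=0, cluster_nr=2, annotation='RFI-dominated cluster',
#                    user='alice')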
def delete_cluster_annotation(h5_path, sap_nr, cluster_nr, annotation_nr, label='latest'):
"""
remove the annotation_nr'th annotation for the cluster in the file at h5_path, given by the clustering label, sap_nr, cluster_nr.
:param str h5_path: h5_path to the h5 file
    :param int sap_nr: the sap number within the clustering results group
    :param int cluster_nr: the cluster number within the sap within the clustering results group
    :param int annotation_nr: the annotation number (index) to delete
    :param str label: the label of the clustering results group
"""
with SharedH5File(h5_path, "r+") as file:
if 'clustering' in file:
clustering_group = file['clustering']
if label in clustering_group:
algo_group = clustering_group[label]
saps_group = algo_group['saps']
if str(sap_nr) in saps_group:
sap_group = saps_group[str(sap_nr)]
if 'annotations' in sap_group:
annotations_group = sap_group['annotations']
if 'annotations' in sap_group:
annotations_group = sap_group['annotations']
if str(annotation_nr) in annotations_group:
del annotations_group[str(annotation_nr)]
def annotate_clustering_results(h5_path, label, annotation, user=None):
"""
    add an annotation to the clustering results group with the given label in the file at h5_path.
:param str h5_path: h5_path to the h5 file
:param str label: the label of the clustering results group
:param str annotation: the annotation for this cluster (can be any free form text)
:param str user: an optional user name
"""
with SharedH5File(h5_path, "r+") as file:
if 'clustering' in file:
clustering_group = file['clustering']
if label in clustering_group:
algo_group = clustering_group[label]
_add_annotation_to_group(algo_group, annotation, user)
def annotate_file(h5_path, annotation, user=None):
"""
add an annotation at top level for the entire file at h5_path.
:param str h5_path: h5_path to the h5 file
:param str annotation: the annotation for this cluster (can be any free form text)
:param str user: an optional user name
"""
with SharedH5File(h5_path, "r+") as file:
_add_annotation_to_group(file, annotation, user)
def read_file_annotations(h5_path):
"""
read the top level annotations on this file as a whole.
:param str h5_path: path to the h5 file
:return list: an annotations list with the top level annotations on this file as a whole.
annotations list = [ { 'annotation': <text>, 'user': <user>, 'timestamp: <datetime> },
{ 'annotation': <text>, 'user': <user>, 'timestamp: <datetime> },
.... ]
"""
result_annotations = []
with SharedH5File(h5_path, "r") as file:
        if 'annotations' in file:
for anno_nr, anno_ds in file['annotations'].items():
annotation = anno_ds[0]
cluster_nr = anno_ds.attrs.get('cluster_nr')
user = anno_ds.attrs.get('user')
timestamp = anno_ds.attrs.get('timestamp')
result_annotations.append({'annotation': annotation,
'user': user,
'timestamp': datetime.strptime(timestamp, '%Y-%m-%d %H:%M:%S')})
return result_annotations
def get_stations(h5_path):
with SharedH5File(h5_path, "r+") as file:
stations = set()
for sap_dict in file['measurement/saps'].values():
baselines = sap_dict['baselines'][:]
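            # each baseline is stored as a (station1, station2) pair; only the first
            # station of each pair is added to the set below.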
for bl in baselines:
stations.add(bl[0])
return sorted(stations)
def read_info_from_hdf5(h5_path, read_data_info=True, read_parset_info=True):
"""
Read basic info like Project, start/stoptime, stations, etc from h5 file.
:param str h5_path: h5_path to the h5 file
:param bool read_data_info: do/don't read data info (how many sap's, baselines, timestamps, subbands).
:param bool read_parset_info: do/don't read info from the parset (Project, PI, name, start/stop time, etc).
:return str: A human readable string with the requested info.
"""
result = {}
with SharedH5File(h5_path, "r") as file:
need_to_fill_info_folder_from_parset = 'measurement/info' not in file
if need_to_fill_info_folder_from_parset:
# try to convert old style file with parsets only into new files with info.
fill_info_folder_from_parset(h5_path)
if read_data_info:
result = read_hypercube(h5_path, read_visibilities=False, read_flagging=False)
if read_parset_info:
parset = read_hypercube_parset(h5_path)
if parset:
result['parset'] = parset
file_annotations = read_file_annotations(h5_path)
clusters, clustering_algorithm_annotations = read_clusters(h5_path)
return create_info_string(result, h5_path, file_annotations, clusters, clustering_algorithm_annotations)
def create_info_string(data, h5_path=None, file_annotations=None, clusters=None, cluster_annotations=None):
info = ''
try:
parset = data['parset']
if h5_path:
info += 'File : ' + os.path.basename(h5_path) + '\n'
try:
with SharedH5File(h5_path, "r") as file:
info += 'File version : ' + file['version'][0] + '\n'
except IOError:
pass
info += 'Project : ' + parset.getString('ObsSW.Observation.Campaign.name') + '\n'
info += 'Project description : ' + parset.getString('ObsSW.Observation.Campaign.title') + '\n'
info += 'Project PI : ' + parset.getString('ObsSW.Observation.Campaign.PI') + '\n'
info += 'Type : ' + parset.getString('ObsSW.Observation.processSubtype') + '\n'
info += 'SAS id : ' + parset.getString('ObsSW.Observation.otdbID') + '\n'
info += 'name : ' + parset.getString('ObsSW.Observation.Scheduler.taskName') + '\n'
info += 'start time (UTC) : ' + parset.getString('ObsSW.Observation.startTime') + '\n'
info += 'stop time (UTC) : ' + parset.getString('ObsSW.Observation.stopTime') + '\n'
try:
# try to import lofar.common.datetimeutils here and not at the top of the file
# to make this hdf5_io module as loosly coupled to other lofar code as possible
from lofar.common.datetimeutils import format_timedelta, parseDatetime
info += 'duration : ' + format_timedelta(parseDatetime(parset.getString('ObsSW.Observation.stopTime')) -
parseDatetime(parset.getString('ObsSW.Observation.startTime'))) + '\n'
except ImportError:
pass #just continue
if 'observation' in parset.getString('ObsSW.Observation.processSubtype','').lower():
info += '#Stations : ' + str(len(parset.getStringVector('ObsSW.Observation.VirtualInstrument.stationList'))) + '\n'
info += 'Stations : ' + ','.join(sorted(parset.getStringVector('ObsSW.Observation.VirtualInstrument.stationList'))) + '\n'
info += 'antenna array : ' + parset.getString('ObsSW.Observation.antennaArray') + '\n'
except:
#parset info not available
pass
if file_annotations:
for i, anno in enumerate(file_annotations):
info += 'annotation[%02d] : \'%s\', by \'%s\' at \'%s\'\n' % (i, anno['annotation'], anno['user'], anno['timestamp'].strftime('%Y-%m-%d %H:%M:%S'))
if 'saps' in data:
for sap_nr, sap_dict in data['saps'].items():
info += 'data : sap: %s, #baselines: %s, #timestamps: %s, #subbands: %s, #polarizations: %s' % (
sap_nr, len(sap_dict['baselines']), len(sap_dict['timestamps']), len(sap_dict['subbands']), len(sap_dict['polarizations'])) + '\n'
if clusters:
for sap_nr in sorted(clusters.keys()):
sap_dict = clusters[sap_nr]
sap_cluster_dict = sap_dict['clusters']
info += 'clusters : sap: %s, #clusters: %s, cluster sizes: %s' % (
sap_nr, len(sap_cluster_dict), ', '.join([str(len(sap_cluster_dict[c_nr])) for c_nr in sorted(sap_cluster_dict.keys())])) + '\n'
sap_cluster_annotation_dict = sap_dict.get('annotations', {})
for sap_cluster_nr in sorted(sap_cluster_annotation_dict.keys()):
sap_cluster_annotations = sap_cluster_annotation_dict[sap_cluster_nr]
for sap_cluster_annotation in sap_cluster_annotations:
info += 'annotations : sap: %d cluster: %d : %s %s "%s"\n' % (sap_nr, sap_cluster_nr,
sap_cluster_annotation.get('user', '<unknown>'),
sap_cluster_annotation.get('timestamp', '<unknown>'),
sap_cluster_annotation.get('annotation', '<unknown>'))
return info
def fill_info_folder_from_parset(h5_path):
try:
logger.info('fill_info_folder_from_parset for %s', h5_path)
parset = read_hypercube_parset(h5_path)
if parset is not None:
with SharedH5File(h5_path, "r+") as file:
# remove previous info if present
if 'measurement/info' in file:
del file['measurement/info']
info_group = | |
+ ' ]\n'
tag_str += indent_str + ']'
else:
tag_str += ' ]'
return tag_str
def __getitem__(self, attr_index):
'''
Returns the masked, unshifted value of the flag defined
by the descriptor: self[DESC][attr_index].
If attr_index is a string, uses self.desc['NAME_MAP'].get(attr_index)
as attr_index.
Being unshifted means that if the flag is(for example) the 5th bit
in the integer and is set, this method will return 2**(5-1) or 16.
attr_index must be an int.
Raises AttributeError if attr_index does not exist in self.desc
Raises TypeError if attr_index is not an int or string.
'''
desc = object.__getattribute__(self, "desc")
if isinstance(attr_index, str):
attr_index = desc['NAME_MAP'].get(attr_index)
elif not isinstance(attr_index, int):
raise TypeError("'attr_index' must be an int or str, not %s" %
type(attr_index))
if attr_index not in desc:
raise AttributeError("'%s' of type %s has no attribute '%s'" %
(desc.get('NAME', UNNAMED),
type(self), attr_index))
return self.data & desc[attr_index]['VALUE']
def __setitem__(self, attr_index, new_value):
'''
Sets the flag defined by the descriptor: self.desc[attr_index]
The flag is set to bool(new_value)
If attr_index is a string, uses self.desc['NAME_MAP'].get(attr_index)
as attr_index.
Raises AttributeError if attr_index does not exist in self.desc
Raises TypeError if attr_index is not an int or string.
'''
desc = object.__getattribute__(self, "desc")
if isinstance(attr_index, str):
attr_index = desc['NAME_MAP'].get(attr_index)
elif not isinstance(attr_index, int):
raise TypeError("'attr_index' must be an int or str, not %s" %
type(attr_index))
if attr_index not in desc:
raise AttributeError("'%s' of type %s has no attribute '%s'" %
(desc.get('NAME', UNNAMED),
type(self), attr_index))
mask = desc[attr_index]['VALUE']
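        # clear the flag's bits first (data - (data & mask)), then add the mask back
        # in only when bool(new_value) is True: a branch-free set/unset.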
self.data = self.data - (self.data & mask) + (mask)*bool(new_value)
def __delitem__(self, attr_index):
'''
Unsets the flag defined by the descriptor: self.desc[attr_index]
If attr_index is a string, uses self.desc['NAME_MAP'].get(attr_index)
as attr_index.
Raises AttributeError if attr_index does not exist in self.desc
Raises TypeError if attr_index is not an int or string.
'''
desc = object.__getattribute__(self, "desc")
if isinstance(attr_index, str):
attr_index = desc['NAME_MAP'].get(attr_index)
elif not isinstance(attr_index, int):
raise TypeError("'attr_index' must be an int or str, not %s" %
type(attr_index))
if attr_index not in desc:
raise AttributeError("'%s' of type %s has no attribute '%s'" %
(desc.get('NAME', UNNAMED),
type(self), attr_index))
self.data -= self.data & desc[attr_index]['VALUE']
def __getattr__(self, attr_name):
'''
Returns the attribute specified by 'attr_name'.
The attribute may either exist directly in this Block, in this Block
under an alias name stored in self.desc['NAME_MAP'], or in self.desc.
If object.__getattribute__(self, attr_name) raises an AttributeError,
then self.desc['NAME_MAP'] will be checked for attr_name in its keys.
If it exists, uses desc[desc['NAME_MAP'][attr_name]]['VALUE'] as a
bitmask to return self.data & bitmask.
If attr_name does not exist in self.desc['NAME_MAP'], self.desc will
be checked for attr_name in its keys.
If it exists, returns self.desc[attr_index]
        Raises AttributeError if attr_name can't be found in any of the above.
'''
try:
return object.__getattribute__(self, attr_name)
except AttributeError:
desc = object.__getattribute__(self, "desc")
if attr_name in desc['NAME_MAP']:
return self.data & desc[desc['NAME_MAP'][attr_name]]['VALUE']
elif attr_name in desc:
return desc[attr_name]
else:
raise AttributeError("'%s' of type %s has no attribute '%s'" %
(desc.get('NAME', UNNAMED),
type(self), attr_name))
def __setattr__(self, attr_name, new_value):
'''
Sets the attribute specified by 'attr_name' to the given 'new_value'.
The attribute may either exist directly in this Block, in this Block
under an alias name stored in self.desc['NAME_MAP'], or in self.desc.
If object.__setattr__(self, attr_name, new_value) raises an
AttributeError, then self.desc['NAME_MAP'] will be checked for
attr_name in its keys.
If it exists, uses desc[desc['NAME_MAP'][attr_name]]['VALUE'] as a
bitmask to set the specified flag.
If attr_name does not exist in self.desc['NAME_MAP'], self.desc will
be checked for attr_name in its keys.
        Raises AttributeError if attr_name can't be found in any of the above.
'''
try:
object.__setattr__(self, attr_name, new_value)
except AttributeError:
desc = object.__getattribute__(self, "desc")
attr_index = desc['NAME_MAP'].get(attr_name)
if attr_index is not None:
mask = desc[attr_index]['VALUE']
self.data = (self.data - (self.data & mask) +
mask*bool(new_value))
else:
raise AttributeError("'%s' of type %s has no attribute '%s'" %
(desc.get('NAME', UNNAMED),
type(self), attr_name))
def __delattr__(self, attr_name):
'''
Deletes the attribute specified by 'attr_name'.
The attribute may either exist directly in this Block, in this Block
under an alias name stored in self.desc['NAME_MAP'], or in self.desc.
If object.__delattr__(self, attr_name) raises an AttributeError,
then self.desc['NAME_MAP'] will be checked for attr_name in its keys.
If it exists, uses desc[desc['NAME_MAP'][attr_name]]['VALUE'] as a
bitmask to unset the specified flag.
If attr_name does not exist in self.desc['NAME_MAP'], self.desc will
be checked for attr_name in its keys.
        Raises AttributeError if attr_name can't be found in any of the above.
'''
try:
object.__delattr__(self, attr_name)
except AttributeError:
desc = object.__getattribute__(self, "desc")
attr_index = desc['NAME_MAP'].get(attr_name)
if attr_index is not None:
# unset the flag and remove the option from the descriptor
self.data -= self.data & desc[attr_index]['VALUE']
else:
raise AttributeError("'%s' of type %s has no attribute '%s'" %
(desc.get('NAME', UNNAMED),
type(self), attr_name))
def get(self, attr_name):
'''
        Returns True if the flag specified by 'attr_name' is set, False otherwise.
Raises TypeError if 'attr_name' is not a string.
'''
if not isinstance(attr_name, str):
raise TypeError("'attr_name' must be a string, not %s" %
type(attr_name))
desc = object.__getattribute__(self, "desc")
return bool(self.data & desc[desc['NAME_MAP'][attr_name]]['VALUE'])
def set(self, attr_name):
'''
Sets the flag specified by 'attr_name' to True.
Raises TypeError if 'attr_name' is not a string.
'''
if not isinstance(attr_name, str):
raise TypeError("'attr_name' must be a string, not %s" %
type(attr_name))
desc = object.__getattribute__(self, "desc")
mask = desc[desc['NAME_MAP'][attr_name]]['VALUE']
self.data = self.data - (self.data & mask) + mask
def set_to(self, attr_name, value):
'''
Sets the flag specified by 'attr_name' to bool(value).
Raises TypeError if 'attr_name' is not a string.
'''
if not isinstance(attr_name, str):
raise TypeError("'attr_name' must be a string, not %s" %
type(attr_name))
desc = object.__getattribute__(self, "desc")
mask = desc[desc['NAME_MAP'][attr_name]]['VALUE']
self.data = self.data - (self.data & mask) + mask*bool(value)
def unset(self, attr_name):
'''
Sets the flag specified by 'attr_name' to False.
Raises TypeError if 'attr_name' is not a string.
'''
if not isinstance(attr_name, str):
raise TypeError("'attr_name' must be a string, not %s" %
type(attr_name))
desc = object.__getattribute__(self, "desc")
self.data -= self.data & desc[desc['NAME_MAP'][attr_name]]['VALUE']
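    # Illustrative usage sketch (assumes a BoolBlock instance `flags` whose
    # descriptor defines a flag named 'visible'):
    #   flags.set('visible')          # force the flag on
    #   flags.set_to('visible', 0)    # force it off again
    #   flags.get('visible')          # -> False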
def parse(self, **kwargs):
'''
Parses this BoolBlock in the way specified by the keyword arguments.
If initdata is supplied, it will be cast as an int and used for
        this BoolBlock's 'data' attribute. If not, and rawdata or a filepath
is supplied, it will be used to reparse this BoolBlock.
If rawdata, initdata, filepath, and init_attrs are all unsupplied,
init_attrs will default to True, resetting all flags to their defaults.
If rawdata, initdata, and filepath are all unsupplied or None and
init_attrs is False, this method will do nothing.
Raises TypeError if rawdata and filepath are both supplied.
Raises TypeError if rawdata doesnt have read, seek, and peek methods.
Optional keywords arguments:
# bool:
init_attrs --- If True, resets all flags to the values under the
DEFAULT descriptor key of each flags descriptor.
                       Flags default to False if no DEFAULT exists.
# buffer:
rawdata ------ A peekable buffer that will be used for parsing
this BoolBlock. Defaults to None.
If supplied, do not supply 'filepath'.
# int:
root_offset -- The root offset that all rawdata reading is done from.
Pointers and other offsets are relative to this value.
Passed to the parser of this BoolBlocks FieldType.
offset ------- The initial offset that rawdata reading is done from.
Passed to the parser of this BoolBlocks FieldType.
# iterable:
initdata ----- An object able to be cast as an int using int(initdata).
Will be cast as an int and self.data will be set to it.
#str:
filepath ----- An absolute path to a file to use as rawdata to parse
this BoolBlock. If supplied, do not supply 'rawdata'.
'''
initdata = kwargs.pop('initdata', None)
if isinstance(initdata, DataBlock):
self.data = int(initdata.data)
return
elif initdata is not None:
self.data = int(initdata)
return # return early
writable = kwargs.pop('writable', False)
with get_rawdata_context(writable=writable, **kwargs) as rawdata:
if rawdata is not None:
# parse the Block from raw data
try:
desc = object.__getattribute__(self, "desc")
kwargs.update(desc=desc, node=self, rawdata=rawdata)
kwargs.pop('filepath', None)
desc['TYPE'].parser(**kwargs)
return # return early
except Exception as e:
a = e.args[:-1]
e_str = "\n"
try:
e_str = e.args[-1] + | |
<filename>zipline/pipeline/mixins.py
"""
Mixins classes for use with Filters and Factors.
"""
from textwrap import dedent
from numpy import (
array,
full,
recarray,
vstack,
)
from pandas import NaT as pd_NaT
from zipline.errors import (
WindowLengthNotPositive,
UnsupportedDataType,
NoFurtherDataError,
)
from zipline.utils.context_tricks import nop_context
from zipline.utils.input_validation import expect_types
from zipline.utils.sharedoc import (
format_docstring,
PIPELINE_ALIAS_NAME_DOC,
PIPELINE_DOWNSAMPLING_FREQUENCY_DOC,
)
from zipline.utils.pandas_utils import nearest_unequal_elements
from .downsample_helpers import (
select_sampling_indices,
expect_downsample_frequency,
)
from .sentinels import NotSpecified
from .term import Term
class PositiveWindowLengthMixin(object):
"""
Validation mixin enforcing that a Term gets a positive WindowLength
"""
def _validate(self):
super(PositiveWindowLengthMixin, self)._validate()
if not self.windowed:
raise WindowLengthNotPositive(window_length=self.window_length)
class SingleInputMixin(object):
"""
Validation mixin enforcing that a Term gets a length-1 inputs list.
"""
def _validate(self):
super(SingleInputMixin, self)._validate()
num_inputs = len(self.inputs)
if num_inputs != 1:
raise ValueError(
"{typename} expects only one input, "
"but received {num_inputs} instead.".format(
typename=type(self).__name__,
num_inputs=num_inputs
)
)
class StandardOutputs(object):
"""
Validation mixin enforcing that a Term cannot produce non-standard outputs.
"""
def _validate(self):
super(StandardOutputs, self)._validate()
if self.outputs is not NotSpecified:
raise ValueError(
"{typename} does not support custom outputs,"
" but received custom outputs={outputs}.".format(
typename=type(self).__name__,
outputs=self.outputs,
)
)
class RestrictedDTypeMixin(object):
"""
Validation mixin enforcing that a term has a specific dtype.
"""
ALLOWED_DTYPES = NotSpecified
def _validate(self):
super(RestrictedDTypeMixin, self)._validate()
assert self.ALLOWED_DTYPES is not NotSpecified, (
"ALLOWED_DTYPES not supplied on subclass "
"of RestrictedDTypeMixin: %s." % type(self).__name__
)
if self.dtype not in self.ALLOWED_DTYPES:
raise UnsupportedDataType(
typename=type(self).__name__,
dtype=self.dtype,
)
class CustomTermMixin(object):
"""
Mixin for user-defined rolling-window Terms.
Implements `_compute` in terms of a user-defined `compute` function, which
is mapped over the input windows.
Used by CustomFactor, CustomFilter, CustomClassifier, etc.
"""
ctx = nop_context
def __new__(cls,
inputs=NotSpecified,
outputs=NotSpecified,
window_length=NotSpecified,
mask=NotSpecified,
dtype=NotSpecified,
missing_value=NotSpecified,
ndim=NotSpecified,
**kwargs):
unexpected_keys = set(kwargs) - set(cls.params)
if unexpected_keys:
raise TypeError(
"{termname} received unexpected keyword "
"arguments {unexpected}".format(
termname=cls.__name__,
unexpected={k: kwargs[k] for k in unexpected_keys},
)
)
return super(CustomTermMixin, cls).__new__(
cls,
inputs=inputs,
outputs=outputs,
window_length=window_length,
mask=mask,
dtype=dtype,
missing_value=missing_value,
ndim=ndim,
**kwargs
)
def compute(self, today, assets, out, *arrays):
"""
Override this method with a function that writes a value into `out`.
"""
raise NotImplementedError(
"{name} must define a compute method".format(
name=type(self).__name__
)
)
def _allocate_output(self, windows, shape):
"""
Allocate an output array whose rows should be passed to `self.compute`.
The resulting array must have a shape of ``shape``.
If we have standard outputs (i.e. self.outputs is NotSpecified), the
default is an empty ndarray whose dtype is ``self.dtype``.
If we have an outputs tuple, the default is an empty recarray with
``self.outputs`` as field names. Each field will have dtype
``self.dtype``.
This can be overridden to control the kind of array constructed
(e.g. to produce a LabelArray instead of an ndarray).
"""
missing_value = self.missing_value
outputs = self.outputs
if outputs is not NotSpecified:
out = recarray(
shape,
formats=[self.dtype.str] * len(outputs),
names=outputs,
)
out[:] = missing_value
else:
out = full(shape, missing_value, dtype=self.dtype)
return out
def _format_inputs(self, windows, column_mask):
inputs = []
for input_ in windows:
window = next(input_)
if window.shape[1] == 1:
# Do not mask single-column inputs.
inputs.append(window)
else:
inputs.append(window[:, column_mask])
return inputs
def _compute(self, windows, dates, assets, mask):
"""
Call the user's `compute` function on each window with a pre-built
output array.
"""
format_inputs = self._format_inputs
compute = self.compute
params = self.params
ndim = self.ndim
shape = (len(mask), 1) if ndim == 1 else mask.shape
out = self._allocate_output(windows, shape)
with self.ctx:
for idx, date in enumerate(dates):
# Never apply a mask to 1D outputs.
out_mask = array([True]) if ndim == 1 else mask[idx]
# Mask our inputs as usual.
inputs_mask = mask[idx]
masked_assets = assets[inputs_mask]
out_row = out[idx][out_mask]
inputs = format_inputs(windows, inputs_mask)
compute(date, masked_assets, out_row, *inputs, **params)
out[idx][out_mask] = out_row
return out
def graph_repr(self):
"""Short repr to use when rendering Pipeline graphs."""
# Graphviz interprets `\l` as "divide label into lines, left-justified"
return type(self).__name__ + ':\\l window_length: %d\\l' % \
self.window_length
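# Illustrative usage sketch (not part of the original module): CustomTermMixin is
# the machinery behind user-defined terms such as CustomFactor, so a typical
# subclass only supplies `inputs`, `window_length` and a `compute` that writes
# into `out`.  The dataset column used below is an assumption for the example.
#
#     from zipline.pipeline import CustomFactor
#     from zipline.pipeline.data import USEquityPricing
#
#     class MeanClose(CustomFactor):
#         inputs = [USEquityPricing.close]
#         window_length = 10
#
#         def compute(self, today, assets, out, close):
#             # `close` has shape (window_length, num_assets); write one value
#             # per asset into the pre-allocated output row.
#             out[:] = close.mean(axis=0)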
class LatestMixin(SingleInputMixin):
"""
Mixin for behavior shared by Custom{Factor,Filter,Classifier}.
"""
window_length = 1
def compute(self, today, assets, out, data):
out[:] = data[-1]
def _validate(self):
super(LatestMixin, self)._validate()
if self.inputs[0].dtype != self.dtype:
raise TypeError(
"{name} expected an input of dtype {expected}, "
"but got {actual} instead.".format(
name=type(self).__name__,
expected=self.dtype,
actual=self.inputs[0].dtype,
)
)
def graph_repr(self):
return "Latest"
class AliasedMixin(SingleInputMixin):
"""
Mixin for aliased terms.
"""
def __new__(cls, term, name):
return super(AliasedMixin, cls).__new__(
cls,
inputs=(term,),
outputs=term.outputs,
window_length=0,
name=name,
dtype=term.dtype,
missing_value=term.missing_value,
ndim=term.ndim,
window_safe=term.window_safe,
)
def _init(self, name, *args, **kwargs):
self.name = name
return super(AliasedMixin, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, name, *args, **kwargs):
return (
super(AliasedMixin, cls)._static_identity(*args, **kwargs),
name,
)
def _compute(self, inputs, dates, assets, mask):
return inputs[0]
def __repr__(self):
return '{type}({inner}, name={name!r})'.format(
type=type(self).__name__,
inner=self.inputs[0].recursive_repr(),
name=self.name,
)
def graph_repr(self):
"""Short repr to use when rendering Pipeline graphs."""
return self.name
@classmethod
def make_aliased_type(cls, other_base):
"""
Factory for making Aliased{Filter,Factor,Classifier}.
"""
docstring = dedent(
"""
A {t} that names another {t}.
Parameters
----------
term : {t}
{{name}}
"""
).format(t=other_base.__name__)
doc = format_docstring(
owner_name=other_base.__name__,
docstring=docstring,
formatters={'name': PIPELINE_ALIAS_NAME_DOC},
)
return type(
'Aliased' + other_base.__name__,
(cls, other_base),
{'__doc__': doc,
'__module__': other_base.__module__},
)
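# Illustrative note (sketch, not from this module): make_aliased_type produces the
# Aliased{Factor,Filter,Classifier} classes that users reach through the `alias`
# method of an existing term, e.g.
#     named = some_factor.alias('momentum_10d')   # `some_factor` is a placeholder
# so that the term renders under the given name in pipeline graphs.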
class DownsampledMixin(StandardOutputs):
"""
Mixin for behavior shared by Downsampled{Factor,Filter,Classifier}
A downsampled term is a wrapper around the "real" term that performs actual
computation. The downsampler is responsible for calling the real term's
`compute` method at selected intervals and forward-filling the computed
values.
Downsampling is not currently supported for terms with multiple outputs.
"""
# There's no reason to take a window of a downsampled term. The whole
# point is that you're re-using the same result multiple times.
window_safe = False
@expect_types(term=Term)
@expect_downsample_frequency
def __new__(cls, term, frequency):
return super(DownsampledMixin, cls).__new__(
cls,
inputs=term.inputs,
outputs=term.outputs,
window_length=term.window_length,
mask=term.mask,
frequency=frequency,
wrapped_term=term,
dtype=term.dtype,
missing_value=term.missing_value,
ndim=term.ndim,
)
def _init(self, frequency, wrapped_term, *args, **kwargs):
self._frequency = frequency
self._wrapped_term = wrapped_term
return super(DownsampledMixin, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, frequency, wrapped_term, *args, **kwargs):
return (
super(DownsampledMixin, cls)._static_identity(*args, **kwargs),
frequency,
wrapped_term,
)
def compute_extra_rows(self,
all_dates,
start_date,
end_date,
min_extra_rows):
"""
Ensure that min_extra_rows pushes us back to a computation date.
Parameters
----------
all_dates : pd.DatetimeIndex
The trading sessions against which ``self`` will be computed.
start_date : pd.Timestamp
The first date for which final output is requested.
end_date : pd.Timestamp
The last date for which final output is requested.
min_extra_rows : int
The minimum number of extra rows required of ``self``, as
determined by other terms that depend on ``self``.
Returns
-------
extra_rows : int
The number of extra rows to compute. This will be the minimum
number of rows required to make our computed start_date fall on a
recomputation date.
"""
try:
current_start_pos = all_dates.get_loc(start_date) - min_extra_rows
if current_start_pos < 0:
raise NoFurtherDataError.from_lookback_window(
initial_message="Insufficient data to compute Pipeline:",
first_date=all_dates[0],
lookback_start=start_date,
lookback_length=min_extra_rows,
)
except KeyError:
before, after = nearest_unequal_elements(all_dates, start_date)
raise ValueError(
"Pipeline start_date {start_date} is not in calendar.\n"
"Latest date before start_date is {before}.\n"
"Earliest date after start_date is {after}.".format(
start_date=start_date,
before=before,
after=after,
)
)
# Our possible target dates are all the dates on or before the current
# starting position.
# TODO: Consider bounding this below by self.window_length
candidates = all_dates[:current_start_pos + 1]
# Choose the latest date in the candidates that is the start of a new
# period at our frequency.
choices = select_sampling_indices(candidates, self._frequency)
        # If we have choices, the last choice is the first date of the
        # period containing current_start_date. Choose it.
new_start_date = candidates[choices[-1]]
# Add the difference between the new and old start dates to get the
# number of rows for the new start_date.
new_start_pos = all_dates.get_loc(new_start_date)
assert new_start_pos <= current_start_pos, \
"Computed negative extra rows!"
return min_extra_rows + (current_start_pos - new_start_pos)
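    # Worked example (numbers are illustrative, not from the codebase): with 60
    # sessions in all_dates, start_date at index 45 and min_extra_rows=5, we get
    # current_start_pos = 40.  If the nearest recomputation date at or before
    # index 40 sits at index 38, then new_start_pos = 38 and this method returns
    # 5 + (40 - 38) = 7, i.e. just enough extra rows for the computed start to
    # land on a recomputation date.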
def _compute(self, inputs, dates, assets, mask):
"""
Compute by delegating to self._wrapped_term._compute on sample dates.
On non-sample dates, forward-fill from previously-computed samples.
"""
to_sample = dates[select_sampling_indices(dates, self._frequency)]
assert to_sample[0] == dates[0], \
"Misaligned sampling dates in %s." % type(self).__name__
real_compute = self._wrapped_term._compute
# Inputs will contain different kinds of values depending on whether or
# not we're a windowed computation.
# If we're windowed, then `inputs` is a list of iterators of ndarrays.
# If we're not windowed, then `inputs` is just a list of ndarrays.
        # There are two things we care about doing with the input:
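        # Sketch of the forward-fill behaviour described above (illustrative only):
        # if only dates[0] and dates[3] are sampling dates, the wrapped term is
        # computed twice and its rows are reused in between:
        #     sample?    [T, F, F, T, F]
        #     computed   [a, ., ., b, .]
        #     emitted    [a, a, a, b, b]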
# -*- coding: utf-8 -*-
'''
The MIT License
Copyright (c) 2009 Marici, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
from django.conf import settings
from django.core import serializers
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.core.exceptions import ObjectDoesNotExist
from django.http import (HttpResponse, HttpResponseRedirect, Http404,
HttpResponseForbidden)
from django.template import loader, Context, RequestContext
from django.shortcuts import get_object_or_404, render_to_response
from django.contrib import auth
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.contrib.sites.models import Site
from maricilib.django.shortcuts import (get_object,
render_to_response_of_class)
from maricilib.django.decorators import getmethod, postmethod
from maricilib.django.core.paginator import Paginator
from maricilib.django.apps.taskqueue.queue import get_taskqueue
from maricilib.django.apps.taskqueue.tasks import SendEmailTask
from recipes.models import (UserProfile, FavoriteUser, Recipe, Comment,
DailyAction, DailyScore, Karma)
from recipes import forms
recipes_per_page = 20
users_per_page = 30
comments_per_page = 20
def user_is_active_or_404(user):
if not user.is_active:
raise Http404
def get_profile_or_create(user):
'''
    Returns the profile of the given user, creating it if it does not exist. (This function is not a view function.)
'''
try:
profile = user.get_profile()
except ObjectDoesNotExist:
profile = UserProfile.objects.create(user=user)
return profile
def send_email(template_path, context, subject, to_list):
'''
    Builds an email and registers it as a task. (This function is not a view function.)
'''
t = loader.get_template(template_path)
body = t.render(context)
task = SendEmailTask(dict(subject=subject, body=body,
from_address=settings.EMAIL_FROM,
to_list=to_list))
get_taskqueue().send_task(task, queue_name=settings.QUEUENAME_EMAIL)
def target_is_favorite(user, target):
'''
    Returns True if the user given as `user` has registered the user given as
    `target` as a favorite, otherwise False. (This function is not a view function.)
'''
if not user.is_authenticated(): return False
fav = get_object(FavoriteUser, user=user, target=target)
return fav is not None
@getmethod
def new(request):
'''
    Displays the form for registering a new member.
    @context form: UserCreationForm instance
    @return: 200 response (success; the form is displayed)
'''
userform = forms.UserCreationForm()
return render_to_response('registration/new_form.html',
{'form': userform}, RequestContext(request))
@postmethod
def new(request):
'''
    Creates a member.
    Accepts the values defined by forms.UserCreationForm.
    See NewRecipeForm for the required fields.
    If creation succeeds, a page announcing that an email was sent is displayed.
    On successful creation, the User instance returned in the context is in the following state:
    email: the value passed from the form
    username: the email with '@' replaced by '_'
    first_name: the nickname
    is_active: False
    The UserProfile instance returned in the context is in the following state:
    birth_year, prefecture: the values passed from the form
    is_female: True if gender was set to female on the form
    @context created_user: User instance
    @context profile: UserProfile instance
    @return: 200 response (success)
    @return: 200 response (validation failed; the form is redisplayed)
'''
form = forms.UserCreationForm(request.POST)
if not form.is_valid():
return render_to_response('registration/new_form.html',
{'form': form}, RequestContext(request))
user = form.save(commit=False)
user.is_active = False
user.save()
profile = UserProfile.objects.create_pending_user(user=user,
alter_email=user.email,
**(form.get_profile_dict()))
email_validation_url(request, user, profile)
d = {'created_user': user, 'profile': profile}
return render_to_response('registration/new_sent_email.html',
d, RequestContext(request))
def email_validation_url(request, user, profile):
'''
    Sends the email-address confirmation email. (This function is not a view function.)
'''
site = Site.objects.get_current()
validation_url = request.build_absolute_uri(
reverse('recipes-users-validate',
kwargs={'key': profile.validation_key}))
c = Context({'created_user': user, 'profile': profile,
'validation_url': validation_url})
subject = u'[%s] メンバー登録確認メール' % site.name
send_email('recipes/email/validation.txt', c, subject, [user.email])
def validate(request, key=None):
'''
    Activates a user.
    Looks up the UserProfile instance with the given key and sets is_active
    to True on its user instance.
    Returns a 403 error if the UserProfile instance's key_issued_at is more than two days old.
    @context created_user: the activated user
    @context profile: the user profile
    @param key: validation key
    @return: 200 response (success)
    @return: 403 response (if the UserProfile instance's key_issued_at is more than two days old)
    @return: 404 response (if no matching UserProfile instance exists)
'''
profile = get_object_or_404(UserProfile, validation_key=key)
if not profile.validate(key):
return render_to_response_of_class(HttpResponseForbidden, '403.html')
profile.disable_validation_key()
profile.save()
user = profile.user
user.is_active = True
user.save()
d = {'created_user': user, 'profile': profile}
return render_to_response('registration/new_success.html',
d, RequestContext(request))
@getmethod
@login_required
def inactivate(request):
'''
    Displays the confirmation screen for leaving the service.
    @return: 302 response (to the login page if not logged in)
'''
return render_to_response('registration/inactivate_confirm.html',
{}, RequestContext(request))
@postmethod
@login_required
def inactivate(request):
'''
    Deactivates the user's membership and logs them out.
    @return: 404 response (if not logged in)
    @return: 200 response (success)
'''
request.user.is_active = False
request.user.save()
auth.logout(request)
return render_to_response('registration/inactivate_done.html',
{}, RequestContext(request))
def show(request, user_id=None):
'''
    Displays the given user's profile, recipes, and related information.
    @param user_id: user ID
'''
user = get_object_or_404(User, pk=user_id)
user_is_active_or_404(user)
profile = get_profile_or_create(user)
is_favorite = target_is_favorite(request.user, user)
popular_recipes = Recipe.objects.get_popular_recipes(user)[: 5]
recent_recipes = Recipe.objects.get_recent_recipes(user)[: 10]
favorite_recipes = Recipe.objects.favorite_recipes(user)[: 5]
favorite_users = FavoriteUser.objects.get_favorite_users(user)
u_and_p = zip_profile(favorite_users, favorite_users.values('pk').query)
d = {'homeuser': user, 'profile': profile, 'is_favorite': is_favorite,
'popular_recipes': popular_recipes, 'recent_recipes': recent_recipes,
'favorite_recipes': favorite_recipes,
'favorite_user_profiles': u_and_p}
return render_to_response('recipes/user.html',
d, RequestContext(request))
@login_required
def show_home(request):
'''
    Displays the home screen of the accessing user.
    @return: 403 response (if not logged in)
'''
user = request.user
profile = get_profile_or_create(user)
recent_recipes = Recipe.objects.get_recent_recipes(user,
allow_draft=True)[:10]
comments = Comment.objects.get_owner_comments(user)[: 5]
fav_user_actions = FavoriteUser.objects.get_favorite_user_actions(
user)[:10]
d = {'profile': profile,
'recent_recipes': recent_recipes,
'comments': comments,
'favorite_user_actions': fav_user_actions}
if not DailyAction.objects.is_done(user, user, 'login'):
Karma.add_karma(user, Karma.act_login, profile)
return render_to_response('recipes/home.html',
d, RequestContext(request))
def get_user_links(user):
'''
    Returns a list of dicts representing links to the user's pages. (This function is not a view function.)
'''
return [{'url': reverse('recipes-users-show',
kwargs={'user_id': user.id}),
'name': u'%s さん' % user.first_name}]
def show_recipe_list(request, user_id=None, page=1):
'''
    Displays the given user's recipes ordered by published_at, newest first.
    Drafts (is_draft == True) are not shown.
    @param user_id: user ID
    @param page: page to display
    @return: 404 response (if the user does not exist or is_active == False)
'''
user = get_object_or_404(User, pk=user_id)
user_is_active_or_404(user)
recipes = Recipe.objects.get_recent_recipes(user,
allow_draft=(request.user == user))
page_obj = Paginator(recipes, recipes_per_page).page(page)
return render_to_response('recipes/recipes.html',
{'title': u'%s さんのレシピ' % user.first_name,
'page_obj': page_obj, 'links': get_user_links(user)},
RequestContext(request))
@getmethod
@login_required
def edit_profile(request):
'''
    Displays the profile edit form.
    @return: 302 response (if not logged in)
'''
user = request.user
profile = get_profile_or_create(user)
profile_form = forms.UserProfileForm(instance=profile,
initial={'prefecture': profile.prefecture,
'first_name': user.first_name,
'last_name': user.last_name})
d = {'profile': profile, 'profile_form': profile_form}
return render_to_response('recipes/user_setting_form.html',
d, RequestContext(request))
@postmethod
@login_required
def edit_profile(request):
'''
    Updates the profile data.
    The fields follow the definition of UserProfileForm.
    @return: 200 response (for both success and failure)
'''
user = request.user
profile = user.get_profile()
profile_form = forms.UserProfileForm(request.POST, request.FILES,
instance=profile)
if not profile_form.is_valid():
d = {'profile': profile, 'profile_form': profile_form}
return render_to_response('recipes/user_setting_form.html',
d, RequestContext(request))
profile = profile_form.save()
nickname = request.POST.get('nickname')
if user.first_name != nickname:
user.first_name = nickname
user.save()
messages.add_message(request, messages.INFO, u'設定を変更しました')
return HttpResponseRedirect(reverse('recipes-users-edit-profile'))
@postmethod
@login_required
def add_favorite_user(request, user_id=None):
'''
    Registers the user with the given ID as a favorite of the logged-in user.
    Only users other than the specified user may do this (user != request.user).
    The FavoriteUser instance returned as JSON is in the following state:
    user: the logged-in user
    target: the User instance with the given ID
    @param user_id: user ID
    @return: 302 response (to the login page if not logged in)
    @return: 403 response (if the specified user is the logged-in user)
    @return: 404 response (if no user with the given ID exists)
    @return: 200 response (success; the FavoriteUser instance is output as JSON)
'''
target = get_object_or_404(User, pk=user_id)
user_is_active_or_404(target)
if target == request.user:
return render_to_response_of_class(HttpResponseForbidden, '403.html')
if not FavoriteUser.objects.reach_limit(request.user):
try:
fav = FavoriteUser.objects.get(user=request.user, target=target)
        except FavoriteUser.DoesNotExist:
fav = FavoriteUser.objects.create(user=request.user, target=target)
data = serializers.serialize('json', [fav])
return HttpResponse(data, mimetype='application/javascript')
else:
message = (u'申し訳ありません。'
u'フェイバリットメンバーにできるのは%s人までです。')\
% FavoriteUser.objects.limit
request.user.message_set.create(message=message)
return render_to_response_of_class(HttpResponseForbidden, '403.html')
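# For reference (sketch; the pk and field values below are made up): the payload
# produced by serializers.serialize('json', [fav]) is a JSON list with a single
# element shaped roughly like
#     [{"model": "recipes.favoriteuser", "pk": 1,
#       "fields": {"user": 2, "target": 3}}]
# which is what the client receives on success.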
@postmethod
@login_required
def remove_favorite_user(request, user_id=None):
'''
    Removes the user with the given ID from the logged-in user's favorites.
    Only the user who created the FavoriteUser instance may delete it.
    On success, the deleted FavoriteRecipe instance is returned as JSON.
    @param user_id: user ID
    @return: 302 response (to the login page if not logged in)
    @return: 403 response (if the requester did not create the FavoriteUser instance)
    @return: 404 response (if the user with the given ID is not among the favorites)
    @return: 200 response (success; the FavoriteUser instance is output as JSON)
'''
target = get_object_or_404(User, pk=user_id)
if target == request.user:
return render_to_response_of_class(HttpResponseForbidden, '403.html')
try:
fav = FavoriteUser.objects.get(user=request.user, target=target)
data = serializers.serialize('json', [fav])
fav.delete()
    except FavoriteUser.DoesNotExist:
raise Http404
return HttpResponse(data, mimetype='application/javascript')
def show_favorite_recipes(request, user_id=None, page=1):
'''
    Lists the given user's favorite recipes.
    @param user_id: user ID
    @param page: page to display
    @context page_obj: Page object whose object_list holds the queryset
    @return: 404 response (if the user does not exist or is_active == False)
    @return: 200 response (success)
'''
user = get_object_or_404(User, pk=user_id)
user_is_active_or_404(user)
fav_recipes = Recipe.objects.favorite_recipes(user)
page_obj = Paginator(fav_recipes, recipes_per_page).page(page)
links = get_user_links(user)
links.append(dict(name=u'%s さんのフェイバリットメンバー' % user.first_name,
url=reverse('recipes-users-favorite-users-show',
kwargs={'user_id': user.id})))
d = {'title': u'%s さんのフェイバリットレシピ' % user.first_name,
'page_obj': page_obj, 'links': links}
return render_to_response('recipes/recipes.html',
d, RequestContext(request))
def show_favorite_users(request, user_id=None, page=1):
'''
    Lists the given user's favorite members.
    @param user_id: user ID
    @param page: page to display
    @context favorite_users_and_profiles: list of dicts containing a user and their profile
    @return: 404 response (if the user does not exist or is_active == False)
    @return: 200 response (success)
'''
user = get_object_or_404(User, pk=user_id)
user_is_active_or_404(user)
fav_user_ids = FavoriteUser.objects.filter(user=user).values(
'target_id').query
fav_users = User.objects.filter(pk__in=fav_user_ids)
u_and_p = zip_profile(fav_users, fav_users.values('pk').query)
links = get_user_links(user)
links.append({'name': u'%s さんのフェイバリットレシピ' % user.first_name,
'url': reverse('recipes-users-favorite-recipes-show',
kwargs={'user_id': user.id})})
d = {'homeuser': user, 'favorite_users_and_profiles': u_and_p,
'links': links}
return render_to_response('recipes/favorite_users.html',
d, RequestContext(request))
@login_required
def show_owner_comments(request, only_not_moderated=False, page=1):
'''
    Lists the comments posted on the logged-in user's recipes.
    @param only_not_moderated: True to show only comments that are not yet approved
    @param page: page to display
    @return: 302 response (to the login page if not logged in)
'''
    page = page or 1  # page may be passed as None
qs = Comment.objects.filter(owner=request.user)
if only_not_moderated:
qs = qs.filter(is_moderated=False)
page_obj = Paginator(qs.order_by('-created_at'),
comments_per_page).page(page)
d = {'only_not_moderated': only_not_moderated, 'page_obj': page_obj}
return render_to_response('recipes/comments.html',
d, RequestContext(request))
@login_required
@getmethod
def change_email(request):
'''
    Displays the email-address change form.
    @return: 302 response (to the login page if not logged in)
'''
return render_change_email_form(request, forms.EmailChangeForm())
def render_change_email_form(request, form):
d = {'form': form,
'title': u'メールアドレスの変更',
'focus_id': 'id_email',
'text': u'新しいメールアドレスを入力してください。',
'submit_text': u'変更'}
return render_to_response('base_form.html',
d, RequestContext(request))
@login_required
@postmethod
def change_email(request):
'''
    Registers an email-address change request.
    The POSTed content follows the fields defined in EmailChangeForm.
    Builds the URL that must be visited to complete the change and sends it to the new address by email.
    @return: 302 response (to the login page if not logged in)
    @return: 200 response (success, or if the form content is invalid)
'''
form = forms.EmailChangeForm(request.POST)
if form.is_valid():
new_email = form.cleaned_data['email']
profile = request.user.get_profile()
profile.change_email(new_email)
profile.save()
email_change_email(request, request.user, profile)
message = u'%s にメールを送信しました。\n' % new_email + \
u'メールに含まれるURLをクリックすると、変更が完了します。'
d = {'title': u'メールアドレスの変更',
'text': message}
return render_to_response('base_message.html',
d, RequestContext(request))
else:
return render_change_email_form(request, form)
def email_change_email(request, user, profile):
'''
    Registers the email as a task. (This function is not a view function.)
'''
site = Site.objects.get_current()
validation_url = request.build_absolute_uri(
reverse('recipes-users-validate-email',
kwargs={'user_id': user.id,
'key': profile.validation_key}))
send_email('registration/email/change_email.txt',
Context({'user': user, 'validation_url': validation_url}),
u'[%s] アドレス変更確認メール' % site.name,
[profile.pending_email])
def validate_change_email(request, user_id=None, key=None):
'''
    Performs the email-address change.
    @param user_id: ID of the user whose address is changed
    @param key: validation key for the change
    @return: 403 response (if the key is not valid)
    @return: 200 response (success)
'''
user = get_object_or_404(User, pk=user_id)
profile = user.get_profile()
if profile.validate(key):
profile.disable_validation_key()
profile.save()
user.email = profile.pending_email
user.username = forms.email_to_username(user.email)
user.save()
d = {'title': u'メールアドレスの変更完了',
'text': u'メールアドレスの変更が完了しました。'}
return render_to_response('base_message.html',
d, RequestContext(request))
else:
return render_to_response_of_class(HttpResponseForbidden, '403.html')
@getmethod
def login(request):
'''
    Displays the login form.
'''
d = {'form': forms.AuthenticationForm(),
'next': request.REQUEST.get('next', '')}
return render_to_response('registration/login.html', d,
RequestContext(request))
@postmethod
def login(request):
'''
    Logs in the accessing user.
    The fields follow the definition of AuthenticationForm.
    @return: 200 response (if the form content is invalid)
    @return: 302 response (success; redirects to the home screen or the path given by next)
'''
form = forms.AuthenticationForm(request.POST)
if form.is_valid():
user = form.get_user()
if user is not None and user.is_active:
auth.login(request, user)
redirect_to = request.REQUEST.get('next', '')
if not redirect_to or '//' in redirect_to or ' ' in redirect_to:
redirect_to = settings.LOGIN_REDIRECT_URL
return HttpResponseRedirect(redirect_to)
return render_to_response('registration/login.html', {'form': form},
RequestContext(request))
def show_active_users(request):
'''
    Lists the active members.
    Active members are users who ranked high in DailyScore during the last five weeks.
    @context users_and_profiles: list of dicts containing a user and their profile
    @return: 200 response (success)
# modules_torch.py
import os, sys, pickle, time, shutil, logging, copy
import math, numpy, scipy
numpy.random.seed(545)
import torch
torch.manual_seed(545)
from modules import make_logger
'''
This file contains handy modules of using PyTorch
'''
########################
# PyTorch-based Layers #
########################
class Tensor_Reshape(torch.nn.Module):
def __init__(self, current_layer_params):
super().__init__()
self.params = current_layer_params
def update_layer_params(self):
input_dim_seq = self.params['input_dim_seq']
input_dim_values = self.params['input_dim_values']
expect_input_dim_seq = self.params['expect_input_dim_seq']
# First, check if change is needed at all; pass on if not
if input_dim_seq == expect_input_dim_seq:
self.params['expect_input_dim_values'] = input_dim_values
return self.params
else:
# Make anything into ['S', 'B', 'T', 'D']
if input_dim_seq == ['S', 'B', 'T', 'D']:
# Do nothing, pass on
temp_input_dim_values = input_dim_values
elif input_dim_seq == ['S', 'B', 'D']:
# Add T and make it 1
temp_input_dim_values = {'S':input_dim_values['S'], 'B':input_dim_values['B'], 'T':1, 'D':input_dim_values['D']}
# Then, make from ['S', 'B', 'T', 'D']
if expect_input_dim_seq == ['S', 'B', 'D']:
# So basically, stack and remove T; last dimension D -> T * D
self.params['expect_input_dim_values'] = {'S':temp_input_dim_values['S'], 'B':temp_input_dim_values['B'], 'T':0, 'D':temp_input_dim_values['T']*temp_input_dim_values['D'] }
elif expect_input_dim_seq == ['S','B','1','T']:
# If D>1, that is stacked waveform, so flatten it
# So basically, stack and remove D; T -> T * D
self.params['expect_input_dim_values'] = {'S':temp_input_dim_values['S'], 'B':temp_input_dim_values['B'], 'T':temp_input_dim_values['T']*temp_input_dim_values['D'],'D':0 }
elif expect_input_dim_seq == ['S','B','T']:
# If D>1, that is stacked waveform, so flatten it
# So basically, stack and remove D; T -> T * D
self.params['expect_input_dim_values'] = {'S':temp_input_dim_values['S'], 'B':temp_input_dim_values['B'], 'T':temp_input_dim_values['T']*temp_input_dim_values['D'],'D':0 }
return self.params
def forward(self, x):
input_dim_seq = self.params['input_dim_seq']
input_dim_values = self.params['input_dim_values']
expect_input_dim_seq = self.params['expect_input_dim_seq']
expect_input_dim_values = self.params['expect_input_dim_values']
# First, check if change is needed at all; pass on if not
if input_dim_seq == expect_input_dim_seq:
return x
else:
# Make anything into ['S', 'B', 'T', 'D']
if input_dim_seq == ['S', 'B', 'T', 'D']:
# Do nothing, pass on
temp_input = x
elif input_dim_seq == ['S', 'B', 'D']:
# Add T and make it 1
temp_input_dim_values = [input_dim_values['S'], input_dim_values['B'], 1, input_dim_values['D']]
temp_input = x.view(temp_input_dim_values)
# Then, make from ['S', 'B', 'T', 'D']
if expect_input_dim_seq == ['S', 'B', 'D']:
expect_input_shape_values = [expect_input_dim_values['S'], expect_input_dim_values['B'], expect_input_dim_values['D']]
expect_input = temp_input.view(expect_input_shape_values)
elif expect_input_dim_seq == ['S','B','1','T']:
expect_input_shape_values = [expect_input_dim_values['S'], expect_input_dim_values['B'], 1, expect_input_dim_values['T']]
expect_input = temp_input.view(expect_input_shape_values)
elif expect_input_dim_seq == ['S','B','T']:
expect_input_shape_values = [expect_input_dim_values['S'], expect_input_dim_values['B'], expect_input_dim_values['T']]
expect_input = temp_input.view(expect_input_shape_values)
return expect_input
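# Example of the reshape performed above (values are illustrative):
#   input ['S', 'B', 'T', 'D'] with {'S': 4, 'B': 40, 'T': 200, 'D': 1}
#     expect ['S', 'B', 'D']      -> viewed as (4, 40, 200)     (T folded into D)
#     expect ['S', 'B', '1', 'T'] -> viewed as (4, 40, 1, 200)  (D folded into T)
#     expect ['S', 'B', 'T']      -> viewed as (4, 40, 200)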
class Build_S_B_TD_Input_Layer(object):
''' This layer has only parameters, no torch.nn.module '''
''' Mainly for the prev_layer argument '''
def __init__(self, dv_y_cfg):
self.input_dim = dv_y_cfg.batch_seq_len * dv_y_cfg.feat_dim
self.params = {}
self.params["output_dim_seq"] = ['S', 'B', 'D']
self.params["output_dim_values"] = {'S':dv_y_cfg.batch_num_spk, 'B':dv_y_cfg.spk_num_seq, 'D':self.input_dim}
v = self.params["output_dim_values"]
self.params["output_shape_values"] = [v['S'], v['B'], v['D']]
class Build_NN_Layer(torch.nn.Module):
def __init__(self, layer_config, prev_layer):
super().__init__()
self.params = {}
self.params["layer_config"] = layer_config
self.params["type"] = layer_config['type']
self.params["size"] = layer_config['size']
self.params["input_dim_seq"] = prev_layer.params["output_dim_seq"]
self.params["input_dim_values"] = prev_layer.params["output_dim_values"]
# To be set per layer type; mostly for definition of h
self.params["expect_input_dim_seq"] = []
self.params["expect_input_dim_values"] = {}
self.params["output_dim_seq"] = []
self.params["output_dim_values"] = {}
construct_layer = getattr(self, self.params["layer_config"]["type"])
construct_layer()
''' Dropout '''
try:
self.params["dropout_p"] = self.params["layer_config"]['dropout_p']
except KeyError:
self.params["dropout_p"] = 0.
if self.params["dropout_p"] > 0:
self.dropout_fn = torch.nn.Dropout(p=self.params["dropout_p"])
else:
self.dropout_fn = lambda a: a # Do nothing, just return same tensor
def forward(self, x):
x = self.reshape_fn(x)
x = self.layer_fn(x)
x = self.dropout_fn(x)
return x
def ReLUDVMax(self):
self.params["expect_input_dim_seq"] = ['S','B','D']
self.reshape_fn = Tensor_Reshape(self.params)
self.params = self.reshape_fn.update_layer_params()
self.params["output_dim_seq"] = ['S', 'B', 'D']
v = self.params["expect_input_dim_values"]
self.params["output_dim_values"] = {'S': v['S'], 'B': v['B'], 'D': self.params["size"]}
input_dim = self.params['expect_input_dim_values']['D']
output_dim = self.params['output_dim_values']['D']
num_channels = self.params["layer_config"]["num_channels"]
self.layer_fn = ReLUDVMaxLayer(input_dim, output_dim, num_channels)
def LinDV(self):
self.params["expect_input_dim_seq"] = ['S','B','D']
self.reshape_fn = Tensor_Reshape(self.params)
self.params = self.reshape_fn.update_layer_params()
self.params["output_dim_seq"] = ['S', 'B', 'D']
v = self.params["expect_input_dim_values"]
self.params["output_dim_values"] = {'S': v['S'], 'B': v['B'], 'D': self.params["size"]}
input_dim = self.params['expect_input_dim_values']['D']
output_dim = self.params['output_dim_values']['D']
self.layer_fn = torch.nn.Linear(input_dim, output_dim)
def Sinenet(self):
self.params["expect_input_dim_seq"] = ['S','B','1','T']
self.reshape_fn = Tensor_Reshape(self.params)
self.params = self.reshape_fn.update_layer_params()
self.params["output_dim_seq"] = ['S', 'B', 'D']
v = self.params["expect_input_dim_values"]
self.params["output_dim_values"] = {'S': v['S'], 'B': v['B'], 'D': self.params["size"]}
input_dim = self.params['expect_input_dim_values']['D']
output_dim = self.params['output_dim_values']['D']
num_channels = self.params["layer_config"]["num_channels"]
time_len = self.params['expect_input_dim_values']['T']
self.layer_fn = SinenetLayer(time_len, output_dim, num_channels)
def SinenetV1(self):
self.params["expect_input_dim_seq"] = ['S','B','1','T']
self.reshape_fn = Tensor_Reshape(self.params)
self.params = self.reshape_fn.update_layer_params()
self.params["output_dim_seq"] = ['S', 'B', 'D']
v = self.params["expect_input_dim_values"]
self.params["output_dim_values"] = {'S': v['S'], 'B': v['B'], 'D': self.params["size"]}
input_dim = self.params['expect_input_dim_values']['D']
output_dim = self.params['output_dim_values']['D']
num_channels = self.params["layer_config"]["num_channels"]
time_len = self.params['expect_input_dim_values']['T']
self.layer_fn = SinenetLayerV1(time_len, output_dim, num_channels)
self.params["output_dim_values"]['D'] += 1 # +1 to append nlf F0 values
class ReLUDVMaxLayer(torch.nn.Module):
def __init__(self, input_dim, output_dim, num_channels):
super().__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.num_channels = num_channels
self.fc_list = torch.nn.ModuleList([torch.nn.Linear(input_dim, output_dim) for i in range(self.num_channels)])
self.relu_fn = torch.nn.ReLU()
def forward(self, x):
h_list = []
for i in range(self.num_channels):
# Linear
h_i = self.fc_list[i](x)
# ReLU
h_i = self.relu_fn(h_i)
h_list.append(h_i)
h_stack = torch.stack(h_list, dim=0)
# MaxOut
h_max, _indices = torch.max(h_stack, dim=0, keepdim=False)
return h_max
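# Illustrative usage (shapes are assumptions): a maxout-over-ReLU block that maps
# (S, B, input_dim) to (S, B, output_dim) by taking the element-wise max over
# num_channels independent Linear+ReLU branches:
#     layer = ReLUDVMaxLayer(input_dim=3200, output_dim=256, num_channels=4)
#     h = layer(torch.randn(4, 40, 3200))   # h.shape == (4, 40, 256)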
class SinenetLayerIndiv(torch.nn.Module):
''' Try to build per frequency '''
def __init__(self, time_len, output_dim, num_channels):
super().__init__()
self.time_len = time_len
self.output_dim = output_dim # Total output dimension
self.num_channels = num_channels # Number of components per frequency
self.num_freq = int(output_dim / num_channels) # Number of frequency components
self.t_wav = 1./16000
self.log_f_mean = 5.02654
self.log_f_std = 0.373288
self.k_T_tensor = self.make_k_T_tensor()
self.sinenet_list = torch.nn.ModuleList()
for i in range(self.num_freq):
for j in range(self.num_channels):
self.sinenet_list.append(SinenetComponent(self.time_len, i))
def forward(self, x, nlf, tau):
lf = torch.add(torch.mul(nlf, self.log_f_std), self.log_f_mean) # S*B
f = torch.exp(lf) # S*B
# Time
t = torch.add(self.k_T_tensor, torch.neg(tau)) # 1*T + S*B*1 -> S*B*T
h_list = []
for k in range(self.output_dim):
h_k = self.sinenet_list[k](x, f, t)
h_list.append(h_k) # SB
# Need to check the stacking process
h_SBD = torch.stack(h_list, dim=2)
return h_SBD
def make_k_T_tensor(self):
# indices along time; 1*T
k_T_vec = numpy.zeros((1,self.time_len))
for i in range(self.time_len):
k_T_vec[0,i] = i
k_T_vec = k_T_vec * self.t_wav
k_T_tensor = torch.tensor(k_T_vec, dtype=torch.float, requires_grad=False)
k_T_tensor = torch.nn.Parameter(k_T_tensor, requires_grad=False)
return k_T_tensor
class SinenetComponent(torch.nn.Module):
def __init__(self, time_len, i):
super().__init__()
self.time_len = time_len
self.i = i # Multiple of fundamental frequency
self.t_wav = 1./16000
self.log_f_mean = 5.02654
self.log_f_std = 0.373288
self.a = torch.nn.Parameter(torch.Tensor(1))
self.phi = torch.nn.Parameter(torch.Tensor(1))
def forward(self, x, f, t):
# Degree in radian
i_f = torch.mul(self.i, f) # 1 * S*B*1 -> S*B*1
i_f_t = torch.mul(i_f, t) # S*B*1 * S*B*T -> S*B*T
deg = torch.add(i_f_t, self.phi) # S*B*T + 1 -> S*B*T
s = torch.sin(deg) # S*B*T
self.W = torch.mul(self.a, s) # 1 * S*B*T -> S*B*T
h_SBT = torch.mul(self.W, x) # S*B*T * S*B*T -> S*B*T
h_SB = torch.sum(h_SBT, dim=-1, keepdim=False)
return h_SB
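# In closed form, each SinenetComponent computes, per (S, B) element,
#     h = sum_t  a * sin(i * f * (t_k - tau) + phi) * x[t_k]
# i.e. the inner product of the input frame with a learnable sinusoid whose
# frequency is the i-th multiple of the denormalised F0 value f.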
class SinenetLayerTooBig(torch.nn.Module):
''' Intermediate tensor has dimension S*B*T*D, too big '''
def __init__(self, time_len, output_dim, num_channels):
super().__init__()
self.time_len = time_len
self.output_dim = output_dim # Total output dimension
self.num_channels = num_channels # Number of components per frequency
self.num_freq = int(output_dim / num_channels) # Number of frequency components
self.t_wav = 1./16000
self.log_f_mean = 5.02654
self.log_f_std = 0.373288
self.i_2pi_tensor = self.make_i_2pi_tensor() # D*1
self.k_T_tensor = self.make_k_T_tensor() # 1*T
self.a = torch.nn.Parameter(torch.Tensor(output_dim, 1)) # D*1
self.phi = torch.nn.Parameter(torch.Tensor(output_dim, 1)) # D*1
def forward(self, x, nlf, tau):
'''
Input dimensions
x: S*B*1*T
nlf, tau: S*B*1*1
'''
# Denorm and exp norm_log_f (S*B)
# Norm: norm_features = (features - mean_matrix) / std_matrix
# Denorm: features = norm_features * std_matrix + mean_matrix
lf = torch.add(torch.mul(nlf, self.log_f_std), self.log_f_mean) # S*B
f = torch.exp(lf) # S*B
# Time
t = torch.add(self.k_T_tensor, torch.neg(tau)) # 1*T + S*B*1*1 -> S*B*1*T
# Degree in radian
f_t = torch.mul(f, t) # S*B*1*1 * S*B*1*T -> S*B*1*T
deg = torch.nn.functional.linear(f_t, self.i_2pi_tensor, bias=self.phi)
i_f = torch.mul(self.i_2pi_tensor, f) # D*1 * S*B*1*1 -> S*B*D*1
i_f_t = torch.mul(i_f, t) # S*B*D*1 * S*B*1*T -> S*B*D*T
deg = torch.add(i_f_t, self.phi) # S*B*D*T + D*1 -> S*B*D*T
# # Degree in radian
# ft = torch.mul(f, t) # S*B*1*1 * S*B*1*T = S*B*1*T
# deg = torch.mul(self.i_2pi_tensor, deg) # D*1 * S*B*1*T = S*B*D*T
# deg = torch.add(deg, self.phi) # S*B*D*T + D*1 -> S*B*D*T
# deg = torch.mul(self.i_2pi_tensor, t) # D*1 * S*B*1*T -> S*B*D*T
# deg = torch.mul(f, deg) # S*B*1*1 * S*B*D*T = S*B*D*T
# deg = torch.add(deg, self.phi) # S*B*D*T + D*1 -> S*B*D*T
# Sine
s = torch.sin(deg) # S*B*D*T
self.W = torch.mul(self.a, s) # D*1 * S*B*D*T -> S*B*D*T
# self.W = torch.mul(self.a, torch.sin(torch.add(torch.mul(f, torch.mul(self.i_2pi_tensor, torch.add(self.k_T_tensor, torch.neg(tau)))), self.phi)))
        h_SBDT = torch.mul(self.W, x)
{} is not 3.".format(
ads[0].serial))
return False
call_conf_id = None
for call_id in calls:
if call_id != call_ab_id and call_id != call_ac_id:
call_conf_id = call_id
if not call_conf_id:
self.log.error("Merge call fail, no new conference call id.")
return False
if not verify_incall_state(self.log, [ads[0], ads[1], ads[2]], True):
return False
# Check if Conf Call is currently active
if ads[0].droid.telecomCallGetCallState(
call_conf_id) != CALL_STATE_ACTIVE:
self.log.error(
"Call_id:{}, state:{}, expected: STATE_ACTIVE".format(
call_conf_id, ads[0].droid.telecomCallGetCallState(
call_conf_id)))
return False
self.log.info("Step5: End call on PhoneC and verify call continues.")
ads[2].droid.telecomEndCall()
time.sleep(WAIT_TIME_IN_CALL)
calls = ads[0].droid.telecomCallGetCallIds()
self.log.info("Calls in PhoneA{}".format(calls))
if num_active_calls(self.log, ads[0]) != 1:
return False
if not verify_incall_state(self.log, [ads[0], ads[1]], True):
return False
if not verify_incall_state(self.log, [ads[2]], False):
return False
self.log.info("Step6: End call on PhoneB and verify PhoneA end.")
ads[1].droid.telecomEndCall()
time.sleep(WAIT_TIME_IN_CALL)
if not verify_incall_state(self.log, [ads[0], ads[1], ads[2]], False):
return False
return True
@TelephonyBaseTest.tel_test_wrap
def test_gsm_mo_mo_add_merge_drop(self):
""" Test Conf Call among three phones.
Call from PhoneA to PhoneB, accept on PhoneB.
Call from PhoneA to PhoneC, accept on PhoneC.
On PhoneA, merge to conference call.
End call on PhoneC, verify call continues.
End call on PhoneB, verify call end on PhoneA.
Returns:
True if pass; False if fail.
"""
call_ab_id, call_ac_id = self._test_gsm_mo_mo_add_swap_x(0)
if call_ab_id is None or call_ac_id is None:
return False
return self._test_gsm_conference_merge_drop(call_ab_id, call_ac_id)
@TelephonyBaseTest.tel_test_wrap
def test_gsm_mo_mo_add_swap_once_drop_held(self):
""" Test Conf Call among three phones.
Call from PhoneA to PhoneB, accept on PhoneB.
Call from PhoneA to PhoneC, accept on PhoneC.
On PhoneA, swap active call.
End call on PhoneB, verify call continues.
End call on PhoneC, verify call end on PhoneA.
Returns:
True if pass; False if fail.
"""
ads = self.android_devices
call_ab_id, call_ac_id = self._test_gsm_mo_mo_add_swap_x(1)
if call_ab_id is None or call_ac_id is None:
return False
return self._three_phone_hangup_call_verify_call_state(
ad_hangup=ads[2],
ad_verify=ads[0],
call_id=call_ab_id,
call_state=CALL_STATE_ACTIVE,
ads_active=[ads[0], ads[1]])
@TelephonyBaseTest.tel_test_wrap
def test_gsm_mt_mt_add_merge_drop(self):
""" Test Conf Call among three phones.
Call from PhoneB to PhoneA, accept on PhoneA.
Call from PhoneC to PhoneA, accept on PhoneA.
On PhoneA, merge to conference call.
End call on PhoneC, verify call continues.
End call on PhoneB, verify call end on PhoneA.
Returns:
True if pass; False if fail.
"""
call_ab_id, call_ac_id = self._test_gsm_mt_mt_add_swap_x(0)
if call_ab_id is None or call_ac_id is None:
return False
return self._test_gsm_conference_merge_drop(call_ab_id, call_ac_id)
@TelephonyBaseTest.tel_test_wrap
def test_epdg_mo_mo_add_epdg_merge_drop_second_call_from_participant_wfc_apm_wifi_preferred_no_cep(
self):
""" Test WFC Conference Call among three phones. No CEP.
Steps:
1. PhoneA (WFC APM WiFi Preferred) call PhoneB (WFC APM WiFi Preferred), accept on PhoneB.
2. PhoneA (WFC APM WiFi Preferred) call PhoneC (WFC APM WiFi Preferred), accept on PhoneC.
3. On PhoneA, merge to conference call (No CEP).
4. End call on PhoneC, verify call continues.
5. End call on PhoneB, verify call end on PhoneA.
Expected Results:
3. Conference merged successfully.
4. Drop calls succeeded. Call between A-B continues.
5. Drop calls succeeded, all call participants drop.
Returns:
True if pass; False if fail.
TAGS: Telephony, WFC, Conference, No_CEP
Priority: 1
"""
ads = self.android_devices
tasks = [(phone_setup_iwlan,
(self.log, ads[0], True, WFC_MODE_WIFI_PREFERRED,
self.wifi_network_ssid, self.wifi_network_pass)),
(phone_setup_iwlan,
(self.log, ads[1], True, WFC_MODE_WIFI_PREFERRED,
self.wifi_network_ssid, self.wifi_network_pass)),
(phone_setup_iwlan,
(self.log, ads[2], True, WFC_MODE_WIFI_PREFERRED,
self.wifi_network_ssid, self.wifi_network_pass))]
if not multithread_func(self.log, tasks):
self.log.error("Phone Failed to Set Up Properly.")
return False
call_ab_id, call_ac_id = self._test_epdg_mo_mo_add_epdg_swap_x(0)
if call_ab_id is None or call_ac_id is None:
return False
return self._test_ims_conference_merge_drop_second_call_no_cep(
call_ab_id, call_ac_id)
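    # Note on the setup pattern above (not part of the test logic): each entry in
    # `tasks` is a (callable, args) tuple and multithread_func runs them
    # concurrently, with a falsy result treated here as a setup failure, e.g.
    #     tasks = [(phone_setup_iwlan,
    #               (self.log, ad, True, WFC_MODE_WIFI_PREFERRED,
    #                self.wifi_network_ssid, self.wifi_network_pass))
    #              for ad in ads]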
@TelephonyBaseTest.tel_test_wrap
def test_epdg_mo_mo_add_epdg_merge_drop_second_call_from_participant_wfc_apm_wifi_preferred_cep(
self):
""" Test WFC Conference Call among three phones. CEP enabled.
        Steps:
1. PhoneA (WFC APM WiFi Preferred) call PhoneB (WFC APM WiFi Preferred), accept on PhoneB.
2. PhoneA (WFC APM WiFi Preferred) call PhoneC (WFC APM WiFi Preferred), accept on PhoneC.
3. On PhoneA, merge to conference call (WFC CEP conference call).
4. End call on PhoneC, verify call continues.
5. End call on PhoneB, verify call end on PhoneA.
Expected Results:
3. Conference merged successfully.
4. Drop calls succeeded. Call between A-B continues.
5. Drop calls succeeded, all call participants drop.
Returns:
True if pass; False if fail.
TAGS: Telephony, WFC, Conference, CEP
Priority: 1
"""
ads = self.android_devices
tasks = [(phone_setup_iwlan,
(self.log, ads[0], True, WFC_MODE_WIFI_PREFERRED,
self.wifi_network_ssid, self.wifi_network_pass)),
(phone_setup_iwlan,
(self.log, ads[1], True, WFC_MODE_WIFI_PREFERRED,
self.wifi_network_ssid, self.wifi_network_pass)),
(phone_setup_iwlan,
(self.log, ads[2], True, WFC_MODE_WIFI_PREFERRED,
self.wifi_network_ssid, self.wifi_network_pass))]
if not multithread_func(self.log, tasks):
self.log.error("Phone Failed to Set Up Properly.")
return False
call_ab_id, call_ac_id = self._test_epdg_mo_mo_add_epdg_swap_x(0)
if call_ab_id is None or call_ac_id is None:
return False
return self._test_ims_conference_merge_drop_second_call_from_participant_cep(
call_ab_id, call_ac_id)
@TelephonyBaseTest.tel_test_wrap
def test_epdg_mo_mo_add_epdg_merge_drop_second_call_from_host_wfc_apm_wifi_preferred_cep(
self):
""" Test WFC Conference Call among three phones. CEP enabled.
Steps:
1. PhoneA (WFC APM WiFi Preferred) call PhoneB (WFC APM WiFi Preferred), accept on PhoneB.
2. PhoneA (WFC APM WiFi Preferred) call PhoneC (WFC APM WiFi Preferred), accept on PhoneC.
3. On PhoneA, merge to conference call (WFC CEP conference call).
4. On PhoneA disconnect call between A-C, verify call continues.
5. On PhoneA disconnect call between A-B, verify call continues.
Expected Results:
3. Conference merged successfully.
4. Drop calls succeeded. Call between A-B continues.
5. Drop calls succeeded, all call participants drop.
Returns:
True if pass; False if fail.
TAGS: Telephony, WFC, Conference, CEP
Priority: 1
"""
ads = self.android_devices
tasks = [(phone_setup_iwlan,
(self.log, ads[0], True, WFC_MODE_WIFI_PREFERRED,
self.wifi_network_ssid, self.wifi_network_pass)),
(phone_setup_iwlan,
(self.log, ads[1], True, WFC_MODE_WIFI_PREFERRED,
self.wifi_network_ssid, self.wifi_network_pass)),
(phone_setup_iwlan,
(self.log, ads[2], True, WFC_MODE_WIFI_PREFERRED,
self.wifi_network_ssid, self.wifi_network_pass))]
if not multithread_func(self.log, tasks):
self.log.error("Phone Failed to Set Up Properly.")
return False
call_ab_id, call_ac_id = self._test_epdg_mo_mo_add_epdg_swap_x(0)
if call_ab_id is None or call_ac_id is None:
return False
return self._test_ims_conference_merge_drop_second_call_from_host_cep(
call_ab_id, call_ac_id)
@TelephonyBaseTest.tel_test_wrap
def test_epdg_mo_mo_add_epdg_merge_drop_first_call_from_participant_wfc_apm_wifi_preferred_cep(
self):
""" Test WFC Conference Call among three phones. CEP enabled.
Steps:
1. PhoneA (WFC APM WiFi Preferred) call PhoneB (WFC APM WiFi Preferred), accept on PhoneB.
2. PhoneA (WFC APM WiFi Preferred) call PhoneC (WFC APM WiFi Preferred), accept on PhoneC.
3. On PhoneA, merge to conference call (WFC CEP conference call).
4. End call on PhoneB, verify call continues.
5. End call on PhoneC, verify call end on PhoneA.
Expected Results:
3. Conference merged successfully.
4. Drop calls succeeded. Call between A-C continues.
5. Drop calls succeeded, all call participants drop.
Returns:
True if pass; False if fail.
TAGS: Telephony, WFC, Conference, CEP
Priority: 1
"""
ads = self.android_devices
tasks = [(phone_setup_iwlan,
(self.log, ads[0], True, WFC_MODE_WIFI_PREFERRED,
self.wifi_network_ssid, self.wifi_network_pass)),
(phone_setup_iwlan,
(self.log, ads[1], True, WFC_MODE_WIFI_PREFERRED,
self.wifi_network_ssid, self.wifi_network_pass)),
(phone_setup_iwlan,
(self.log, ads[2], True, WFC_MODE_WIFI_PREFERRED,
self.wifi_network_ssid, self.wifi_network_pass))]
if not multithread_func(self.log, tasks):
self.log.error("Phone Failed to Set Up Properly.")
return False
call_ab_id, call_ac_id = self._test_epdg_mo_mo_add_epdg_swap_x(0)
if call_ab_id is None or call_ac_id is None:
return False
return self._test_ims_conference_merge_drop_first_call_from_participant_cep(
call_ab_id, call_ac_id)
@TelephonyBaseTest.tel_test_wrap
def test_epdg_mo_mo_add_epdg_merge_drop_first_call_from_host_wfc_apm_wifi_preferred_cep(
self):
""" Test WFC Conference Call among three phones. CEP enabled.
Steps:
1. PhoneA (WFC APM WiFi Preferred) call PhoneB (WFC APM WiFi Preferred), accept on PhoneB.
2. PhoneA (WFC APM WiFi Preferred) call PhoneC (WFC APM WiFi Preferred), accept on PhoneC.
3. On PhoneA, merge to conference call (WFC CEP conference call).
4. On PhoneA disconnect call between A-B, verify call continues.
5. On PhoneA disconnect call between A-C, verify call continues.
Expected Results:
3. Conference merged successfully.
4. Drop calls succeeded. Call between A-C continues.
5. Drop calls succeeded, all call participants drop.
Returns:
True if pass; False if fail.
TAGS: Telephony, WFC, Conference, CEP
Priority: 1
"""
ads = self.android_devices
tasks = [(phone_setup_iwlan,
(self.log, ads[0], True, WFC_MODE_WIFI_PREFERRED,
self.wifi_network_ssid, self.wifi_network_pass)),
(phone_setup_iwlan,
(self.log, ads[1], True, WFC_MODE_WIFI_PREFERRED,
self.wifi_network_ssid, self.wifi_network_pass)),
(phone_setup_iwlan,
(self.log, ads[2], True, WFC_MODE_WIFI_PREFERRED,
self.wifi_network_ssid, self.wifi_network_pass))]
if not multithread_func(self.log, tasks):
self.log.error("Phone Failed to Set Up Properly.")
return False
call_ab_id, call_ac_id = self._test_epdg_mo_mo_add_epdg_swap_x(0)
if call_ab_id is None or call_ac_id is None:
return False
return self._test_ims_conference_merge_drop_first_call_from_host_cep(
call_ab_id, call_ac_id)
@TelephonyBaseTest.tel_test_wrap
def test_epdg_mo_mt_add_epdg_merge_drop_second_call_from_participant_wfc_apm_wifi_preferred_no_cep(
self):
""" Test WFC Conference Call among three phones. No CEP.
Steps:
1. PhoneA (WFC APM WiFi Preferred) call PhoneB (WFC APM WiFi Preferred), accept on PhoneB.
2. PhoneC (WFC APM WiFi Preferred) call PhoneA (WFC APM WiFi Preferred), accept on PhoneA.
3. On PhoneA, merge to conference call (No CEP).
4. End call on PhoneC, verify call continues.
5. End call on PhoneB, verify call end on PhoneA.
Expected Results:
3. Conference merged successfully.
4. Drop calls succeeded. Call between A-B continues.
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#ipc_params.
def enterIpc_params(self, ctx: tnsnamesParser.Ipc_paramsContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#ipc_params.
def exitIpc_params(self, ctx: tnsnamesParser.Ipc_paramsContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#ipc_parameter.
def enterIpc_parameter(self, ctx: tnsnamesParser.Ipc_parameterContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#ipc_parameter.
def exitIpc_parameter(self, ctx: tnsnamesParser.Ipc_parameterContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#ipc_ipc.
def enterIpc_ipc(self, ctx: tnsnamesParser.Ipc_ipcContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#ipc_ipc.
def exitIpc_ipc(self, ctx: tnsnamesParser.Ipc_ipcContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#ipc_key.
def enterIpc_key(self, ctx: tnsnamesParser.Ipc_keyContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#ipc_key.
def exitIpc_key(self, ctx: tnsnamesParser.Ipc_keyContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#spx_protocol.
def enterSpx_protocol(self, ctx: tnsnamesParser.Spx_protocolContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#spx_protocol.
def exitSpx_protocol(self, ctx: tnsnamesParser.Spx_protocolContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#spx_params.
def enterSpx_params(self, ctx: tnsnamesParser.Spx_paramsContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#spx_params.
def exitSpx_params(self, ctx: tnsnamesParser.Spx_paramsContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#spx_parameter.
def enterSpx_parameter(self, ctx: tnsnamesParser.Spx_parameterContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#spx_parameter.
def exitSpx_parameter(self, ctx: tnsnamesParser.Spx_parameterContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#spx_spx.
def enterSpx_spx(self, ctx: tnsnamesParser.Spx_spxContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#spx_spx.
def exitSpx_spx(self, ctx: tnsnamesParser.Spx_spxContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#spx_service.
def enterSpx_service(self, ctx: tnsnamesParser.Spx_serviceContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#spx_service.
def exitSpx_service(self, ctx: tnsnamesParser.Spx_serviceContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#nmp_protocol.
def enterNmp_protocol(self, ctx: tnsnamesParser.Nmp_protocolContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#nmp_protocol.
def exitNmp_protocol(self, ctx: tnsnamesParser.Nmp_protocolContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#nmp_params.
def enterNmp_params(self, ctx: tnsnamesParser.Nmp_paramsContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#nmp_params.
def exitNmp_params(self, ctx: tnsnamesParser.Nmp_paramsContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#nmp_parameter.
def enterNmp_parameter(self, ctx: tnsnamesParser.Nmp_parameterContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#nmp_parameter.
def exitNmp_parameter(self, ctx: tnsnamesParser.Nmp_parameterContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#nmp_nmp.
def enterNmp_nmp(self, ctx: tnsnamesParser.Nmp_nmpContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#nmp_nmp.
def exitNmp_nmp(self, ctx: tnsnamesParser.Nmp_nmpContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#nmp_server.
def enterNmp_server(self, ctx: tnsnamesParser.Nmp_serverContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#nmp_server.
def exitNmp_server(self, ctx: tnsnamesParser.Nmp_serverContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#nmp_pipe.
def enterNmp_pipe(self, ctx: tnsnamesParser.Nmp_pipeContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#nmp_pipe.
def exitNmp_pipe(self, ctx: tnsnamesParser.Nmp_pipeContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#beq_protocol.
def enterBeq_protocol(self, ctx: tnsnamesParser.Beq_protocolContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#beq_protocol.
def exitBeq_protocol(self, ctx: tnsnamesParser.Beq_protocolContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop()
# Enter a parse tree produced by tnsnamesParser#beq_params.
def enterBeq_params(self, ctx: tnsnamesParser.Beq_paramsContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#beq_params.
def exitBeq_params(self, ctx: tnsnamesParser.Beq_paramsContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop()
# Enter a parse tree produced by tnsnamesParser#beq_parameter.
def enterBeq_parameter(self, ctx: tnsnamesParser.Beq_parameterContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#beq_parameter.
def exitBeq_parameter(self, ctx: tnsnamesParser.Beq_parameterContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop()
# Enter a parse tree produced by tnsnamesParser#beq_beq.
def enterBeq_beq(self, ctx: tnsnamesParser.Beq_beqContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#beq_beq.
def exitBeq_beq(self, ctx: tnsnamesParser.Beq_beqContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop()
# Enter a parse tree produced by tnsnamesParser#beq_program.
def enterBeq_program(self, ctx: tnsnamesParser.Beq_programContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#beq_program.
def exitBeq_program(self, ctx: tnsnamesParser.Beq_programContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop()
# Enter a parse tree produced by tnsnamesParser#beq_argv0.
def enterBeq_argv0(self, ctx: tnsnamesParser.Beq_argv0Context):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#beq_argv0.
def exitBeq_argv0(self, ctx: tnsnamesParser.Beq_argv0Context):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop()
# Enter a parse tree produced by tnsnamesParser#beq_args.
def enterBeq_args(self, ctx: tnsnamesParser.Beq_argsContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#beq_args.
def exitBeq_args(self, ctx: tnsnamesParser.Beq_argsContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop()
# Enter a parse tree produced by tnsnamesParser#ba_parameter.
def enterBa_parameter(self, ctx: tnsnamesParser.Ba_parameterContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#ba_parameter.
def exitBa_parameter(self, ctx: tnsnamesParser.Ba_parameterContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop()
# Enter a parse tree produced by tnsnamesParser#ba_description.
def enterBa_description(self, ctx: tnsnamesParser.Ba_descriptionContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#ba_description.
def exitBa_description(self, ctx: tnsnamesParser.Ba_descriptionContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop()
# Enter a parse tree produced by tnsnamesParser#bad_params.
def enterBad_params(self, ctx: tnsnamesParser.Bad_paramsContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#bad_params.
def exitBad_params(self, ctx: tnsnamesParser.Bad_paramsContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop()
# Enter a parse tree produced by tnsnamesParser#bad_parameter.
def enterBad_parameter(self, ctx: tnsnamesParser.Bad_parameterContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#bad_parameter.
def exitBad_parameter(self, ctx: tnsnamesParser.Bad_parameterContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop()
# Enter a parse tree produced by tnsnamesParser#bad_local.
def enterBad_local(self, ctx: tnsnamesParser.Bad_localContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#bad_local.
def exitBad_local(self, ctx: tnsnamesParser.Bad_localContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop()
# Enter a parse tree produced by tnsnamesParser#bad_address.
def enterBad_address(self, ctx: tnsnamesParser.Bad_addressContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#bad_address.
def exitBad_address(self, ctx: tnsnamesParser.Bad_addressContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop()
# Enter a parse tree produced by tnsnamesParser#connect_data.
def enterConnect_data(self, ctx: tnsnamesParser.Connect_dataContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#connect_data.
def exitConnect_data(self, ctx: tnsnamesParser.Connect_dataContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop()
# Enter a parse tree produced by tnsnamesParser#cd_params.
def enterCd_params(self, ctx: tnsnamesParser.Cd_paramsContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#cd_params.
def exitCd_params(self, ctx: tnsnamesParser.Cd_paramsContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop()
# Enter a parse tree produced by tnsnamesParser#cd_parameter.
def enterCd_parameter(self, ctx: tnsnamesParser.Cd_parameterContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#cd_parameter.
def exitCd_parameter(self, ctx: tnsnamesParser.Cd_parameterContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop()
# Enter a parse tree produced by tnsnamesParser#cd_service_name.
def enterCd_service_name(self, ctx: tnsnamesParser.Cd_service_nameContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#cd_service_name.
def exitCd_service_name(self, ctx: tnsnamesParser.Cd_service_nameContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop()
# Enter a parse tree produced by tnsnamesParser#cd_sid.
def enterCd_sid(self, ctx: tnsnamesParser.Cd_sidContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#cd_sid.
def exitCd_sid(self, ctx: tnsnamesParser.Cd_sidContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop()
# Enter a parse tree produced by tnsnamesParser#cd_instance_name.
def enterCd_instance_name(self, ctx: tnsnamesParser.Cd_instance_nameContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#cd_instance_name.
def exitCd_instance_name(self, ctx: tnsnamesParser.Cd_instance_nameContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop()
# Enter a parse tree produced by tnsnamesParser#cd_failover_mode.
def enterCd_failover_mode(self, ctx: tnsnamesParser.Cd_failover_modeContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#cd_failover_mode.
def exitCd_failover_mode(self, ctx: tnsnamesParser.Cd_failover_modeContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop()
# Enter a parse tree produced by tnsnamesParser#cd_global_name.
def enterCd_global_name(self, ctx: tnsnamesParser.Cd_global_nameContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#cd_global_name.
def exitCd_global_name(self, ctx: tnsnamesParser.Cd_global_nameContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop()
# Enter a parse tree produced by tnsnamesParser#cd_hs.
def enterCd_hs(self, ctx: tnsnamesParser.Cd_hsContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#cd_hs.
def exitCd_hs(self, ctx: tnsnamesParser.Cd_hsContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop()
# Enter a parse tree produced by tnsnamesParser#cd_rdb_database.
def enterCd_rdb_database(self, ctx: tnsnamesParser.Cd_rdb_databaseContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#cd_rdb_database.
def exitCd_rdb_database(self, ctx: tnsnamesParser.Cd_rdb_databaseContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop()
# Enter a parse tree produced by tnsnamesParser#cd_server.
def enterCd_server(self, ctx: tnsnamesParser.Cd_serverContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#cd_server.
def exitCd_server(self, ctx: tnsnamesParser.Cd_serverContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop()
# Enter a parse tree produced by tnsnamesParser#cd_ur.
def enterCd_ur(self, ctx: tnsnamesParser.Cd_urContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#cd_ur.
def exitCd_ur(self, ctx: tnsnamesParser.Cd_urContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop()
# Enter a parse tree produced by tnsnamesParser#fo_params.
def enterFo_params(self, ctx: tnsnamesParser.Fo_paramsContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#fo_params.
def exitFo_params(self, ctx: tnsnamesParser.Fo_paramsContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop()
# Enter a parse tree produced by tnsnamesParser#fo_parameter.
def enterFo_parameter(self, ctx: tnsnamesParser.Fo_parameterContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#fo_parameter.
def exitFo_parameter(self, ctx: tnsnamesParser.Fo_parameterContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop()
# Enter a parse tree produced by tnsnamesParser#fo_type.
def enterFo_type(self, ctx: tnsnamesParser.Fo_typeContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#fo_type.
def exitFo_type(self, ctx: tnsnamesParser.Fo_typeContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop()
# Enter a parse tree produced by tnsnamesParser#fo_backup.
def enterFo_backup(self, ctx: tnsnamesParser.Fo_backupContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#fo_backup.
def exitFo_backup(self, ctx: tnsnamesParser.Fo_backupContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop()
| |
import numpy as np
import pandas as pd
import time
# https://github.com/UNSW-CEEM/Bill_Calculator
# Prepared by <NAME> (<EMAIL>)
# You can learn how to use this function by running the Tariff Calculation Example notebook in this repository
# Inputs: Tariff and Load profile (30 min interval, one year,
# timestamps are the end of time period: 12:30 is consumption from 12 to 12:30)
# If tariff rates include GST, the result will be GST inclusive
# If a discount applies to any rate, it should be applied before calling this function
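# A minimal usage sketch (illustrative only, not from the original repository); the
# tariff keys shown ('ProviderType', 'Parameters', 'Daily', 'FlatRate', 'Value') are
# inferred from the checks inside bill_calculator below:
# idx = pd.date_range('2021-01-01 00:30', periods=17520, freq='30min')
# load = pd.DataFrame({'site_1': 0.5}, index=idx)  # 30-min kWh for one full year
# tariff = {'ProviderType': 'Retailer',
#           'Parameters': {'Daily': {'Value': 1.0}, 'FlatRate': {'Value': 0.25}}}
# bills = bill_calculator(load, tariff)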
def bill_calculator(load_profile, tariff, network_load=None, fit=True):
# Treating load profile
load_profile = load_profile.fillna(0)
def time_select(load_profile_s, par):
load_profile_s_t_a = pd.DataFrame()
for k2_1, v2_1 in par['TimeIntervals'].items():
if v2_1[0][0:2] == '24':
v2_1[0] = v2_1[0].replace("24", "00")
if v2_1[1][0:2] == '24':
v2_1[1] = v2_1[1].replace("24", "00")
if v2_1[0] != v2_1[1]:
load_profile_s_t = load_profile_s.between_time(start_time=v2_1[0], end_time=v2_1[1], include_start=False,
include_end=True)
else:
load_profile_s_t = load_profile_s.copy()
if not par['Weekday']:
load_profile_s_t = load_profile_s_t.loc[load_profile_s_t.index.weekday >= 5].copy()
if not par['Weekend']:
load_profile_s_t = load_profile_s_t.loc[load_profile_s_t.index.weekday < 5].copy()
load_profile_s_t = load_profile_s_t.loc[load_profile_s_t.index.month.isin(par['Month']), :].copy()
load_profile_s_t_a = pd.concat([load_profile_s_t_a, load_profile_s_t])
return load_profile_s_t_a
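# Example call (hypothetical parameter dict, mirroring the keys used above):
# time_select(load_profile, {'TimeIntervals': {'Peak': ['14:00', '20:00']},
#                            'Weekday': True, 'Weekend': False, 'Month': [12, 1, 2]})
# keeps only weekday consumption between 14:00 and 20:00 in Dec-Feb.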
# Calculate imports and exports
results = {}
Temp_imp = load_profile.values
Temp_exp = Temp_imp.copy()
Temp_imp[Temp_imp < 0] = 0
Temp_exp[Temp_exp > 0] = 0
load_profile_import = pd.DataFrame(Temp_imp, columns=load_profile.columns, index=load_profile.index)
load_profile_export = pd.DataFrame(Temp_exp, columns=load_profile.columns, index=load_profile.index)
results['LoadInfo'] = pd.DataFrame(index=[col for col in load_profile.columns],
data=np.sum(load_profile_import.values, axis=0), columns=['Annual_kWh'])
if fit:
results['LoadInfo']['Annual_kWh_exp'] = -1 * np.sum(load_profile_export.values, axis=0)
# If it is retailer put retailer as a component to make it similar to network tariffs
if tariff['ProviderType'] == 'Retailer':
tariff_temp = tariff.copy()
del tariff_temp['Parameters']
tariff_temp['Parameters'] = {'Retailer': tariff['Parameters']}
tariff = tariff_temp.copy()
for TarComp, TarCompVal in tariff['Parameters'].items():
results[TarComp] = pd.DataFrame(index=results['LoadInfo'].index)
# Calculate the FiT
for TarComp, TarCompVal in tariff['Parameters'].items():
if 'FiT' in TarCompVal.keys():
results[TarComp]['Charge_FiT_Rebate'] = -1 * results['LoadInfo']['Annual_kWh_exp'] * TarCompVal['FiT']['Value']
elif 'FiT_TOU' in TarCompVal.keys():
load_profile_ti_exp = pd.DataFrame()
load_profile_ti_exp_charge = pd.DataFrame()
for k, v in TarCompVal['FiT_TOU'].items():
this_part = v.copy()
if 'Weekday' not in this_part:
this_part['Weekday'] = True
this_part['Weekend'] = True
if 'TimeIntervals' not in this_part:
this_part['TimeIntervals'] = {'T1': ['00:00', '00:00']}
if 'Month' not in this_part:
this_part['Month'] = list(range(1, 13))
load_profile_t_a = time_select(load_profile_export, this_part)
load_profile_ti_exp[k] = load_profile_t_a.sum()
results[TarComp]['kWh_Exp' + k] = load_profile_ti_exp[k].copy()
load_profile_ti_exp_charge[k] = this_part['Value'] * load_profile_ti_exp[k]
results[TarComp]['FiT_C_TOU' + k] = load_profile_ti_exp_charge[k].copy()
results[TarComp]['Charge_FiT_Rebate'] = load_profile_ti_exp_charge.sum(axis=1)
# Check if daily exists and calculate the charge
for TarComp, TarCompVal in tariff['Parameters'].items():
if 'Daily' in TarCompVal.keys():
num_days = (len(load_profile.index.normalize().unique()) - 1)
break
for TarComp, TarCompVal in tariff['Parameters'].items():
if 'Daily' in TarCompVal.keys():
results[TarComp]['Charge_Daily'] = num_days * TarCompVal['Daily']['Value']
# Energy
# Flat Rate:
# Check if flat rate charge exists and calculate the charge
for TarComp, TarCompVal in tariff['Parameters'].items():
if 'FlatRate' in TarCompVal.keys():
results[TarComp]['Charge_FlatRate'] = results['LoadInfo']['Annual_kWh'] * TarCompVal['FlatRate']['Value']
# Block Annual:
for TarComp, TarCompVal in tariff['Parameters'].items():
if 'BlockAnnual' in TarCompVal.keys():
block_use = results['LoadInfo'][['Annual_kWh']].copy()
block_use_charge = block_use.copy()
# separating the blocks of usage
lim = 0
for k, v in TarCompVal['BlockAnnual'].items():
block_use[k] = block_use['Annual_kWh']
block_use[k][block_use[k] > float(v['HighBound'])] = float(v['HighBound'])
block_use[k] = block_use[k] - lim
block_use[k][block_use[k] < 0] = 0
lim = float(v['HighBound'])
block_use_charge[k] = block_use[k] * v['Value']
del block_use['Annual_kWh']
del block_use_charge['Annual_kWh']
results[TarComp]['Charge_BlockAnnual'] = block_use_charge.sum(axis=1)
# Block Quarterly:
# check if it has quarterly and if yes calculate the quarterly energy
for TarComp, TarCompVal in tariff['Parameters'].items():
if 'BlockQuarterly' in TarCompVal.keys():
for Q in range(1, 5):
load_profile_q = load_profile_import.loc[
load_profile_import.index.month.isin(list(range((Q - 1) * 3 + 1, Q * 3 + 1))), :]
results['LoadInfo']['kWh_Q' + str(Q)] = [
np.nansum(load_profile_q[col].values[load_profile_q[col].values > 0])
for col in load_profile_q.columns]
break
for TarComp, TarCompVal in tariff['Parameters'].items():
if 'BlockQuarterly' in TarCompVal.keys():
for Q in range(1, 5):
block_use = results['LoadInfo'][['kWh_Q' + str(Q)]].copy()
block_use_charge = block_use.copy()
lim = 0
for k, v in TarCompVal['BlockQuarterly'].items():
block_use[k] = block_use['kWh_Q' + str(Q)]
block_use[k][block_use[k] > float(v['HighBound'])] = float(v['HighBound'])
block_use[k] = block_use[k] - lim
block_use[k][block_use[k] < 0] = 0
lim = float(v['HighBound'])
block_use_charge[k] = block_use[k] * v['Value']
del block_use['kWh_Q' + str(Q)]
del block_use_charge['kWh_Q' + str(Q)]
results[TarComp]['C_Q' + str(Q)] = block_use_charge.sum(axis=1)
results[TarComp]['Charge_BlockQuarterly'] = results[TarComp][
['C_Q' + str(Q) for Q in range(1, 5)]].sum(axis=1)
# Block Monthly:
# check if it has Monthly and if yes calculate the Monthly energy
for TarComp, TarCompVal in tariff['Parameters'].items():
if 'BlockMonthly' in TarCompVal.keys():
for m in range(1, 13):
load_profile_m = load_profile_import.loc[load_profile_import.index.month == m, :]
results['LoadInfo']['kWh_m' + str(m)] = [
np.nansum(load_profile_m[col].values[load_profile_m[col].values > 0])
for col in load_profile_m.columns]
break
for TarComp, TarCompVal in tariff['Parameters'].items():
if 'BlockMonthly' in TarCompVal.keys():
for Q in range(1, 13):
block_use = results['LoadInfo'][['kWh_m' + str(Q)]].copy()
block_use_charge = block_use.copy()
lim = 0
for k, v in TarCompVal['BlockMonthly'].items():
block_use[k] = block_use['kWh_m' + str(Q)]
block_use[k][block_use[k] > float(v['HighBound'])] = float(v['HighBound'])
block_use[k] = block_use[k] - lim
block_use[k][block_use[k] < 0] = 0
lim = float(v['HighBound'])
block_use_charge[k] = block_use[k] * v['Value']
del block_use['kWh_m' + str(Q)]
del block_use_charge['kWh_m' + str(Q)]
results[TarComp]['C_m' + str(Q)] = block_use_charge.sum(axis=1)
results[TarComp]['Charge_BlockMonthly'] = results[TarComp][['C_m' + str(Q) for Q in range(1, 13)]].sum(
axis=1)
# Block Daily:
for TarComp, TarCompVal in tariff['Parameters'].items():
if 'BlockDaily' in TarCompVal.keys():
DailykWh = load_profile_import.resample('D').sum()
block_use_temp_charge = DailykWh.copy()
block_use_temp_charge.iloc[:, :] = 0
lim = 0
for k, v in TarCompVal['BlockDaily'].items():
block_use_temp = DailykWh.copy()
block_use_temp[block_use_temp > float(v['HighBound'])] = float(v['HighBound'])
block_use_temp = block_use_temp - lim
block_use_temp[block_use_temp < 0] = 0
lim = float(v['HighBound'])
block_use_temp_charge = block_use_temp_charge + block_use_temp * v['Value']
results[TarComp]['Charge_BlockDaily'] = block_use_temp_charge.sum(axis=0)
# TOU energy
for TarComp, TarCompVal in tariff['Parameters'].items():
if 'TOU' in TarCompVal.keys():
load_profile_ti = pd.DataFrame()
load_profile_ti_charge = pd.DataFrame()
for k, v in TarCompVal['TOU'].items():
this_part = v.copy()
if 'Weekday' not in this_part:
this_part['Weekday'] = True
this_part['Weekend'] = True
if 'TimeIntervals' not in this_part:
this_part['TimeIntervals'] = {'T1': ['00:00', '00:00']}
if 'Month' not in this_part:
this_part['Month'] = list(range(1, 13))
load_profile_t_a = time_select(load_profile_import, this_part)
load_profile_ti[k] = load_profile_t_a.sum()
results[TarComp]['kWh_' + k] = load_profile_ti[k].copy()
load_profile_ti_charge[k] = this_part['Value'] * load_profile_ti[k]
results[TarComp]['C_' + k] = load_profile_ti_charge[k].copy()
results[TarComp]['Charge_TOU'] = load_profile_ti_charge.sum(axis=1)
# Demand charge:
for TarComp, TarCompVal in tariff['Parameters'].items():
if 'Demand' in TarCompVal.keys():
for DemCharComp, DemCharCompVal in TarCompVal['Demand'].items():
ts_num = DemCharCompVal['Demand Window Length'] # number of timestamp
num_of_peaks = DemCharCompVal['Number of Peaks']
if ts_num > 1:
load_profile_r = load_profile_import.rolling(ts_num, min_periods=1).mean()
else:
load_profile_r = load_profile_import.copy()
load_profile_f = time_select(load_profile_r, DemCharCompVal)
# if capacity charge is applied meaning the charge only applies when you exceed the capacity for
# a certain number of times
if 'Capacity' in DemCharCompVal:
# please note the capacity charge only works with user's demand peak (not coincident peak)
# Customers can exceed their capacity level on x separate days per month during each interval
# (day or night). If they exceed more than x times, they will be charged for the highest
# exceedance of their capacity the capacity charge (if they don't exceed) is already included
# in the fixed charge so they only pay for the difference
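# Worked example (illustrative numbers, not from the source): with Capacity = 10 kW the
# half-hourly threshold below is 10 / 2 = 5 kWh; if a site exceeds 5 kWh on more days in
# a month than 'Capacity Exceeded No' allows, the remaining exceedances feed into the
# demand charge calculated below, otherwise that month is zeroed out.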
capacity = DemCharCompVal['Capacity']['Value']
if 'Capacity Exceeded No' in DemCharCompVal:
cap_exc_no = DemCharCompVal['Capacity Exceeded No']
else:
cap_exc_no = 0
load_profile_f = load_profile_f - (capacity / 2)
load_profile_f = load_profile_f.clip(lower=0)
load_profile_f_g = load_profile_f.groupby(load_profile_f.index.normalize()).max()
for m in range(1, 13):
arr = load_profile_f_g.loc[load_profile_f_g.index.month == m, :].copy().values
cap_exc_no_val = np.sum(arr > 0, axis=0)
load_profile_f.loc[load_profile_f.index.month == m, cap_exc_no_val <= cap_exc_no] = 0
load_profile_f2 = load_profile_f.copy()
else:
load_profile_f2 = load_profile_f.copy()
based_on_network_peak = False
if 'Based on Network Peak' in DemCharCompVal:
if DemCharCompVal['Based on Network Peak']:
based_on_network_peak = True
# minimum demand or demand charge
min_dem1 = 0
min_dem2 = 0
if 'Min Demand (kW)' in DemCharCompVal:
min_dem1 = DemCharCompVal['Min Demand (kW)']
if 'Min Demand Charge ($)' in DemCharCompVal:
if DemCharCompVal['Value'] > 0:
min_dem2 = DemCharCompVal['Min Demand Charge ($)'] / DemCharCompVal['Value']
min_dem = min(min_dem1, min_dem2)
if based_on_network_peak:
new_load = pd.merge(load_profile_f2, network_load, left_index=True, right_index=True)
average_peaks_all = np.empty((0, new_load.shape[1] - 1), dtype=float)
for m in DemCharCompVal['Month']:
new_load2 = new_load.loc[new_load.index.month == m, :].copy()
new_load2.sort_values(by='NetworkLoad', inplace=True, ascending=False)
average_peaks_all = np.append(average_peaks_all,
[2 * new_load2.iloc[:num_of_peaks, :-1].values.mean(axis=0)],
axis=0)
average_peaks_all = np.clip(average_peaks_all, a_min=min_dem, a_max=None)
average_peaks_all_sum = average_peaks_all.sum(axis=0)
else:
average_peaks_all = np.empty((0, load_profile_f.shape[1]), dtype=float)
for m in DemCharCompVal['Month']:
arr = load_profile_f.loc[load_profile_f.index.month == m, :].copy().values
arr.sort(axis=0)
arr = arr[::-1]
average_peaks_all = np.append(average_peaks_all, [2 * arr[:num_of_peaks, :].mean(axis=0)],
axis=0)
average_peaks_all = np.clip(average_peaks_all, a_min=min_dem, a_max=None)
average_peaks_all_sum = average_peaks_all.sum(axis=0)
results[TarComp]['Avg_kW_' + DemCharComp] = average_peaks_all_sum / len(DemCharCompVal['Month'])
results[TarComp]['C_' + DemCharComp] = average_peaks_all_sum * DemCharCompVal['Value']
results[TarComp]['Charge_Demand'] = results[TarComp][
[col for col in results[TarComp] if col.startswith('C_')]].sum(axis=1)
for k, v in results.items():
if k != | |
<gh_stars>0
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
import tensorflow_probability as tfp
import datetime
import os, sys
from argparse import ArgumentParser
# Debug module
# from tensorflow.python import debug as tf_debug
import numpy as np
import warnings
from keras.datasets import mnist
from tensorflow.python.summary.writer.writer import FileWriter
import matplotlib.pyplot as plt
warnings.simplefilter('error', UserWarning)
class IWAE:
def __init__(self, input_shape, batch_size, layer_specs, k_samples, lr, sess, small):
self.data_ph = tf.placeholder(dtype=tf.float32, shape=(None, k_samples, input_shape))
self.train_ph = tf.placeholder(dtype=tf.bool)
self.tot_obj_loss = tf.placeholder(dtype=tf.float32)
self.log2pi = tf.log(2 * np.pi)
self.q_probs = []
self.h_units = layer_specs
self.batch_size = batch_size
self.small = small
self.init = tf.placeholder(dtype=tf.bool)
self.k = k_samples
self.log_w = tf.zeros(dtype=tf.float32, shape=[batch_size, self.k])
self.norm_w = tf.zeros_like(self.log_w)
self.sess = sess
self.recon = self.model(self.data_ph)
self.loss, self.obj_loss = self.objective_function()
with tf.name_scope('Optimizer'):
self.optimizer = tf.train.AdamOptimizer(learning_rate=lr, beta1=0.9, beta2=0.999).minimize(self.obj_loss)
self.summary = tf.Summary()
loss_summary = tf.summary.scalar('Objective loss', self.tot_obj_loss)
self.merge_op = tf.summary.merge_all()
print('Logging to:', './logs/' + str(datetime.datetime.now()))
self.writer = tf.summary.FileWriter('./logs/' + str(datetime.datetime.now()))
def dense(self, x_, num_units, init_scale=0.01, scope_name=''):
"""
Dense layer including Weight normalization and initialization
as presented by (Kingma & Salimans, Weight normalization, 2016)
based on code from: https://github.com/openai/weightnorm/blob/master/tensorflow/nn.py
currently not giving the desired results
:param x_: input data
:param num_units: number of units in the dense layer
:param init_scale: initialization scale
:param scope_name: name of current scope
:return: data run through dense layer
"""
with tf.variable_scope(scope_name):
ema = tf.train.ExponentialMovingAverage(decay=0.998)
if self.init is not False:
V = tf.get_variable('V', shape=[int(x_.get_shape()[-1]), num_units], dtype=tf.float32,
initializer=tf.random_normal_initializer(0, 0.05), trainable=True)
g = tf.get_variable('g', shape=[num_units], dtype=tf.float32,
initializer=tf.constant_initializer(1.), trainable=True)
b = tf.get_variable('b', shape=[num_units], dtype=tf.float32,
initializer=tf.constant_initializer(0.), trainable=True)
else:
V = tf.get_variable('V')
g = tf.get_variable('g')
b = tf.get_variable('b')
tf.assert_variables_initialized([V, g, b])
ema.apply([V, g, b])
g_ = tf.expand_dims(g, 0)
g_ = tf.tile(g_, [self.k, 1])
# use weight normalization (Salimans & Kingma, 2016)
x = tf.matmul(x_, V)
scaler = g_ / tf.sqrt(tf.reduce_sum(tf.square(V), [0]))
b_ = tf.expand_dims(b, 0)
b_ = tf.tile(b_, [self.k, 1])
x = tf.reshape(scaler, [1, self.k, num_units]) * x + tf.reshape(b_, [1, self.k, num_units])
if self.init is not False: # normalize x
m_init, v_init = tf.nn.moments(x, [0])
m_init = m_init[0]
scale_init = init_scale / tf.sqrt(v_init + 1e-10)
scale_init = scale_init[0]
with tf.control_dependencies([g.assign(g * scale_init), b.assign_add(-m_init * scale_init)]):
# x = tf.identity(x)
g_s = tf.expand_dims(g, 0)
g_s = tf.tile(g_s, [self.k, 1])
x = tf.matmul(x_, V)
scaler = g_s / tf.sqrt(tf.reduce_sum(tf.square(V), [0]))
b_ = tf.expand_dims(b, 0)
b_ = tf.tile(b_, [self.k, 1])
x = tf.reshape(scaler, [1, self.k, num_units]) * x + tf.reshape(b_, [1, self.k, num_units])
return x
def MLP_layer(self, x, mlp_units, out_dims, scope_name=''):
"""
MLP layer with sampling built in
:param x: input data
:param mlp_units: dimensions of the MLP layers
:param out_dims: output dimension for matching the next MLP layer
:param scope_name: set the scope_name for WeightNorm, currently not working properly
:return: nu, rho
"""
# 2 regular linear dense layers with leaky Relu activations
# x = self.dense(x, num_units=mlp_units, init_scale=1., scope_name=scope_name + '_dense1')
x = tf.layers.dense(x, mlp_units)
h_inter = tf.nn.leaky_relu(x, alpha=0.1)
# h_i = self.dense(h_inter, num_units=mlp_units, init_scale=1., scope_name=scope_name + '_dense2')
h_i = tf.layers.dense(h_inter, mlp_units)
h_i = tf.nn.leaky_relu(h_i, alpha=0.1)
# nu = self.dense(h_i, num_units=out_dims, init_scale=1., scope_name=scope_name + '_dense3')
nu = tf.layers.dense(h_i, out_dims)
# rho = 0.01 + tf.nn.softplus(self.dense(h_i, num_units=out_dims, init_scale=1., scope_name=scope_name + '_dense4'))
rho = 0.01 + tf.nn.softplus(tf.layers.dense(h_i, out_dims))
return nu, rho
def sample_z(self, nu, rho, value=None, bern=False):
"""
sample from N(nu, rho)
:param nu: mean
:param rho: stddev
:param value: None or the latent variables from the corresponding encoder layer (if we are in the decoder layer)
:param bern: Flag for using a bernoulli distribution
:return: logprob(z|nu,rho) & z
"""
# flag for using a bernoulli distribution
if bern:
sample_dist = tf.distributions.Bernoulli(logits=nu, dtype=tf.float32)
nu_bern = sample_dist.mean()
return nu_bern, self.bincrossentropy(value, nu)
# reparametrization trick
eps = tf.random_normal(tf.shape(nu), dtype=tf.float32)
z_next = nu + rho*eps
if value is not None:
estimate = value
else:
estimate = z_next
log2pi = 0.5*np.log(2*np.pi)
logprob_z = (-tf.constant(log2pi, dtype=tf.float32))-\
0.5*(tf.reduce_sum(tf.square((estimate-nu)/rho) + 2*tf.log(rho), axis=-1))
return z_next, logprob_z
def bincrossentropy(self, x, x_hat):
"""
calculate binary cross-entropy between true image and reconstruction
:param x: true image
:param x_hat: reconstructed image at the bernoulli layer of the decoder
:return: binary cross-entropy
"""
x_hat = tf.nn.sigmoid(x_hat)
bce = x * tf.log(x_hat + 1e-8) + (1 - x) * tf.log(1 - x_hat + 1e-8)
return tf.reduce_sum(bce, axis=-1)
def calc_logw(self, q_logprob, p_logprob):
"""
calculate the log weights
:param q_logprob: output of a layer in q
:param p_logprob: output of a layer in p
:return: no return
"""
self.log_w += p_logprob - q_logprob
def calc_norm_tilde(self):
"""
calculates the normalized importance weights
:return: no return
"""
log_w_max = tf.math.reduce_max(self.log_w, axis=-1, keepdims=True)
log_w = tf.math.subtract(self.log_w, log_w_max)
w = tf.math.exp(log_w)
self.norm_w = tf.math.divide(w, tf.math.reduce_sum(w, axis=-1, keepdims=True))
def objective_function(self):
"""
Calculate the objective function loss
:return: deprecated loss and objective function loss
"""
k = tf.constant(self.k, dtype=tf.float32)
with tf.name_scope('Loss'):
# this loss is currently not used anywhere, deprecated
self.calc_norm_tilde()
loss = - tf.reduce_mean(tf.reduce_sum(self.norm_w * self.log_w, axis=-1))
# objective loss over k-samples
log_sum_w = tf.reduce_logsumexp(self.log_w, axis=-1)
obj_loss = - tf.reduce_sum(tf.math.subtract(log_sum_w, tf.math.log(k)), axis=0)
return loss, obj_loss
def train(self, trn_data):
trn_data = np.array([self.k * [x] for x in trn_data])
_, recon, obj_loss, loss, log_w = self.sess.run([self.optimizer,
self.recon,
self.obj_loss,
self.loss,
self.log_w],
feed_dict={
self.train_ph: True,
self.data_ph: trn_data,
self.init: False
})
return recon, obj_loss, loss, log_w
def test(self, test_data):
test_data = np.array([self.k * [x] for x in test_data])
recon, obj_loss, loss, log_w = self.sess.run([self.recon,
self.obj_loss,
self.loss,
self.log_w],
feed_dict={
self.data_ph: test_data,
self.train_ph: False,
self.init: False
})
return recon, obj_loss, loss
def data_based_initialize(self, mb_data):
test_data = np.array([self.k * [x] for x in mb_data])
empt = self.sess.run([], feed_dict={self.data_ph: test_data, self.init: True})
def model(self, q_z_next):
"""
IWAE model structure for the Non-facturized case
:param q_z_next: input data
:return: returns a reconstructed image
"""
self.log_w = tf.zeros_like(self.log_w)
q_logprob_tot = 0
p_logprob_tot = 0
q_nu_next = None
q_rho_next = None
recon = None
q_zs = [q_z_next]
if self.small is True:
mult = 2
else:
mult = 8
# Encoder portion
for mlp_units in self.h_units:
with tf.name_scope('Q_MLP_layer'):
q_dense_name = 'Q_MLP_layer_{}_'.format(mlp_units)
q_nu_next, q_rho_next = self.MLP_layer(q_z_next, mlp_units=mult * mlp_units,
out_dims=mlp_units, scope_name=q_dense_name)
with tf.name_scope('Q_stochastic_layer'):
q_z_next, q_logprob = self.sample_z(q_nu_next, q_rho_next)
q_logprob_tot += q_logprob
q_zs.append(q_z_next)
# account for prior ~ N(0,1)
with tf.name_scope('Prior'):
prior_nu = tf.zeros_like(q_nu_next)
prior_rho = tf.ones_like(q_rho_next)
_, prior_logprob = self.sample_z(prior_nu, prior_rho, q_z_next)
p_logprob_tot += prior_logprob
# Decoder portion
for p_out, mlp_units, q_z_in, q_z_out in zip([8, 16, 32, 64, 784],
self.h_units[::-1],
q_zs[:0:-1],
q_zs[-2::-1]):
# at last decoder layer, sample from Bernoulli dist
if p_out == 784:
bern = True
else:
bern = False
with tf.name_scope('P_MLP_layer'):
p_dense_name = 'P_MLP_layer_{}_'.format(mlp_units)
p_nu, p_rho = self.MLP_layer(
q_z_in, mlp_units=2 * mlp_units, out_dims=p_out, scope_name=p_dense_name)
with tf.name_scope('P_stochastic_layer'):
p_z_next, p_logprob = self.sample_z(p_nu, p_rho, q_z_out, bern=bern)
if bern:
recon = p_z_next
p_logprob_tot += p_logprob
with tf.name_scope('log_w'):
self.calc_logw(q_logprob_tot, p_logprob_tot)
return recon
def mb(x, batch_size):
"""
Minibatch generator
:param x: input data
:param batch_size: desired batch size
:return: yield a new batch each call
"""
n_samples = x.shape[0]
n_batches = int(np.ceil(n_samples / batch_size))
while True:
permutation = np.random.permutation(x.shape[0])
for b in range(n_batches):
batch_idx = permutation[b *
batch_size:(b + 1) * batch_size]
batch = x[batch_idx]
if batch.shape[0] is not batch_size:
continue
yield batch
parser = ArgumentParser("Tensorflow implementation of IWAE in TMC-paper from NeurIPS 2019")
parser.add_argument('-k', dest='k', type=int, default=20, help="Option for choosing k")
parser.add_argument('--epochs', dest='epochs', type=int, default=1200, help="Option for choosing number of epochs")
parser.add_argument('--batch_size', dest='batch_size', type=int, default=128, help="Option for choosing batch size")
parser.add_argument('--model_type', dest='model_type', type=str, default='small', help="Option for using small or large model")
args = parser.parse_args()
print("Batch size: ", args.batch_size)
print("Number of epochs: ", args.epochs)
print("Model type: ", args.model_type)
print("k: ", args.k)
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.astype('float32') / 255.0
x_test = x_test.astype('float32') / 255.0
batch_size = 128
# TODO TEST WITH k = 5, k = 20, k = 50, k = 100
model_type = args.model_type
if model_type == 'small':
small = True
else:
small = False
restore_and_recon = True
lr = float(1e-3)
batch_size = args.batch_size
# want to test with k = 5, 20, 50, 100
"""TODO: TEST WITH k = 5, k = 20, k = 50, k = 100"""
k = args.k
epochs = args.epochs
save_path = 'IWAE_model_non_fac_{}_k_{}'.format(model_type, k)
if not os.path.exists(save_path):
os.mkdir(save_path)
with tf.Session() as sess:
IWAE_net = IWAE(batch_size=batch_size, input_shape=784, k_samples=k, layer_specs=[64, 32, 16, 8, 4],
lr=lr, sess=sess, small=small)
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
if restore_and_recon:
saver.restore(sess, '{}'.format(tf.train.latest_checkpoint('/home/linus/DD2412-Reproducibility-project-Group-61/IWAE/IWAE_model_non_fac_small_k_20_iwae_obj/')))
x_gen_test = mb(x_test, | |
# -*- coding: utf-8 -*-
"""Veil https api client."""
import asyncio
import json
import logging
from types import TracebackType
from typing import Dict, Optional, Type
from urllib.parse import urlencode
from uuid import UUID, uuid4
try:
import ujson
except ImportError: # pragma: no cover
ujson = None
try:
import aiohttp
except ImportError: # pragma: no cover
aiohttp = None
from .api_objects import (VeilCluster, VeilController, VeilDataPool,
VeilDomainExt, VeilEvent, VeilLibrary, VeilNode, VeilResourcePool,
VeilVDisk)
from .base import VeilRetryConfiguration, VeilTag, VeilTask
from .base.api_cache import VeilCacheConfiguration, cached_response
from .base.utils import (IntType, NullableDictType, VeilJwtTokenType,
VeilUrlStringType, veil_api_response)
logger = logging.getLogger('veil-api-client.request')
logger.addHandler(logging.NullHandler())
class _RequestContext:
"""Custom aiohttp.RequestContext class for request retry logic.
Attributes:
request: aiohttp.request operation (POST, GET, etc).
url: request url
num_of_attempts: num of retry attempts if request failed.
timeout: base try timeout time (with exponential growth).
max_timeout: max timeout between tries.
timeout_increase_step: timeout increase step.
status_codes: collection of response status codes that should trigger a retry.
exceptions: collection of aiohttp exceptions that should trigger a retry.
kwargs: additional aiohttp.request arguments, such as headers.
"""
def __init__(self,
request: aiohttp.ClientRequest,
url: str,
num_of_attempts: int,
timeout: int,
max_timeout: int,
timeout_increase_step: int,
status_codes: set,
exceptions: set,
**kwargs
) -> None:
"""Please see help(_RequestContext) for more info."""
self._request = request
self._url = url
self._num_of_attempts = num_of_attempts
self._timeout = timeout
self._max_timeout = max_timeout
self._timeout_increase_step = timeout_increase_step
if status_codes is None:
status_codes = set()
self._status_codes = status_codes
if exceptions is None:
exceptions = set()
self._exceptions = exceptions
self._kwargs = kwargs
self._current_attempt = 0
self._response = None
@property
def _exp_timeout(self) -> float:
"""Retry request timeout witch can exponentially grow."""
timeout = self._timeout * (self._timeout_increase_step ** (self._current_attempt - 1))
return min(timeout, self._max_timeout)
def _bad_code(self, code: int) -> bool:
"""Check that request status_code is bad."""
return 500 <= code <= 599 or code in self._status_codes
async def _execute_request(self) -> aiohttp.ClientResponse:
"""Run client request on aiohttp."""
try:
self._current_attempt += 1
if self._current_attempt > 1:
logger.debug('Request %s attempt', self._current_attempt)
response = await self._request(self._url, **self._kwargs)
code = response.status
if self._current_attempt < self._num_of_attempts and self._bad_code(code):
await asyncio.sleep(self._exp_timeout)
return await self._execute_request()
self._response = response
return response
except Exception as e:
if self._current_attempt < self._num_of_attempts:
for exc in self._exceptions:
if isinstance(e, exc):
await asyncio.sleep(self._exp_timeout)
return await self._execute_request()
raise e
async def __aenter__(self) -> aiohttp.ClientResponse:
return await self._execute_request()
async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:
if self._response is not None:
if not self._response.closed:
self._response.close()
class VeilClient:
"""VeilClient class.
Private attributes:
__AUTH_HEADER_KEY: Header authorization key.
__USER_AGENT_VAL: Header user-agent value.
__TRANSFER_PROTOCOL_PREFIX: force to use only HTTPS.
Attributes:
server_address: VeiL server address (without protocol).
token: VeiL auth token.
ssl_enabled: ssl-certificate validation.
session_reopen: auto reopen aiohttp.ClientSession when it`s closed.
timeout: aiohttp.ClientSession total timeout.
extra_headers: additional user headers.
extra_params: additional user params.
cookies: additional user cookies (probably useless).
ujson_: use ujson instead of the default aiohttp.ClientSession JSON serializer.
retry_opts: VeilRetryConfiguration instance.
cache_opts: VeilCacheConfiguration instance.
url_max_length: maximum url length (protocol + domain + query params)
"""
__TRANSFER_PROTOCOL_PREFIX = 'https://'
__AUTH_HEADER_KEY = 'Authorization'
__USER_AGENT_VAL = 'veil-api-client/2.2'
__IDEMPOTENCY_BODY_KEY = 'idempotency_key'
__extra_headers = NullableDictType('__extra_headers')
__extra_params = NullableDictType('__extra_params')
__cookies = NullableDictType('__cookies')
server_address = VeilUrlStringType('server_address')
token = VeilJwtTokenType('token')
def __init__(self, server_address: str,
token: str,
ssl_enabled: bool = True,
session_reopen: bool = False,
timeout: int = 5 * 60,
extra_headers: Optional[dict] = None,
extra_params: Optional[dict] = None,
cookies: Optional[dict] = None,
ujson_: bool = True,
retry_opts: Optional[VeilRetryConfiguration] = None,
cache_opts: Optional[VeilCacheConfiguration] = None,
url_max_length: Optional[int] = None,
) -> None:
"""Please see help(VeilClient) for more info."""
if aiohttp is None:
raise RuntimeError('Please install `aiohttp`') # pragma: no cover
if ujson is None and ujson_:
raise RuntimeError('Please install `ujson`') # pragma: no cover
self.server_address = server_address
self.token = token
self.__session_reopen = session_reopen
self.__ssl_enabled = ssl_enabled
self.__extra_headers = extra_headers
self.__extra_params = extra_params
__timeout = aiohttp.ClientTimeout(total=timeout)
self.__timeout = __timeout
self.__cookies = cookies
# ujson is much faster but less compatible
self.__json_serialize = ujson.dumps if ujson_ else json.dumps
if not retry_opts:
retry_opts = VeilRetryConfiguration()
self.__retry_opts = retry_opts
# cache options that can be used in request caching decorator
if not cache_opts:
cache_opts = VeilCacheConfiguration(cache_client=None, ttl=0)
self.__cache_opts = cache_opts
self.__url_max_length = url_max_length
self.__client_session = self.new_client_session
async def __aenter__(self) -> 'VeilClient':
"""Async context manager enter."""
return self
async def __aexit__(self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType]) -> None:
"""Async context manager exit."""
await self.__session.close()
async def close(self) -> None:
"""Session close."""
await self.__session.close()
@property
def new_client_session(self) -> 'aiohttp.ClientSession':
"""Return new ClientSession instance."""
# TODO: DeprecationWarning: The object should be created from async function
return aiohttp.ClientSession(timeout=self.__timeout, cookies=self.__cookies,
json_serialize=self.__json_serialize)
@property
def base_url(self) -> str:
"""Build controller api url."""
return ''.join([self.__TRANSFER_PROTOCOL_PREFIX, self.server_address, '/api/'])
@property
def __base_params(self) -> Dict[str, int]:
"""All requests to VeiL should be async by default."""
return {'async': 1}
@property
def __params(self) -> Dict:
"""Return base params extended by user extra params."""
params = self.__base_params
if self.__extra_params and isinstance(self.__extra_params, dict):
params.update(self.__extra_params)
return params
@property
def __base_headers(self) -> Dict[str, str]:
"""Return preconfigured request headers.
Note:
response should be json encoded on utf-8 and EN locale.
"""
headers_dict = {
'Content-Type': 'application/json',
'Accept': 'application/json',
'Accept-Charset': 'utf-8',
'User-Agent': self.__USER_AGENT_VAL,
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'Accept-Language': 'en',
self.__AUTH_HEADER_KEY: '{}'.format(self.token),
}
return headers_dict
@property
def __headers(self) -> Dict[str, str]:
"""Return base_headers extended by user extra_headers."""
headers = self.__base_headers
if self.__extra_headers and isinstance(self.__extra_headers, dict):
headers.update(self.__extra_headers)
return headers
@property
def __session(self) -> 'aiohttp.ClientSession':
"""Return connection ClientSession."""
if self.__client_session.closed and self.__session_reopen:
self.__client_session = self.new_client_session
return self.__client_session
def __request_context(self,
request: aiohttp.ClientRequest,
url: str,
headers: dict,
params: dict,
ssl: bool,
retry_opts: VeilRetryConfiguration,
json_data: Optional[dict] = None):
"""Create new _RequestContext instance."""
# protocol + domain + query args
if self.__url_max_length:
full_url = '{url}?{params}'.format(url=url, params=urlencode(params))
if len(full_url) > self.__url_max_length:
raise AssertionError('The maximum url length is set and exceeded.')
# User-friendly magic - convert all UUID values to a str
if isinstance(json_data, dict):
for key, value in json_data.items():
try:
if isinstance(value, UUID):
logger.warning('JSON can`t contain a UUID -> converting %s to a str',
value)
json_data[key] = str(value)
except ValueError:
json_data[key] = value
return _RequestContext(request=request, url=url, headers=headers, params=params,
ssl=ssl, json=json_data,
num_of_attempts=retry_opts.num_of_attempts,
timeout=retry_opts.timeout,
max_timeout=retry_opts.max_timeout,
timeout_increase_step=retry_opts.timeout_increase_step,
status_codes=retry_opts.status_codes,
exceptions=retry_opts.exceptions)
@staticmethod
async def __fetch_response_data(response: aiohttp.ClientResponse) -> Dict[str, str]:
"""Collect all response attributes."""
if isinstance(response, aiohttp.ClientResponse):
# Collect response data
async with response:
status_code = response.status
headers = response.headers
# If VeiL ECP is not fully turned on, the responses may be of the wrong type
try:
data = await response.json()
except aiohttp.ContentTypeError:
logger.debug('VeiL response has wrong content type.')
data = dict()
return dict(status_code=status_code, headers=dict(headers), data=data)
async def __api_retry_request(self, method_name: str,
url: str,
headers: dict,
params: dict,
ssl: bool,
json_data: Optional[dict] = None,
retry_opts: Optional[VeilRetryConfiguration] = None) -> Dict[str, str]: # noqa: E501
"""Log parameters and execute passed aiohttp method with retry options."""
# VeiL can`t decode requests which contain extra commas
for argument, value in params.items():
if isinstance(value, str) and value[-1] == ',':
params[argument] = value[:-1]
# If request retry_opts are not defined - use Class attr value.
if not retry_opts:
retry_opts = self.__retry_opts
# log request
logger.debug('ssl: %s, url: %s, header: %s, params: %s, json: %s', self.__ssl_enabled,
url, self.__headers, params, json_data)
# determine aiohttp.client method to call
aiohttp_request_method = getattr(self.__session, method_name)
# create aiohttp.request witch can be retried.
aiohttp_request = self.__request_context(request=aiohttp_request_method,
url=url,
headers=headers,
params=params,
ssl=ssl,
json_data=json_data,
retry_opts=retry_opts)
# execute request and fetch response data
async with aiohttp_request as aiohttp_response:
return await self.__fetch_response_data(aiohttp_response)
@veil_api_response
@cached_response
async def api_request(self,
method_name: str,
url: str,
headers: dict,
params: dict,
ssl: bool,
json_data: Optional[dict] = None,
retry_opts: Optional[VeilRetryConfiguration] = None
):
"""Api_retry interface.
Note:
Override me to extend standard behaviour.
"""
return await self.__api_retry_request(method_name=method_name,
url=url,
headers=headers,
params=params,
ssl=ssl,
json_data=json_data,
retry_opts=retry_opts)
async def get(self, api_object, url: str,
extra_params: Optional[dict] = None,
extra_headers: Optional[dict] = None,
retry_opts: Optional[VeilRetryConfiguration] = None,
cache_opts: Optional[VeilCacheConfiguration] = None) -> Dict[str, str]:
"""Send GET request to VeiL ECP."""
params = self.__params
if extra_params:
params.update(extra_params)
headers = self.__headers
if extra_headers:
headers.update(extra_headers)
if not cache_opts:
cache_opts = self.__cache_opts
logger.debug('%s GET request.', api_object.__class__.__name__)
return await self.api_request(api_object=api_object,
method_name='get',
url=url,
headers=headers,
params=params,
ssl=self.__ssl_enabled,
retry_opts=retry_opts,
cache_opts=cache_opts)
async def post(self, api_object,
url: str,
json_data: Optional[dict] = None,
extra_params: Optional[dict] = None,
retry_opts: Optional[VeilRetryConfiguration] = None,
cache_opts: Optional[VeilCacheConfiguration] = None) -> Dict[str, str]:
"""Send POST request to VeiL ECP."""
if isinstance(json_data, dict):
json_data[self.__IDEMPOTENCY_BODY_KEY] | |
<reponame>axis-edge/pulumi-kubernetes
# coding=utf-8
# *** WARNING: this file was generated by pulumigen. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ... import core as _core
from ... import meta as _meta
__all__ = [
'EndpointConditionsArgs',
'EndpointHintsArgs',
'EndpointPortArgs',
'EndpointSliceArgs',
'EndpointArgs',
'ForZoneArgs',
]
@pulumi.input_type
class EndpointConditionsArgs:
def __init__(__self__, *,
ready: Optional[pulumi.Input[bool]] = None,
serving: Optional[pulumi.Input[bool]] = None,
terminating: Optional[pulumi.Input[bool]] = None):
"""
EndpointConditions represents the current condition of an endpoint.
:param pulumi.Input[bool] ready: ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready. For compatibility reasons, ready should never be "true" for terminating endpoints.
:param pulumi.Input[bool] serving: serving is identical to ready except that it is set regardless of the terminating state of endpoints. This condition should be set to true for a ready endpoint that is terminating. If nil, consumers should defer to the ready condition. This field can be enabled with the EndpointSliceTerminatingCondition feature gate.
:param pulumi.Input[bool] terminating: terminating indicates that this endpoint is terminating. A nil value indicates an unknown state. Consumers should interpret this unknown state to mean that the endpoint is not terminating. This field can be enabled with the EndpointSliceTerminatingCondition feature gate.
"""
if ready is not None:
pulumi.set(__self__, "ready", ready)
if serving is not None:
pulumi.set(__self__, "serving", serving)
if terminating is not None:
pulumi.set(__self__, "terminating", terminating)
@property
@pulumi.getter
def ready(self) -> Optional[pulumi.Input[bool]]:
"""
ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready. For compatibility reasons, ready should never be "true" for terminating endpoints.
"""
return pulumi.get(self, "ready")
@ready.setter
def ready(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "ready", value)
@property
@pulumi.getter
def serving(self) -> Optional[pulumi.Input[bool]]:
"""
serving is identical to ready except that it is set regardless of the terminating state of endpoints. This condition should be set to true for a ready endpoint that is terminating. If nil, consumers should defer to the ready condition. This field can be enabled with the EndpointSliceTerminatingCondition feature gate.
"""
return pulumi.get(self, "serving")
@serving.setter
def serving(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "serving", value)
@property
@pulumi.getter
def terminating(self) -> Optional[pulumi.Input[bool]]:
"""
terminating indicates that this endpoint is terminating. A nil value indicates an unknown state. Consumers should interpret this unknown state to mean that the endpoint is not terminating. This field can be enabled with the EndpointSliceTerminatingCondition feature gate.
"""
return pulumi.get(self, "terminating")
@terminating.setter
def terminating(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "terminating", value)
@pulumi.input_type
class EndpointHintsArgs:
def __init__(__self__, *,
for_zones: Optional[pulumi.Input[Sequence[pulumi.Input['ForZoneArgs']]]] = None):
"""
EndpointHints provides hints describing how an endpoint should be consumed.
:param pulumi.Input[Sequence[pulumi.Input['ForZoneArgs']]] for_zones: forZones indicates the zone(s) this endpoint should be consumed by to enable topology aware routing.
"""
if for_zones is not None:
pulumi.set(__self__, "for_zones", for_zones)
@property
@pulumi.getter(name="forZones")
def for_zones(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ForZoneArgs']]]]:
"""
forZones indicates the zone(s) this endpoint should be consumed by to enable topology aware routing.
"""
return pulumi.get(self, "for_zones")
@for_zones.setter
def for_zones(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ForZoneArgs']]]]):
pulumi.set(self, "for_zones", value)
@pulumi.input_type
class EndpointPortArgs:
def __init__(__self__, *,
app_protocol: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[int]] = None,
protocol: Optional[pulumi.Input[str]] = None):
"""
EndpointPort represents a Port used by an EndpointSlice
:param pulumi.Input[str] app_protocol: The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.
:param pulumi.Input[str] name: The name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.
:param pulumi.Input[int] port: The port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.
:param pulumi.Input[str] protocol: The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.
"""
if app_protocol is not None:
pulumi.set(__self__, "app_protocol", app_protocol)
if name is not None:
pulumi.set(__self__, "name", name)
if port is not None:
pulumi.set(__self__, "port", port)
if protocol is not None:
pulumi.set(__self__, "protocol", protocol)
@property
@pulumi.getter(name="appProtocol")
def app_protocol(self) -> Optional[pulumi.Input[str]]:
"""
The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.
"""
return pulumi.get(self, "app_protocol")
@app_protocol.setter
def app_protocol(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "app_protocol", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def port(self) -> Optional[pulumi.Input[int]]:
"""
The port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def protocol(self) -> Optional[pulumi.Input[str]]:
"""
The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.
"""
return pulumi.get(self, "protocol")
@protocol.setter
def protocol(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "protocol", value)
@pulumi.input_type
class EndpointSliceArgs:
def __init__(__self__, *,
address_type: pulumi.Input[str],
endpoints: pulumi.Input[Sequence[pulumi.Input['EndpointArgs']]],
api_version: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']] = None,
ports: Optional[pulumi.Input[Sequence[pulumi.Input['EndpointPortArgs']]]] = None):
"""
EndpointSlice represents a subset of the endpoints that implement a service. For a given service there may be multiple EndpointSlice objects, selected by labels, which must be joined to produce the full set of endpoints.
:param pulumi.Input[str] address_type: addressType specifies the type of address carried by this EndpointSlice. All addresses in this slice must be the same type. This field is immutable after creation. The following address types are currently supported: * IPv4: Represents an IPv4 Address. * IPv6: Represents an IPv6 Address. * FQDN: Represents a Fully Qualified Domain Name.
Possible enum values:
- `"FQDN"` represents a FQDN.
- `"IPv4"` represents an IPv4 Address.
- `"IPv6"` represents an IPv6 Address.
:param pulumi.Input[Sequence[pulumi.Input['EndpointArgs']]] endpoints: endpoints is a list of unique endpoints in this slice. Each slice may include a maximum of 1000 endpoints.
:param pulumi.Input[str] api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
:param pulumi.Input[str] kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
:param pulumi.Input['_meta.v1.ObjectMetaArgs'] metadata: Standard object's metadata.
:param pulumi.Input[Sequence[pulumi.Input['EndpointPortArgs']]] ports: ports specifies the list of network ports exposed by each endpoint in this slice. Each port must have a unique name. When ports is empty, it indicates that there are no defined ports. When a port is defined with a nil port value, it indicates "all ports". Each slice may include a maximum of 100 ports.
"""
pulumi.set(__self__, "address_type", address_type)
pulumi.set(__self__, "endpoints", endpoints)
if api_version is not None:
pulumi.set(__self__, "api_version", 'discovery.k8s.io/v1')
        if kind is not None:
            pulumi.set(__self__, "kind", 'EndpointSlice')
        if metadata is not None:
            pulumi.set(__self__, "metadata", metadata)
        if ports is not None:
            pulumi.set(__self__, "ports", ports)
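# --- Hedged usage sketch (not part of the generated SDK above) --------------------
# Assumptions: the classes above are re-exported as pulumi_kubernetes.discovery.v1.*
# and the pulumi_kubernetes package is installed; only constructor arguments that are
# documented in the docstrings above are used here.
import pulumi_kubernetes as k8s

example_slice_args = k8s.discovery.v1.EndpointSliceArgs(
    address_type="IPv4",                                    # IPv4 / IPv6 / FQDN
    endpoints=[k8s.discovery.v1.EndpointArgs(addresses=["10.0.0.5"])],
    ports=[k8s.discovery.v1.EndpointPortArgs(
        name="http",                                        # DNS_LABEL or empty string
        port=8080,
        protocol="TCP",                                     # UDP, TCP or SCTP
    )],
)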
# -*- coding: utf-8 -*-
# Copyright 2013, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain a
# copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import unittest
from coapy.endpoint import *
from tests.support import *
import coapy.option
import urlparse
class TestEndpoint (unittest.TestCase):
def testBasic6(self):
ep = Endpoint(host='2001:db8:0::2:1')
self.assertEqual(b'\x20\x01\x0d\xb8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01',
ep.in_addr)
        self.assertEqual('2001:db8::2:1', ep.uri_host)
self.assertEqual(5683, ep.port)
ep2 = Endpoint(host='2001:db8:0::2:1')
self.assertTrue(ep is ep2)
ep2 = ep.get_peer_endpoint(('2001:db8:0::2:2', 1234))
self.assertFalse(ep is ep2)
self.assertEqual(b'\x20\x01\x0d\xb8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x02',
ep2.in_addr)
self.assertEqual(1234, ep2.port)
self.assertEqual(ep.family, ep2.family)
self.assertEqual(ep.security_mode, ep2.security_mode)
ep3 = ep.get_peer_endpoint(host='2001:db8:0::2:2', port=1234)
self.assertTrue(ep3 is ep2)
def testBasic4(self):
ep = Endpoint(host='10.0.1.2', port=1234)
self.assertEqual(b'\x0a\x00\x01\x02', ep.in_addr)
self.assertEqual('10.0.1.2', ep.uri_host)
self.assertEqual(1234, ep.port)
ep2 = Endpoint(host='10.0.1.2', port=1234)
self.assertTrue(ep is ep2)
ep2 = ep.get_peer_endpoint(('10.0.1.5', 52342))
self.assertFalse(ep is ep2)
self.assertEqual(b'\x0a\x00\x01\x05', ep2.in_addr)
self.assertEqual('10.0.1.5', ep2.uri_host)
self.assertEqual(52342, ep2.port)
self.assertEqual(ep.family, ep2.family)
self.assertEqual(ep.security_mode, ep2.security_mode)
ep3 = ep.get_peer_endpoint(host=ep.uri_host)
self.assertFalse(ep is ep3)
self.assertEqual(ep.in_addr, ep3.in_addr)
self.assertEqual(coapy.COAP_PORT, ep3.port)
self.assertEqual(ep.family, ep3.family)
self.assertEqual(ep.security_mode, ep3.security_mode)
def testNotAnInetAddr(self):
naa = 'not an address'
with self.assertRaises(socket.gaierror) as cm:
ep = Endpoint.lookup_endpoint(host=naa)
ep = Endpoint.lookup_endpoint(host=naa, family=None)
self.assertTrue(ep is None)
with self.assertRaises(socket.gaierror) as cm:
ep = Endpoint(host=naa)
ep = Endpoint(host=naa, family=None)
lep = Endpoint.lookup_endpoint(host=naa, family=None)
self.assertTrue(lep is ep)
ep2 = Endpoint(host=naa, family=None)
self.assertTrue(ep is ep2)
self.assertTrue(ep.family is None)
self.assertEqual(ep.in_addr, naa.encode('utf-8'))
self.assertEqual(ep.port, coapy.COAP_PORT)
self.assertTrue(ep.security_mode is None)
ana = 'another non-address'
self.assertEqual((naa, coapy.COAP_PORT), ep.sockaddr)
ep2 = ep.get_peer_endpoint((ana, 24))
self.assertFalse(ep is ep2)
self.assertEqual(ana, ep2.uri_host)
self.assertEqual(24, ep2.port)
self.assertEqual(ep.family, ep2.family)
self.assertEqual(ep.security_mode, ep2.security_mode)
def testUnspecFamily(self):
ep = Endpoint.lookup_endpoint(('::1', 1234))
ep = Endpoint(('::1', 1234))
ep2 = Endpoint.lookup_endpoint(('::1', 1234))
self.assertEqual(ep, ep2)
ep = Endpoint.lookup_endpoint(('192.168.0.1', 1234))
ep = Endpoint(('192.168.0.1', 1234))
ep2 = Endpoint.lookup_endpoint(('192.168.0.1', 1234))
self.assertEqual(ep, ep2)
def testStringize(self):
naa = 'not an address'
ep = Endpoint(host=naa, family=None)
self.assertEqual('not an address:5683', unicode(ep))
ep = Endpoint(host='::1', port=1234)
self.assertEqual('[::1]:1234', unicode(ep))
ep = Endpoint(sockaddr=('192.168.0.1', 12345))
self.assertEqual('192.168.0.1:12345', unicode(ep))
def testIsSameHost(self):
ep = Endpoint(host='127.0.0.1')
self.assertEqual(ep.family, socket.AF_INET)
ep2 = Endpoint(host='localhost')
self.assertTrue(ep is ep2)
self.assertTrue(ep.is_same_host('127.0.0.1'))
self.assertFalse(ep.is_same_host('localhost'))
def testFinalize(self):
ep = Endpoint(host='localhost')
m = ep.create_request('/path')
m.options.append(coapy.option.UriHost(ep.uri_host))
m.options.append(coapy.option.UriPort(ep.port))
self.assertEqual(3, len(m.options))
ep.finalize_message(m)
self.assertEqual(1, len(m.options))
opt = m.options[0]
self.assertTrue(isinstance(opt, coapy.option.UriPath))
self.assertTrue(m.destination_endpoint is ep)
def testReset(self):
ep = SocketEndpoint.create_bound_endpoint(host='127.0.0.1', port=0)
self.assertFalse(ep.bound_socket is None)
ep._reset()
self.assertTrue(ep.bound_socket is None)
class TestURLParse (unittest.TestCase):
def testJoin(self):
ep = Endpoint(host='::1')
self.assertEqual('coap://[::1]/', ep.base_uri)
base = ep.uri_from_options([])
self.assertEqual('coap://[::1]/', base)
self.assertEqual('coap://[::1]/path', urlparse.urljoin(base, '/path'))
self.assertEqual('coap://[::1]/other', urlparse.urljoin(base + 'path/', '../other'))
class TestURLConversion (unittest.TestCase):
def testB1(self):
ep = Endpoint(host='2001:db8::2:1')
url = 'coap://[2001:db8::2:1]/'
opts = ep.uri_to_options(url)
self.assertEqual(0, len(opts))
durl = ep.uri_from_options(opts)
self.assertEqual(url, durl)
def testB2(self):
ep = Endpoint(host='2001:db8::2:1')
url = 'coap://example.net/'
opts = ep.uri_to_options(url)
self.assertEqual(1, len(opts))
opt = opts[0]
self.assertTrue(isinstance(opt, coapy.option.UriHost))
self.assertEqual('example.net', opt.value)
durl = ep.uri_from_options(opts)
self.assertEqual(url, durl)
def testB3(self):
ep = Endpoint(host='2001:db8::2:1')
url = 'coap://example.net/.well-known/core'
opts = ep.uri_to_options(url)
self.assertEqual(3, len(opts))
opt = opts[0]
self.assertTrue(isinstance(opt, coapy.option.UriHost))
self.assertEqual('example.net', opt.value)
opt = opts[1]
self.assertTrue(isinstance(opt, coapy.option.UriPath))
self.assertEqual('.well-known', opt.value)
opt = opts[2]
self.assertTrue(isinstance(opt, coapy.option.UriPath))
self.assertEqual('core', opt.value)
durl = ep.uri_from_options(opts)
self.assertEqual(url, durl)
def testB4(self):
ep = Endpoint(host='2001:db8::2:1')
url = 'coap://xn--18j4d.example/%E3%81%93%E3%82%93%E3%81%AB%E3%81%A1%E3%81%AF'
opts = ep.uri_to_options(url)
self.assertEqual(2, len(opts))
opt = opts[0]
self.assertTrue(isinstance(opt, coapy.option.UriHost))
self.assertEqual('xn--18j4d.example', opt.value)
opt = opts[1]
self.assertTrue(isinstance(opt, coapy.option.UriPath))
self.assertEqual('こんにちは', opt.value)
durl = ep.uri_from_options(opts)
self.assertEqual(url, durl)
def testB5(self):
ep = Endpoint(host='198.51.100.1', port=61616)
opts = (coapy.option.UriPath(''),
coapy.option.UriPath('/'),
coapy.option.UriPath(''),
coapy.option.UriPath(''),
coapy.option.UriQuery('//'),
coapy.option.UriQuery('?&'))
uri = ep.uri_from_options(opts)
self.assertEqual('coap://198.51.100.1:61616//%2F//?%2F%2F&?%26', uri)
uopts = ep.uri_to_options(uri)
self.assertEqual(len(opts), len(uopts))
for i in xrange(len(opts)):
self.assertEqual(type(opts[i]), type(uopts[i]))
self.assertEqual(opts[i].value, uopts[i].value)
def testBasic(self):
ep = Endpoint(host='::1')
rel = '/.well-known/core'
opts = ep.uri_to_options(rel)
self.assertTrue(isinstance(opts, list))
self.assertEqual(2, len(opts))
opt = opts[0]
self.assertTrue(isinstance(opt, coapy.option.UriPath))
self.assertEqual('.well-known', opt.value)
opt = opts[1]
self.assertTrue(isinstance(opt, coapy.option.UriPath))
self.assertEqual('core', opt.value)
def testInvalidToOpts(self):
ep = Endpoint(host='::1')
with self.assertRaises(URIError) as cm:
ep.uri_to_options('http://localhost/path')
self.assertEqual(cm.exception.args[0], 'invalid scheme')
self.assertEqual(cm.exception.args[1], 'http')
def testInvalidFromOpts(self):
ep = Endpoint(host='::1')
with self.assertRaises(URIError) as cm:
ep.uri_from_options([coapy.option.UriHost()])
self.assertEqual(cm.exception.args[0], 'empty Uri-Host')
with self.assertRaises(URIError) as cm:
ep.uri_from_options([coapy.option.UriPort()])
self.assertEqual(cm.exception.args[0], 'empty Uri-Port')
class TestEndpointInterface (unittest.TestCase):
def testNextMessageID(self):
ep = FIFOEndpoint()
ep._reset_next_messageID(621)
self.assertEqual(621, ep.next_messageID())
self.assertEqual(622, ep.next_messageID())
self.assertEqual(623, ep.next_messageID())
ep._reset_next_messageID(65534)
self.assertEqual(65534, ep.next_messageID())
self.assertEqual(65535, ep.next_messageID())
self.assertEqual(0, ep.next_messageID())
self.assertEqual(1, ep.next_messageID())
def testCreateRequest(self):
ep = Endpoint(host='::1')
m = ep.create_request('/path')
self.assertTrue(isinstance(m, coapy.message.Request))
self.assertEqual(coapy.message.Request.GET, m.code)
self.assertTrue(m.destination_endpoint is ep)
self.assertIsNone(m.messageID)
self.assertEqual(m.token, b'')
opts = m.options
self.assertTrue(isinstance(opts, list))
self.assertEqual(1, len(opts))
opt = opts[0]
self.assertTrue(isinstance(opt, coapy.option.UriPath))
self.assertEqual('path', opt.value)
self.assertTrue(m.payload is None)
class TestBoundEndpoints (unittest.TestCase):
def testBasic(self):
localhost = '127.0.0.1'
ep = SocketEndpoint.create_bound_endpoint(host=localhost, port=0)
self.assertEqual(socket.AF_INET, ep.family)
self.assertNotEqual(0, ep.port)
self.assertFalse(ep.bound_socket is None)
self.assertEqual(ep.sockaddr, ep.bound_socket.getsockname())
ep2 = Endpoint(host=localhost)
self.assertFalse(isinstance(ep2, SocketEndpoint))
s = socket.socket(ep.family, socket.SOCK_DGRAM)
with self.assertRaises(ValueError):
ep.set_bound_socket(s)
s.close()
s = ep.bound_socket
obs = ep.set_bound_socket(None)
self.assertTrue(s is obs)
self.assertTrue(ep.bound_socket is None)
obs = ep.set_bound_socket(s)
self.assertTrue(obs is None)
self.assertTrue(s is ep.bound_socket)
class TestSocketSendRecv (unittest.TestCase):
def testBasic(self):
import socket
import errno
ep1 = SocketEndpoint.create_bound_endpoint(host='localhost', port=0)
ep1.bound_socket.setblocking(0)
with self.assertRaises(socket.error) as cm:
(data, sep) = ep1.rawrecvfrom(2048)
e = cm.exception
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[1], 'Resource temporarily unavailable')
s1 = ep1.set_bound_socket(None)
s1.close()
class TestMessageCache (ManagedClock_mixin,
unittest.TestCase):
def testDictionary(self):
from coapy.message import Message
ep = Endpoint(host='localhost')
c = MessageCache(ep, True)
self.assertEqual(0, len(c))
with self.assertRaises(KeyError):
v = c[1]
now = coapy.clock()
e1 = MessageCacheEntry(cache=c, message=Message(messageID=1))
self.assertTrue(e1.message.is_non_confirmable())
self.assertEqual(e1.message.messageID, 1)
self.assertEqual(e1.message_id, 1)
self.assertEqual(e1.created_clk, now)
self.assertEqual(e1.activated_clk, now)
self.assertEqual(e1.time_due, now + coapy.transmissionParameters.NON_LIFETIME)
e2 = MessageCacheEntry(cache=c, message=Message(messageID=2), time_due_offset=0)
self.assertTrue(e2.message.is_non_confirmable())
self.assertEqual(e2.message.messageID, 2)
self.assertEqual(e2.message_id, 2)
self.assertEqual(e2.created_clk, now)
self.assertEqual(e2.time_due, now)
e3 = MessageCacheEntry(cache=c, message=Message(messageID=3, confirmable=True))
self.assertTrue(e3.message.is_confirmable())
self.assertEqual(e3.message.messageID, 3)
self.assertEqual(e3.message_id, 3)
self.assertEqual(e1.created_clk, now)
self.assertEqual(e3.time_due, now + coapy.transmissionParameters.EXCHANGE_LIFETIME)
self.assertEqual(3, len(c))
queue = c.queue()
self.assertTrue(queue[0] is e2)
self.assertTrue(queue[1] is e1)
self.assertTrue(queue[2] is e3)
self.assertTrue(c[1] is e1)
self.assertTrue(c[2] is e2)
self.assertTrue(c[3] is e3)
e1.time_due = now + 5
e2.time_due = now
e3.time_due = now + 2
self.assertEqual(3, len(c))
self.assertTrue(c[1] is e1)
self.assertTrue(c[2] is e2)
self.assertTrue(c[3] is e3)
self.assertTrue(queue[0] is e2)
self.assertTrue(e2.cache is c)
c._remove(e2)
self.assertTrue(e2.cache is None)
self.assertEqual(2, len(c))
self.assertTrue(c[1] is e1)
with self.assertRaises(KeyError):
v = c[2]
self.assertTrue(c[3] is e3)
self.assertTrue(queue[0] is e3)
e1.time_due = e3.time_due - 1
self.assertTrue(queue[0] is e1)
self.assertTrue(c[1] is e1)
self.assertTrue(c[3] is e3)
self.assertEqual(2, len(c))
rv = c._remove(e1)
self.assertTrue(rv is e1)
self.assertEqual(1, len(c))
self.assertTrue(c[3] is e3)
self.assertTrue(e3.cache is c)
c.clear()
self.assertTrue(e3.cache is None)
self.assertEqual(0, len(c))
def testSentEntry(self):
from coapy.message import Message
sep = FIFOEndpoint()
dep = FIFOEndpoint()
c = MessageCache(sep, True)
ce = SentMessageCacheEntry(c, Message(messageID=1), dep)
self.assertTrue(ce.message.is_non_confirmable())
self.assertTrue(ce in c.pending())
self.assertFalse(ce in c.queue())
self.assertEqual(ce.created_clk, coapy.clock())
self.assertIsNone(ce.activated_clk)
self.assertIsNone(ce.expires_clk)
coapy.clock.adjust(5)
ce.time_due = coapy.clock()
self.assertFalse(ce in c.pending())
self.assertTrue(ce in c.queue())
self.assertEqual(ce.activated_clk, coapy.clock())
self.assertEqual(ce.created_clk + 5, ce.activated_clk)
self.assertEqual(ce.expires_clk,
ce.activated_clk + coapy.transmissionParameters.NON_LIFETIME)
def testRcvdEntry(self):
from coapy.message import Message
dep = FIFOEndpoint()
c = MessageCache(dep, False)
ce = RcvdMessageCacheEntry(c, Message(messageID=1))
self.assertTrue(ce.message.is_non_confirmable())
self.assertFalse(ce in c.pending())
self.assertTrue(ce in c.queue())
self.assertEqual(ce.activated_clk, coapy.clock())
self.assertEqual(ce.created_clk, ce.activated_clk)
self.assertEqual(ce.expires_clk,
ce.activated_clk + coapy.transmissionParameters.NON_LIFETIME)
class TestRemoteEndpointState (ManagedClock_mixin,
unittest.TestCase):
def testBasic(self):
clk = coapy.clock
sep = FIFOEndpoint()
dep = FIFOEndpoint()
data = b'data'
self.assertEqual(0, clk())
clk.adjust(120)
self.assertEqual(120, clk())
state = sep.remote_state(dep)
self.assertIsNone(state.last_heard_clk)
self.assertEqual(state.rx_messages, 0)
self.assertEqual(state.tx_messages, 0)
self.assertEqual(state.tx_octets_since_heard, 0)
sep.rawsendto(data, dep)
self.assertEqual(state.rx_messages, 0)
self.assertEqual(state.tx_messages, 1)
self.assertEqual(state.tx_octets, len(data))
self.assertEqual(state.tx_octets_since_heard, len(data))
# Simulate destination replying with same content 45 seconds
# later
sep.fifo.append((dep.fifo.pop()[0], dep))
clk.adjust(45)
self.assertEqual(165, clk())
(xdata, xsep) = sep.rawrecvfrom()
self.assertEqual(xdata, data)
self.assertTrue(xsep is dep)
self.assertEqual(state.last_heard_clk, 165)
self.assertEqual(state.rx_messages, 1)
self.assertEqual(state.tx_messages, 1)
self.assertEqual(state.rx_octets, len(data))
self.assertEqual(state.tx_octets, len(data))
self.assertEqual(state.tx_octets_since_heard, 0)
class TestSentCache (DeterministicBEBO_mixin,
LogHandler_mixin,
ManagedClock_mixin,
unittest.TestCase):
def testCONNoAck(self):
tp = coapy.transmissionParameters
self.assertEqual(tp.ACK_RANDOM_FACTOR, 1.0)
clk = coapy.clock
sep = FIFOEndpoint()
dep = FIFOEndpoint()
self.assertEqual(0, clk())
sm = dep.create_request('/path', confirmable=True, token=b'x')
# Send the request through a full message layer. In the
# current model, this creates the cache entry and sets an
# event due immediately but does not actually transmit
# anything.
ce = sep.send(sm)
self.assertEqual(0, ce.transmissions)
self.assertTrue(clk() >= ce.time_due)
self.assertEqual(ce.ST_untransmitted, ce.state)
# Process one timeout. This should send the message and
# start the BEBO process.
ce.process_timeout()
self.assertFalse(ce.cache is None)
self.assertTrue(ce.stale_at is None)
self.assertEqual(ce.created_clk, 0)
self.assertTrue(ce.message is sm)
self.assertTrue(ce.destination_endpoint is dep)
self.assertEqual(1, ce.transmissions)
self.assertEqual(len(dep.fifo), ce.transmissions)
# Cycle through the BEBO with ACK_TIMEOUT base interval
to = tp.ACK_TIMEOUT
for s in xrange(tp.MAX_RETRANSMIT):
self.assertEqual(ce.time_due, clk() + to)
clk.adjust(to)
self.assertEqual(ce.ST_unacknowledged, ce.state)
ce.process_timeout()
self.assertFalse(ce.cache is None)
self.assertEqual(len(dep.fifo), ce.transmissions)
to += to
# At the end of the retransmissions, the clock should be at
# MAX_TRANSMIT_WAIT. The state should still be
# unacknowledged, and there should be a timeout due through
        # which the caller can detect the failure.
"""
Copyright 2016 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import gzip
import json
import os
import subprocess
import urlparse
class ETW:
def __init__(self):
self.earliest_navigate = None
self.start = None
self.log_file = None
        self.trace_name = None
        self.started = False
self.kernel_categories = []
#self.kernel_categories = ['latency']
self.user_categories = ['Microsoft-IE',
#'Microsoft-IEFRAME',
#'Microsoft-JScript',
#'Microsoft-PerfTrack-IEFRAME',
#'Microsoft-PerfTrack-MSHTML',
#'Microsoft-Windows-DNS-Client',
#'Microsoft-Windows-Schannel-Events',
#'Microsoft-Windows-URLMon',
#'Microsoft-Windows-WebIO',
#'Microsoft-Windows-WinHttp',
'Microsoft-Windows-WinINet',
'Microsoft-Windows-WinINet-Capture',
#'Microsoft-Windows-Winsock-NameResolution',
#'Microsoft-Windows-Winsock-AFD:5',
#'37D2C3CD-C5D4-4587-8531-4696C44244C8' #Security: SChannel
#'Schannel',
#'Microsoft-Windows-TCPIP',
]
# The list of events we actually care about
self.keep_events = [# Page Navigation Events
'Microsoft-IE/Mshtml_CWindow_SuperNavigate2/Start',
'Microsoft-IE/Mshtml_BFCache/Info',
'Microsoft-IE/Mshtml_WebOCEvents_BeforeNavigate/Info',
'Microsoft-IE/Mshtml_CDoc_Navigation/Info', # Start of navigation, keep track of CMarkup* and EventContextId
'Microsoft-IE/Mshtml_WebOCEvents_DOMContentLoaded/Info', # CMarkup *
'Microsoft-IE/Mshtml_WebOCEvents_DocumentComplete/Info', # CMarkup*
'Microsoft-IE/Mshtml_WebOCEvents_NavigateComplete/Info', # CMarkup*
'Microsoft-IE/Mshtml_CMarkup_LoadEvent_Start/Start', # EventContextId
'Microsoft-IE/Mshtml_CMarkup_LoadEvent_Stop/Stop', # EventContextId
'Microsoft-IE/Mshtml_CMarkup_DOMContentLoadedEvent_Start/Start', # EventContextId
'Microsoft-IE/Mshtml_CMarkup_DOMContentLoadedEvent_Stop/Stop', # EventContextId
# DNS - linked by etw:ActivityId
'Microsoft-Windows-WinINet/WININET_DNS_QUERY/Start',
'Microsoft-Windows-WinINet/WININET_DNS_QUERY/Stop', # Lookup complete (includes address list)
'Microsoft-Windows-WinINet/Wininet_Getaddrinfo/Start', # Start of actual lookup
'Microsoft-Windows-WinINet/Wininet_Getaddrinfo/Stop', # End of actual lookup
# Socket Connect - linked by etw:ActivityId to DNS
'Microsoft-Windows-WinINet/Wininet_SocketConnect/Start', # Start of connection attempt, includes request #
'Microsoft-Windows-WinINet/Wininet_SocketConnect/Stop', # End of connection attempt
'Microsoft-Windows-WinINet/WININET_TCP_CONNECTION/Start', # Start of connection lifetime (after connected)
'Microsoft-Windows-WinINet/WININET_TCP_CONNECTION/Stop', # End of connection lifetime (closed)
'Microsoft-Windows-WinINet/WININET_TCP_CONNECTION/Fail',
'Microsoft-Windows-WinINet/Wininet_Connect/Stop',
# TLS
'Microsoft-Windows-WinINet/WININET_HTTPS_NEGOTIATION/Start',
'Microsoft-Windows-WinINet/WININET_HTTPS_NEGOTIATION/Stop',
# Requests - linked by etw:ActivityId
'Microsoft-Windows-WinINet/WININET_REQUEST_HEADER/Info', # Headers and size of outbound request - Length, Headers
'Microsoft-Windows-WinINet/WININET_RESPONSE_HEADER/Info', # Headers and size of headers - Length, Headers
'Microsoft-Windows-WinINet/Wininet_SendRequest/Start', # Request created (not necessarily sent) - AddressName (URL)
'Microsoft-Windows-WinINet/Wininet_SendRequest/Stop', # Headers done - Direction changing for capture (no params)
'Microsoft-Windows-WinINet/Wininet_SendRequest_Main/Info', # size of outbound request (and actual start) - Size
'Microsoft-Windows-WinINet/Wininet_ReadData/Info', # inbound bytes (ttfb, keep incrementing end) - Size
'Microsoft-Windows-WinINet/Wininet_UsageLogRequest/Info', # completely finished - URL, Verb, RequestHeaders, ResponseHeaders, Status, UsageLogRequestCache
'Microsoft-Windows-WinINet/Wininet_LookupConnection/Stop', # Maps request to source port of connection "Socket" == local port
'Microsoft-Windows-WinINet/WININET_STREAM_DATA_INDICATED/Info', # Size
'Microsoft-Windows-WinINet-Capture//', # raw bytes (before encryption?_) and length - PayloadByteLength, Payload
]
def Start(self, log_file):
ret = 0
if os.path.exists(log_file):
os.unlink(log_file)
if len(self.kernel_categories) or len(self.user_categories):
command = ['xperf']
if len(self.kernel_categories):
command.extend(['-on', '+'.join(self.kernel_categories)])
command.append('-start')
self.trace_name = 'WebPageTest'
command.append(self.trace_name)
if len(self.user_categories):
command.extend(['-on', '+'.join(self.user_categories)])
command.extend(['-BufferSize', '1024'])
command.extend(['-f', log_file])
print('Capturing ETW trace {0} to "{1}"'.format(self.trace_name,log_file))
ret = subprocess.call(command, shell=True)
self.started = True
self.log_file = log_file
return ret
def Stop(self):
ret = 0
if self.trace_name is not None:
print('Stopping ETW trace')
command = ['xperf', '-stop', self.trace_name]
ret = subprocess.call(command, shell=True)
return ret
def Write(self, test_info, dom_data):
start_offset = 0
page_data_file = test_info.GetFilePageData()
request_data_file = test_info.GetFileRequests()
if self.log_file is not None and self.started and page_data_file is not None and request_data_file is not None:
csv_file = self.log_file + '.csv'
self.ExtractCsv(csv_file)
if os.path.exists(csv_file):
print('Parsing Events')
events = self.Parse(csv_file)
if len(events):
print('Processing Events')
raw_result = self.ProcessEvents(events)
page_data, requests = self.ProcessResult(raw_result, test_info, dom_data)
with gzip.open(page_data_file + '.gz', 'wb') as f:
json.dump(page_data, f)
with gzip.open(request_data_file + '.gz', 'wb') as f:
json.dump(requests, f)
os.unlink(csv_file)
if self.earliest_navigate is not None and self.start is not None and self.start > self.earliest_navigate:
start_offset = int(round(float(self.start - self.earliest_navigate) / 1000.0))
return start_offset
def ExtractCsv(self, csv_file):
ret = 0
if self.log_file is not None:
print('Converting ETW trace to CSV')
command = ['xperf', '-i', self.log_file, '-o', csv_file, '-target', 'machine', '-tle', '-tti']
ret = subprocess.call(command, shell=True)
return ret
def Parse(self, csv_file):
events = []
column_names = {}
in_header = False
header_parsed = False
with open(csv_file, 'rb') as file:
buffer = ''
for line in file:
try:
if not in_header and not header_parsed and line == "BeginHeader\r\n":
in_header = True
buffer = ''
elif in_header:
buffer = ''
if line == "EndHeader\r\n":
header_parsed = True
in_header = False
else:
columns = self.ExtractCsvLine(line)
if len(columns):
event_name = columns[0].replace(' ', '').replace('/win:', '/').replace('/Task.', '/')
if len(event_name):
column_names[event_name] = columns
else:
buffer += line
# line feeds in the data are escaped. All real data lines end with \r\n
if len(buffer) and line[-1] != "\r" and buffer[-3:] != "\r\r\n":
buffer = buffer.replace("\r\r\n", "\r\n")
# pull the event name from the front of the string so we only do the heavy csv processing for events we care about
comma = buffer.find(',')
if comma > 0:
event_name = buffer[:comma].replace(' ', '').replace('/win:', '/').replace('/Task.', '/')
if len(event_name) and event_name in column_names and event_name in self.keep_events:
columns = self.ExtractCsvLine(buffer)
if len(columns):
event = {'name': event_name, 'fields': {}}
available_names = len(column_names[event_name])
for i in xrange(1, len(columns)):
if i < available_names:
key = column_names[event_name][i]
value = columns[i]
if key == 'TimeStamp':
event['ts'] = int(value)
elif key == 'etw:ActivityId':
event['activity'] = value
else:
event['fields'][key] = value
if 'ts' in event:
events.append(event)
buffer = ''
except:
pass
# sort the events by timestamp to make sure we process them in order
if len(events):
events.sort(key=lambda event: event['ts'])
return events
def ExtractCsvLine(self, csv):
columns = []
try:
buffer = ''
in_quote = False
in_multiline_quote = False
if csv[-2:] == "\r\n":
csv = csv[:-2]
length = len(csv)
for i in xrange(0, length):
if csv[i] == ',' and not in_quote:
buffer = buffer.strip(" \r\n")
if len(buffer) > 1 and buffer[0] == '"' and buffer[-1] == '"':
buffer = buffer[1:-1]
columns.append(buffer)
buffer = ''
elif len(buffer) or csv[i] != ' ':
buffer += csv[i]
# quote escaping starts with a quote as the first non-space character of the field
if not in_quote and buffer == '"':
in_quote = True
in_multiline_quote = False
elif in_quote and not in_multiline_quote:
if csv[i] == '"':
in_quote = False
elif csv[i] == '\r':
in_multiline_quote = True
elif in_quote and in_multiline_quote and csv[i] == '"':
if len(buffer) > 2 and (csv[i-1] == "\r" or csv[i-1] == "\n"):
in_quote = False
in_multiline_quote = False
if len(buffer):
buffer = buffer.strip(" \r\n")
if len(buffer) > 1 and buffer[0] == '"' and buffer[-1] == '"':
buffer = buffer[1:-1]
columns.append(buffer)
except:
pass
return columns
def ProcessEvents(self, events):
result = {'pageData': {},
'requests': {},
'dns': {},
'sockets': {}}
dns = {}
sockets = {}
requests = {}
pageContexts = []
CMarkup = []
navigating = True
for event in events:
try:
if 'activity' in event:
id = event['activity']
if event['name'] == 'Microsoft-IE/Mshtml_CWindow_SuperNavigate2/Start':
navigating = True
if self.earliest_navigate is None and\
(event['name'] == 'Microsoft-IE/Mshtml_CWindow_SuperNavigate2/Start' or
event['name'] == 'Microsoft-IE/Mshtml_BFCache/Info' or
event['name'] == 'Microsoft-IE/Mshtml_WebOCEvents_BeforeNavigate/Info' or
event['name'] == 'Microsoft-IE/Mshtml_CDoc_Navigation/Info'):
self.earliest_navigate = event['ts']
if navigating and event['name'] == 'Microsoft-IE/Mshtml_CDoc_Navigation/Info':
if 'EventContextId' in event['fields'] and 'CMarkup*' in event['fields']:
pageContexts.append(event['fields']['EventContextId'])
CMarkup.append(event['fields']['CMarkup*'])
navigating = False
if 'start' not in result:
result['start'] = event['ts']
if 'URL' in event['fields'] and 'URL' not in result:
result['URL'] = event['fields']['URL']
elif 'start' in result:
# Page Navigation events
if event['name'] == 'Microsoft-IE/Mshtml_WebOCEvents_DocumentComplete/Info':
if 'CMarkup*' in event['fields'] and event['fields']['CMarkup*'] in CMarkup:
result['pageData']['load'] = event['ts']
if event['name'] == 'Microsoft-IE/Mshtml_CMarkup_LoadEvent_Start/Start':
if 'EventContextId' in event['fields'] and event['fields']['EventContextId'] in pageContexts:
result['pageData']['loadEventStart'] = event['ts']
if event['name'] == 'Microsoft-IE/Mshtml_CMarkup_LoadEvent_Stop/Stop':
if 'EventContextId' in event['fields'] and event['fields']['EventContextId'] in pageContexts:
result['pageData']['loadEventEnd'] = event['ts']
if event['name'] == 'Microsoft-IE/Mshtml_CMarkup_DOMContentLoadedEvent_Start/Start':
if 'EventContextId' in event['fields'] and event['fields']['EventContextId'] in pageContexts:
result['pageData']['domContentLoadedEventStart'] = event['ts']
if event['name'] == 'Microsoft-IE/Mshtml_CMarkup_DOMContentLoadedEvent_Stop/Stop':
if 'EventContextId' in event['fields'] and event['fields']['EventContextId'] in pageContexts:
result['pageData']['domContentLoadedEventEnd'] = event['ts']
# DNS
if event['name'] == 'Microsoft-Windows-WinINet/WININET_DNS_QUERY/Start' and id not in dns:
if 'HostName' in event['fields']:
dns[id] = {'host': event['fields']['HostName']}
if event['name'] == 'Microsoft-Windows-WinINet/WININET_DNS_QUERY/Stop' and id in dns:
if 'AddressList' in event['fields']:
dns[id]['addresses'] = list(filter(None, event['fields']['AddressList'].split(';')))
if event['name'] == 'Microsoft-Windows-WinINet/Wininet_Getaddrinfo/Start' and id in dns:
dns[id]['start'] = event['ts']
if event['name'] == 'Microsoft-Windows-WinINet/Wininet_Getaddrinfo/Stop' and id in dns:
dns[id]['end'] = event['ts']
# Sockets
if event['name'] == 'Microsoft-Windows-WinINet/Wininet_SocketConnect/Start' and id not in result['sockets']:
result['sockets'][id] = {'start': event['ts'], 'index': len(result['sockets'])}
if 'Socket' in event['fields']:
result['sockets'][id]['socket'] = event['fields']['Socket']
if 'SourcePort' in event['fields']:
sockets[event['fields']['SourcePort']] = id # keep a mapping from the source port to the connection activity id
result['sockets'][id]['srcPort'] = event['fields']['SourcePort']
if 'RemoteAddressIndex' in event['fields']:
result['sockets'][id]['addrIndex'] = event['fields']['RemoteAddressIndex']
                    if event['name'] == 'Microsoft-Windows-WinINet/Wininet_SocketConnect/Stop' and id in result['sockets']:
                        result['sockets'][id]['end'] = event['ts']
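# --- Hedged usage sketch (not part of the original agent code above) ---------------
# Assumptions: xperf.exe (Windows Performance Toolkit) is on PATH, the process runs
# elevated, and `test_info`/`dom_data` are whatever the surrounding agent normally
# passes to Write(); only the Start/Stop/Write methods defined above are used.
if __name__ == '__main__':
    tracer = ETW()
    if tracer.Start(r'c:\temp\wpt.etl') == 0:      # begin capturing the WinINet/IE providers
        # ... drive the browser navigation under test here ...
        tracer.Stop()                              # stop the xperf session
        # start_offset = tracer.Write(test_info, dom_data)  # parse events and persist results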
"""
library functions for the CIJOE test runner, `cij_runner`.
"""
from __future__ import print_function
from subprocess import Popen, STDOUT
from xml.dom import minidom
import shutil
import copy
import time
import os
import yaml
import cij.test
import cij
HOOK_PATTERNS = {
"enter": [
"%s.sh",
"%s_enter.sh",
"%s_enter.py",
],
"exit": [
"%s_exit.sh",
"%s_exit.py"
]
}
HOOK = {
"evars": {},
"name": None,
"fname": None,
"fpath": None,
"fpath_orig": None,
"res_root": None,
"log_fpath": None,
"rcode": None,
"wallc": None,
}
TESTSUITE = {
"ident": None,
"name": None,
"alias": None,
"hooks": {
"enter": [],
"exit": []
},
"evars": {},
"fpath": None,
"fname": None,
"res_root": None,
"aux_root": None,
"aux_list": [],
"status": "UNKN",
"wallc": None,
"testcases": [],
"hooks_pr_tcase": [],
}
TESTCASE = {
"ident": None,
"fpath": None,
"fname": None,
"name": None,
"res_root": None,
"aux_root": None,
"aux_list": [],
"log_fpath": None,
"hooks": None,
"evars": {},
"status": "UNKN",
"rcode": None,
"wallc": None,
}
TRUN = {
"ver": None,
"conf": None,
"evars": {},
"progress": {
"PASS": 0,
"FAIL": 0,
"UNKN": 0
},
"stamp": {
"begin": None,
"end": None
},
"hooks": {
"enter": [],
"exit": []
},
"res_root": None,
"aux_root": None,
"aux_list": [],
"testsuites": [],
"status": "UNKN",
"wallc": None,
}
def yml_fpath(output_path):
"""Returns the path to the trun YAML-file"""
return os.sep.join([output_path, "trun.yml"])
def junit_fpath(output_path):
"""Returns the path to the jUNIT XML-file"""
return os.sep.join([output_path, "trun.xml"])
def script_run(trun, script):
"""Execute a script or testcase"""
if trun["conf"]["VERBOSE"]:
cij.emph("rnr:script:run { script: %s }" % script)
cij.emph("rnr:script:run:evars: %s" % script["evars"])
launchers = {
".py": "python",
".sh": "source"
}
ext = os.path.splitext(script["fpath"])[-1]
    if ext not in launchers:
cij.err("rnr:script:run { invalid script[\"fpath\"]: %r }" % script["fpath"])
return 1
launch = launchers[ext]
with open(script["log_fpath"], "a") as log_fd:
log_fd.write("# script_fpath: %r\n" % script["fpath"])
log_fd.flush()
bgn = time.time()
cmd = [
'bash', '-c',
'CIJ_ROOT=$(cij_root) && '
'source $CIJ_ROOT/modules/cijoe.sh && '
'source %s && '
'CIJ_TEST_RES_ROOT="%s" %s %s ' % (
trun["conf"]["ENV_FPATH"],
script["res_root"],
launch,
script["fpath"]
)
]
if trun["conf"]["VERBOSE"] > 1:
cij.emph("rnr:script:run { cmd: %r }" % " ".join(cmd))
evars = os.environ.copy()
evars.update({k: str(script["evars"][k]) for k in script["evars"]})
process = Popen(
cmd,
stdout=log_fd,
stderr=STDOUT,
cwd=script["res_root"],
env=evars
)
process.wait()
script["rcode"] = process.returncode
script["wallc"] = time.time() - bgn
if trun["conf"]["VERBOSE"]:
cij.emph("rnr:script:run { wallc: %02f }" % script["wallc"])
cij.emph(
"rnr:script:run { rcode: %r } " % script["rcode"],
script["rcode"]
)
return script["rcode"]
def hook_setup(parent, hook_fpath):
"""Setup hook"""
hook = copy.deepcopy(HOOK)
hook["name"] = os.path.splitext(os.path.basename(hook_fpath))[0]
hook["name"] = hook["name"].replace("_enter", "").replace("_exit", "")
hook["res_root"] = parent["res_root"]
hook["fpath_orig"] = hook_fpath
hook["fname"] = "hook_%s" % os.path.basename(hook["fpath_orig"])
hook["fpath"] = os.sep.join([hook["res_root"], hook["fname"]])
hook["log_fpath"] = os.sep.join([
hook["res_root"],
"%s.log" % hook["fname"]
])
hook["evars"].update(copy.deepcopy(parent["evars"]))
shutil.copyfile(hook["fpath_orig"], hook["fpath"])
return hook
def hooks_setup(trun, parent, hnames=None):
"""
Setup test-hooks
@returns dict of hook filepaths {"enter": [], "exit": []}
"""
hooks = {
"enter": [],
"exit": []
}
if hnames is None: # Nothing to do, just return the struct
return hooks
for hname in hnames: # Fill out paths
for med in HOOK_PATTERNS:
for ptn in HOOK_PATTERNS[med]:
fpath = os.sep.join([trun["conf"]["HOOKS"], ptn % hname])
if not os.path.exists(fpath):
continue
hook = hook_setup(parent, fpath)
if not hook:
continue
hooks[med].append(hook)
if not hooks["enter"] + hooks["exit"]:
cij.err("rnr:hooks_setup:FAIL { hname: %r has no files }" % hname)
return None
return hooks
def trun_to_file(trun, fpath=None):
"""Dump the given trun to file"""
if fpath is None:
fpath = yml_fpath(trun["conf"]["OUTPUT"])
with open(fpath, 'w') as yml_file:
data = yaml.dump(trun, explicit_start=True, default_flow_style=False)
yml_file.write(data)
def trun_to_junitfile(trun, fpath=None):
"""Generate jUNIT XML from testrun YML"""
try:
if fpath is None:
fpath = junit_fpath(trun["conf"]["OUTPUT"])
doc = minidom.Document()
doc_testsuites = doc.createElement('testsuites')
duration = 0
stamp = trun.get("stamp", None)
if stamp:
stamp_begin = stamp.get("begin", None)
if not stamp_begin:
stamp_begin = time.time()
stamp_end = stamp.get("end", None)
if not stamp_end:
stamp_end = time.time()
if stamp_end > stamp_begin:
duration = stamp_end - stamp_begin
doc_testsuites.setAttribute("duration", str(duration))
doc.appendChild(doc_testsuites)
for ts_id, tsuite in enumerate(trun.get("testsuites", [])):
doc_tsuite = doc.createElement("testsuite")
doc_tsuite.setAttribute("name", tsuite.get("name", "UNNAMED"))
doc_tsuite.setAttribute("package", tsuite.get("ident", "UNDEFINED"))
doc_tsuite.setAttribute(
"tests",
str(len(tsuite.get("testcases", [])))
)
nfailures = 0
wallc_total = 0.0
for tcase in tsuite["testcases"]:
wallc = tcase.get("wallc", None)
if not wallc:
wallc = 0.0
wallc_total += wallc
doc_tcase = doc.createElement("testcase")
doc_tcase.setAttribute(
"name", str(tcase.get("name", "UNNAMED"))
)
doc_tcase.setAttribute(
"classname", str(tcase.get("ident", "UNDEFINED"))
)
doc_tcase.setAttribute("time", "%0.3f" % wallc)
rcode = tcase.get("rcode", None)
if rcode != 0:
nfailures += 1
doc_failure = doc.createElement("failure")
doc_failure.setAttribute(
"message",
"not executed" if rcode is None else "test failed"
)
doc_tcase.appendChild(doc_failure)
doc_tsuite.appendChild(doc_tcase)
doc_tsuite.setAttribute("failures", str(nfailures))
doc_tsuite.setAttribute("time", "%0.3f" % wallc_total)
doc_testsuites.appendChild(doc_tsuite)
with open(fpath, "w") as f:
f.write(doc.toprettyxml(indent=" "))
except Exception as ex:
cij.err("Failed persisting testrun as jUNIT XML, ex(%r)" % ex)
return 1
return 0
def trun_from_file(fpath):
"""Returns trun from the given fpath"""
with open(fpath, 'r') as yml_file:
return yaml.safe_load(yml_file)
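def _example_emit_junit(output_path):
    """
    Hedged sketch (not part of the original module): reload the testrun state that
    trun_to_file() wrote and emit a jUNIT report for a CI system. Assumes `output_path`
    is an existing cij_runner output directory containing 'trun.yml'.
    """
    trun = trun_from_file(yml_fpath(output_path))
    return trun_to_junitfile(trun, fpath=junit_fpath(output_path))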
def trun_emph(trun):
    """Print essential info on the given test run"""
if trun["conf"]["VERBOSE"] > 1: # Print environment variables
cij.emph("rnr:CONF {")
for cvar in sorted(trun["conf"].keys()):
cij.emph(" % 16s: %r" % (cvar, trun["conf"][cvar]))
cij.emph("}")
if trun["conf"]["VERBOSE"]:
cij.emph("rnr:INFO {")
cij.emph(" OUTPUT: %r" % trun["conf"]["OUTPUT"])
cij.emph(" yml_fpath: %r" % yml_fpath(trun["conf"]["OUTPUT"]))
cij.emph("}")
def tcase_setup(trun, parent, tcase_fname):
"""
Create and initialize a testcase
"""
#pylint: disable=locally-disabled, unused-argument
case = copy.deepcopy(TESTCASE)
case["fname"] = tcase_fname
case["fpath_orig"] = os.sep.join([trun["conf"]["TESTCASES"], case["fname"]])
if not os.path.exists(case["fpath_orig"]):
cij.err('rnr:tcase_setup: !case["fpath_orig"]: %r' % case["fpath_orig"])
return None
case["name"] = os.path.splitext(case["fname"])[0]
case["ident"] = "/".join([parent["ident"], case["fname"]])
case["res_root"] = os.sep.join([parent["res_root"], case["fname"]])
case["aux_root"] = os.sep.join([case["res_root"], "_aux"])
case["log_fpath"] = os.sep.join([case["res_root"], "run.log"])
case["fpath"] = os.sep.join([case["res_root"], case["fname"]])
case["evars"].update(copy.deepcopy(parent["evars"]))
# Initalize
os.makedirs(case["res_root"]) # Create DIRS
os.makedirs(case["aux_root"])
shutil.copyfile(case["fpath_orig"], case["fpath"]) # Copy testcase
# Initialize hooks
case["hooks"] = hooks_setup(trun, case, parent.get("hooks_pr_tcase"))
return case
def tsuite_exit(trun, tsuite):
"""Triggers when exiting the given testsuite"""
if trun["conf"]["VERBOSE"]:
cij.emph("rnr:tsuite:exit")
rcode = 0
for hook in reversed(tsuite["hooks"]["exit"]): # EXIT-hooks
rcode = script_run(trun, hook)
if rcode:
break
if trun["conf"]["VERBOSE"]:
cij.emph("rnr:tsuite:exit { rcode: %r } " % rcode, rcode)
return rcode
def tsuite_enter(trun, tsuite):
"""Triggers when entering the given testsuite"""
if trun["conf"]["VERBOSE"]:
cij.emph("rnr:tsuite:enter { name: %r }" % tsuite["name"])
rcode = 0
for hook in tsuite["hooks"]["enter"]: # ENTER-hooks
rcode = script_run(trun, hook)
if rcode:
break
if trun["conf"]["VERBOSE"]:
cij.emph("rnr:tsuite:enter { rcode: %r } " % rcode, rcode)
return rcode
def tsuite_setup(trun, declr, enum):
"""
    Creates and initializes a TESTSUITE struct, with side-effects such as creating
output directories and forwarding initialization of testcases
"""
suite = copy.deepcopy(TESTSUITE) # Setup the test-suite
suite["name"] = declr.get("name")
if suite["name"] is None:
cij.err("rnr:tsuite_setup: no testsuite is given")
return None
suite["alias"] = declr.get("alias")
suite["ident"] = "%s_%d" % (suite["name"], enum)
suite["res_root"] = os.sep.join([trun["conf"]["OUTPUT"], suite["ident"]])
suite["aux_root"] = os.sep.join([suite["res_root"], "_aux"])
suite["evars"].update(copy.deepcopy(trun["evars"]))
suite["evars"].update(copy.deepcopy(declr.get("evars", {})))
# Initialize
os.makedirs(suite["res_root"])
os.makedirs(suite["aux_root"])
# Setup testsuite-hooks
suite["hooks"] = hooks_setup(trun, suite, declr.get("hooks"))
# Forward from declaration
suite["hooks_pr_tcase"] = declr.get("hooks_pr_tcase", [])
suite["fname"] = "%s.suite" % suite["name"]
suite["fpath"] = os.sep.join([trun["conf"]["TESTSUITES"], suite["fname"]])
#
# Load testcases from .suite file OR from declaration
#
tcase_fpaths = [] # Load testcase fpaths
if os.path.exists(suite["fpath"]): # From suite-file
suite_lines = (
l.strip() for l in open(suite["fpath"]).read().splitlines()
)
tcase_fpaths.extend(
(l for l in suite_lines if len(l) > 1 and l[0] != "#")
)
else: # From declaration
tcase_fpaths.extend(declr.get("testcases", []))
    # NOTE: duplicate testcases within a suite should eventually be allowed,
    # NOTE: but they are currently rejected here as a hot-fix
if len(set(tcase_fpaths)) != len(tcase_fpaths):
cij.err("rnr:suite: failed: duplicate tcase in suite not supported")
return None
for tcase_fname in tcase_fpaths: # Setup testcases
tcase = tcase_setup(trun, suite, tcase_fname)
if not tcase:
cij.err("rnr:suite: failed: tcase_setup")
return None
suite["testcases"].append(tcase)
return suite
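# Hedged example (not part of the original module): the shape of a testplan entry as
# consumed by tsuite_setup() above; the keys mirror the declr.get(...) calls and every
# name/value below is hypothetical.
_EXAMPLE_TSUITE_DECLR = {
    "name": "example",                      # matches 'example.suite' or falls back to 'testcases'
    "alias": "Example suite",
    "evars": {"NVME_DEV": "/dev/nvme0n1"},  # environment exported to hooks and testcases
    "hooks": ["sysinfo"],                   # testsuite-level enter/exit hooks
    "hooks_pr_tcase": ["dmesg"],            # hooks wrapped around every testcase
    "testcases": ["tc_example_01.sh"],      # used only when no .suite file exists
}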
def tcase_exit(trun, tsuite, tcase):
    """Triggers when exiting the given testcase"""
#pylint: disable=locally-disabled, unused-argument
if trun["conf"]["VERBOSE"]:
cij.emph("rnr:tcase:exit { fname: %r }" % tcase["fname"])
rcode = 0
for hook in reversed(tcase["hooks"]["exit"]): # tcase EXIT-hooks
rcode = script_run(trun, hook)
if rcode:
break
if trun["conf"]["VERBOSE"]:
cij.emph("rnr:tcase:exit { rcode: %r }" % rcode, rcode)
return rcode
def tcase_enter(trun, tsuite, tcase):
"""
setup res_root and aux_root, log info and run tcase-enter-hooks
    @returns 0 when all hooks succeed, some other value otherwise
"""
#pylint: disable=locally-disabled, unused-argument
if trun["conf"]["VERBOSE"]:
cij.emph("rnr:tcase:enter")
cij.emph("rnr:tcase:enter { fname: %r }" % tcase["fname"])
cij.emph("rnr:tcase:enter { log_fpath: %r }" % tcase["log_fpath"])
rcode = 0
for hook in tcase["hooks"]["enter"]: # tcase ENTER-hooks
rcode = script_run(trun, hook)
if rcode:
break
if trun["conf"]["VERBOSE"]:
        cij.emph("rnr:tcase:enter { rcode: %r }" % rcode, rcode)
return rcode
def trun_exit(trun):
"""Triggers when exiting the given testrun"""
if trun["conf"]["VERBOSE"]:
cij.emph("rnr:trun:exit")
rcode = 0
for hook in reversed(trun["hooks"]["exit"]): # EXIT-hooks
rcode = script_run(trun, hook)
if rcode:
break
if trun["conf"]["VERBOSE"]:
cij.emph("rnr:trun::exit { rcode: %r }" % rcode, rcode)
return rcode
def trun_enter(trun):
"""Triggers when entering the given testrun"""
if trun["conf"]["VERBOSE"]:
cij.emph("rnr:trun::enter")
trun["stamp"]["begin"] = int(time.time()) # Record start timestamp
rcode = 0
for hook in trun["hooks"]["enter"]: # ENTER-hooks
        rcode = script_run(trun, hook)
        if rcode:
            break
<filename>src/sqlfluff/parser/segments_base.py
"""Base segment definitions.
Here we define:
- BaseSegment. This is the root class for all segments, and is
designed to hold other subsegments.
- RawSegment. This is designed to be the root segment, without
any children, and the output of the lexer.
- UnparsableSegment. A special wrapper to indicate that the parse
function failed on this block of segments and to prevent further
analysis.
These are the fundamental building blocks of the rest of the parser.
"""
import logging
from io import StringIO
from benchit import BenchIt
from .match import MatchResult, curtail_string, join_segments_raw
from ..errors import SQLLintError
def verbosity_logger(msg, verbosity=0, level='info', v_level=3):
"""Log or print based on configuration."""
if verbosity >= v_level:
print(msg)
else:
# Should be mostly equivalent to logging.info(msg)
getattr(logging, level)(msg)
def parse_match_logging(grammar, func, msg, parse_context, v_level, **kwargs):
"""Log in a particular consistent format for use while matching."""
# If we can avoid this, bank the performance increase
if parse_context.verbosity <= 1:
return
# Otherwise carry on...
symbol = kwargs.pop('symbol', '')
s = "[PD:{0} MD:{1}]\t{2:<50}\t{3:<20}\t{4:<4}".format(
parse_context.parse_depth, parse_context.match_depth,
('.' * parse_context.match_depth) + str(parse_context.match_segment),
"{0}.{1} {2}".format(grammar, func, msg),
symbol
)
if kwargs:
s += "\t[{0}]".format(
', '.join(
"{0}={1}".format(
k,
repr(v) if isinstance(v, str) else v
) for k, v in kwargs.items()
)
)
verbosity_logger(s, parse_context.verbosity, v_level=v_level)
def frame_msg(msg):
"""Frame a message with hashes so that it covers five lines."""
return "###\n#\n# {0}\n#\n###".format(msg)
def check_still_complete(segments_in, matched_segments, unmatched_segments):
"""Check that the segments in are the same as the segments out."""
initial_str = join_segments_raw(segments_in)
current_str = join_segments_raw(
matched_segments + unmatched_segments
)
if initial_str != current_str:
raise RuntimeError(
"Dropped elements in sequence matching! {0!r} != {1!r}".format(
initial_str, current_str))
class ParseBlacklist:
"""Acts as a cache to stop unnecessary matching."""
def __init__(self):
self._blacklist_struct = {}
def _hashed_version(self):
return {
k: {hash(e) for e in self._blacklist_struct[k]}
for k in self._blacklist_struct
}
def check(self, seg_name, seg_tuple):
"""Check this seg_tuple against this seg_name.
Has this seg_tuple already been matched
unsuccessfully against this segment name.
"""
if seg_name in self._blacklist_struct:
if seg_tuple in self._blacklist_struct[seg_name]:
return True
return False
def mark(self, seg_name, seg_tuple):
"""Mark this seg_tuple as not a match with this seg_name."""
if seg_name in self._blacklist_struct:
self._blacklist_struct[seg_name].add(seg_tuple)
else:
self._blacklist_struct[seg_name] = {seg_tuple}
def clear(self):
"""Clear the blacklist struct."""
self._blacklist_struct = {}
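def _blacklist_usage_sketch():
    """Hedged usage sketch (not in the original module) of the ParseBlacklist cache.
    The segment name and raw tuple below are hypothetical stand-ins for what the
    matching code derives from real segments."""
    bl = ParseBlacklist()
    seg_tuple = (('raw', 'SELECT'), ('whitespace', ' '), ('raw', '1'))
    assert bl.check('select_statement', seg_tuple) is False   # nothing recorded yet
    bl.mark('select_statement', seg_tuple)                    # remember the failed match
    assert bl.check('select_statement', seg_tuple) is True    # skip re-matching this span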
class ParseContext:
"""The context for parsing. It holds configuration and rough state.
We expect that an object (or copy of this object) will be passed
around rather than the individual variables for parse and match depth
as before.
"""
__slots__ = ['match_depth', 'parse_depth', 'verbosity', 'dialect', 'match_segment', 'recurse', 'blacklist']
def __init__(self, dialect=None, verbosity=0, match_depth=0, parse_depth=0, match_segment=None, recurse=True, blacklist=None):
# Write all the variables in a DRY way. Yes it's a bit convoluted. Sorry.
for k in self.__slots__:
setattr(self, k, locals()[k])
# Initialise a blacklist struct if one is not present.
if getattr(self, 'blacklist') is None:
setattr(self, 'blacklist', ParseBlacklist())
def copy(self, incr=None, decr=None, **kwargs):
"""Make a copy of the parse context, optionally with some edited variables."""
current_vals = {k: getattr(self, k) for k in self.__slots__}
current_vals.update(kwargs or {})
# Increment
if isinstance(incr, str):
current_vals[incr] += 1
elif incr:
for k in incr:
current_vals[k] += 1
# Decrement
if isinstance(decr, str):
current_vals[decr] -= 1
elif decr:
for k in decr:
current_vals[k] -= 1
# Return
return self.__class__(**current_vals)
@classmethod
def from_config(cls, config):
"""Construct a `ParseContext` from a `FluffConfig`."""
return cls(dialect=config.get('dialect_obj'), recurse=config.get('recurse'))
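def _parse_context_usage_sketch():
    """Hedged sketch (not in the original module): matching code hands a deeper copy of
    the context to child matchers instead of passing depths around by hand. The
    'select_statement' label is a hypothetical match_segment value."""
    ctx = ParseContext(dialect=None, verbosity=0)
    child = ctx.copy(incr='match_depth', match_segment='select_statement')
    assert child.match_depth == ctx.match_depth + 1
    assert child.blacklist is ctx.blacklist   # the blacklist cache is shared, not duplicated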
class BaseSegment:
"""The base segment element.
This defines the base element which drives both Lexing, Parsing and Linting.
A large chunk of the logic which defines those three operations are centered
    here. Much of what is defined in the BaseSegment is also used by its many
subclasses rather than directly here.
    For clarity, the `BaseSegment` is mostly centered around a segment which contains
other subsegments. For segments which don't have *children*, refer to the `RawSegment`
class (which still inherits from this one).
Segments are used both as instances to hold chunks of text, but also as classes
themselves where they function a lot like grammars, and return instances of themselves
when they match. The many classmethods in this class are usually to serve their
purpose as a matcher.
"""
# `type` should be the *category* of this kind of segment
type = 'base'
parse_grammar = None
match_grammar = None
grammar = None
comment_seperate = False
is_whitespace = False
    optional = False  # NB: See the sequence grammar for details
is_segment = True
_name = None
_func = None # Available for use by subclasses (e.g. the LambdaSegment)
is_meta = False
@property
def name(self):
"""The name of this segment.
        The reason for two routes for names is that some subclasses
        might want to override the name rather than just deriving it
        from the class name.
Name should be specific to this kind of segment, while `type`
should be a higher level descriptor of the kind of segment.
For example, the name of `+` is 'plus' but the type might be
'binary_operator'.
"""
return self._name or self.__class__.__name__
@property
def is_expandable(self):
"""Return true if it is meaningful to call `expand` on this segment.
We need to do this recursively because even if *this* segment doesn't
        need expanding, maybe one of its children does.
"""
if self._parse_grammar():
return True
elif self.segments and any(s.is_expandable for s in self.segments):
return True
else:
return False
@classmethod
def simple(cls, parse_context):
"""Does this matcher support an uppercase hash matching route?"""
return False
@property
def is_code(self):
"""Return True if this segment contains any code."""
return any(seg.is_code for seg in self.segments)
@property
def is_comment(self):
"""Return True if this is entirely made of comments."""
return all(seg.is_comment for seg in self.segments)
@classmethod
def is_optional(cls):
"""Return True if this segment is optional.
This is used primarily in sequence matching, where optional
segments can be skipped.
"""
return cls.optional
@classmethod
def _match_grammar(cls):
"""Return the `match_grammar` attribute if present, or the `grammar` attribute if not."""
if cls.match_grammar:
return cls.match_grammar
else:
return cls.grammar
@classmethod
def _parse_grammar(cls):
"""Return the `parse_grammar` attribute if present, or the `grammar` attribute if not."""
if cls.parse_grammar:
return cls.parse_grammar
else:
return cls.grammar
def validate_segments(self, text="constructing", validate=True):
"""Validate the current set of segments.
Check the elements of the `segments` attribute are all
themselves segments, and that the positions match up.
        `validate` confirms whether we should check contiguousness.
"""
# Placeholder variables for positions
start_pos = None
end_pos = None
prev_seg = None
for elem in self.segments:
if not isinstance(elem, BaseSegment):
raise TypeError(
"In {0} {1}, found an element of the segments tuple which"
" isn't a segment. Instead found element of type {2}.\nFound: {3}\nFull segments:{4}".format(
text,
type(self),
type(elem),
elem,
self.segments
))
# While applying fixes, we shouldn't validate here, because it will fail.
if validate:
# If we have a comparison point, validate that
if end_pos and elem.get_start_pos_marker() != end_pos:
                    raise TypeError(
                        "In {0} {1}, found an element of the segments tuple which"
                        " isn't contiguous with previous: {2} > {3}. End pos: {4}."
                        " Prev String: {5!r}".format(
text,
type(self),
prev_seg,
elem,
end_pos,
prev_seg.raw
))
start_pos = elem.get_start_pos_marker()
end_pos = elem.get_end_pos_marker()
prev_seg = elem
if start_pos.advance_by(elem.raw) != end_pos:
raise TypeError(
"In {0} {1}, found an element of the segments tuple which"
" isn't self consistent: {2}".format(
text,
type(self),
elem
))
def get_end_pos_marker(self):
"""Return the pos marker at the end of this segment."""
return self.segments[-1].get_end_pos_marker()
def get_start_pos_marker(self):
"""Return the pos marker at the start of this segment."""
return self.segments[0].get_start_pos_marker()
def __init__(self, segments, pos_marker=None, validate=True):
if len(segments) == 0:
raise RuntimeError(
"Setting {0} with a zero length segment set. This shouldn't happen.".format(
self.__class__))
if hasattr(segments, 'matched_segments'):
# Safely extract segments from a match
self.segments = segments.matched_segments
elif isinstance(segments, tuple):
self.segments = segments
elif isinstance(segments, list):
self.segments = tuple(segments)
else:
raise TypeError(
"Unexpected type passed to BaseSegment: {0}".format(
type(segments)))
# Check elements of segments:
self.validate_segments(validate=validate)
if pos_marker:
self.pos_marker = pos_marker
else:
# If no pos given, it's the pos of the first segment
# Work out if we're dealing with a match result...
if hasattr(segments, 'initial_match_pos_marker'):
self.pos_marker = segments.initial_match_pos_marker()
elif isinstance(segments, (tuple, list)):
self.pos_marker = segments[0].pos_marker
else:
                raise TypeError(
                    "Unexpected source of position passed to BaseSegment: {0}".format(
                        type(segments)))
        databook_order = spec.get("databook order")
full_name = spec["display name"]
if databook_order is None:
order = np.inf
else:
order = databook_order
pages[databook_page].append((spec.name, order))
data.tdve[spec.name] = TimeDependentValuesEntry(full_name, data.tvec, allowed_units=[framework.get_databook_units(full_name)], comment=spec["guidance"])
data.tdve[spec.name].write_units = True
data.tdve[spec.name].write_uncertainty = True
if obj_type == "pars":
data.tdve[spec.name].write_assumption = True
if spec["timed"] == "y":
data.tdve[spec.name].tvec = [] # If parameter is timed, don't show any years
data.tdve[spec.name].write_uncertainty = False # Don't show uncertainty for timed parameters. In theory users could manually add the column and sample over it, but because the duration is rounded to the timestep, it's likely to have confusing stepped effects
data.tdve[spec.name].pop_type = pop_type
# Now convert pages to full names and sort them into the correct order
for _, spec in framework.sheets["databook pages"][0].iterrows():
if spec["datasheet code name"] in pages:
pages[spec["datasheet code name"]].sort(key=lambda x: x[1])
data.tdve_pages[spec["datasheet title"]] = [x[0] for x in pages[spec["datasheet code name"]]]
else:
data.tdve_pages[spec["datasheet title"]] = list()
# Now, proceed to add pops, transfers, and interactions
for code_name, spec in new_pops.items():
data.add_pop(code_name, spec["label"], pop_type=spec["type"])
for code_name, spec in new_transfers.items():
data.add_transfer(code_name, spec["label"], pop_type=spec["type"])
for _, spec in framework.interactions.iterrows():
interpop = data.add_interaction(spec.name, spec["display name"], from_pop_type=spec["from population type"], to_pop_type=spec["to population type"])
if "default value" in spec and np.isfinite(spec["default value"]):
for from_pop in interpop.from_pops:
for to_pop in interpop.to_pops:
ts = TimeSeries(units=interpop.allowed_units[0])
ts.insert(None, spec["default value"])
interpop.ts[(from_pop, to_pop)] = ts
interpop.ts_attributes["Provenance"][(from_pop, to_pop)] = _DEFAULT_PROVENANCE
# Finally, insert parameter and characteristic default values
for df in [framework.comps, framework.characs, framework.pars]:
for _, spec in df.iterrows():
# In order to write a default value
# - The default value should be present and not None
# - The quantity should appear in the databook
if "default value" in spec and np.isfinite(spec["default value"]) and spec["databook page"]:
tdve = data.tdve[spec.name]
for key, ts in tdve.ts.items():
ts.insert(None, spec["default value"])
tdve.ts_attributes["Provenance"][key] = _DEFAULT_PROVENANCE
return data
@staticmethod
def from_spreadsheet(spreadsheet, framework):
"""
Construct ProjectData from spreadsheet
The framework is needed because the databook does not read in or otherwise store
- The valid units for quantities
- Which population type is associated with TDVE tables
:param spreadsheet: The name of a spreadsheet, or a `sc.Spreadsheet`
:param framework: A :class:`ProjectFramework` instance
:return: A new :class:`ProjectData` instance
"""
# Basically the strategy is going to be
# 1. Read in all of the stuff - pops, transfers, interpops can be directly added to Data
# 2. Read in all the other TDVE content, and then store it in the data specs according to the variable type defined in the Framework
# e.g. the fact that 'Alive' is a Characteristic is stored in the Framework and Data but not in the Databook. So for example, we read in
# a TDVE table called 'Alive', but it needs to be stored in data.specs['charac']['ch_alive'] and the 'charac' and 'ch_alive' are only available in the Framework
import openpyxl
self = ProjectData(framework=framework)
if not isinstance(spreadsheet, sc.Spreadsheet):
spreadsheet = sc.Spreadsheet(spreadsheet)
workbook = openpyxl.load_workbook(spreadsheet.tofile(), read_only=True, data_only=True) # Load in read-only mode for performance, since we don't parse comments etc.
validate_category(workbook, "atomica:databook")
# These sheets are optional - if none of these are provided in the databook
# then they will remain empty
self.transfers = list()
self.interpops = list()
for sheet in workbook.worksheets:
if sheet.title.startswith("#ignore"):
continue
if sheet.title == "Population Definitions":
try:
self._read_pops(sheet)
except Exception as e:
message = 'An error was detected on the "Population Definitions" sheet'
raise Exception("%s -> %s" % (message, e)) from e
elif sheet.title == "Transfers":
try:
self._read_transfers(sheet)
except Exception as e:
message = 'An error was detected on the "Transfers" sheet'
raise Exception("%s -> %s" % (message, e)) from e
elif sheet.title == "Interactions":
try:
self._read_interpops(sheet)
except Exception as e:
message = 'An error was detected on the "Interactions" sheet'
raise Exception("%s -> %s" % (message, e)) from e
elif sheet.title == "Metadata":
continue
else:
self.tdve_pages[sheet.title] = []
tables, start_rows = read_tables(sheet)
for table, start_row in zip(tables, start_rows):
try:
tdve = TimeDependentValuesEntry.from_rows(table)
except Exception as e:
message = 'Error on sheet "%s" while trying to read a TDVE table starting on row %d' % (sheet.title, start_row)
raise Exception("%s -> %s" % (message, e)) from e
# If the TDVE is not in the Framework, that's a critical stop error, because the framework needs to at least declare
# what kind of variable this is - otherwise, we don't know the allowed units and cannot write the databook back properly
try:
spec, item_type = framework.get_variable(tdve.name)
except NotFoundError:
message = 'Error on sheet "%s" while reading TDVE table "%s" (row %d). The variable was not found in the Framework' % (sheet.title, tdve.name, start_row)
raise Exception(message)
code_name = spec.name
tdve.allowed_units = [framework.get_databook_units(code_name)]
tdve.pop_type = spec["population type"]
# Migrate the units (20181114)
# All TimeSeries instances in databook TDVE tables should have the same units as the allowed units
# However, if the user entered something that is wrong, we need to keep it and alert them during validation
# Therefore, we can migrate as long as the _old_ units made sense
for ts in tdve.ts.values():
if ts.units != tdve.allowed_units[0]:
if not ts.units or ts.units.strip().lower() == tdve.allowed_units[0].strip().split()[0].strip().lower():
ts.units = tdve.allowed_units[0]
if not spec["databook page"]:
logger.warning('A TDVE table for "%s" (%s) was read in and will be used, but the Framework did not mark this quantity as appearing in the databook', tdve.name, code_name)
tdve.comment = spec["guidance"]
if code_name in self.tdve:
raise Exception('A TDVE table for "%s" (%s) appears more than once in the databook. The first table was on sheet "%s" and the first duplicate table is on sheet "%s" starting on row %d' % (tdve.name, code_name, [k for k, v in self.tdve_pages.items() if code_name in v][0], sheet.title, start_row))
self.tdve[code_name] = tdve
# Store the TDVE on the page it was actually on, rather than the one in the framework. Then, if users move anything around, the change will persist
self.tdve_pages[sheet.title].append(code_name)
tvals = set()
for tdve in self.tdve.values():
tvals.update(tdve.tvec)
for tdc in self.transfers + self.interpops:
tvals.update(tdc.tvec)
self.tvec = np.array(sorted(tvals))
return self
def validate(self, framework) -> bool:
"""
Check if the ProjectData instance can be used to run simulations
A databook can be 'valid' in two senses
- The Excel file adheres to the correct syntax and it can be parsed into a ProjectData object
- The resulting ProjectData object contains sufficient information to run a simulation
Sometimes it is desirable for ProjectData to be valid in one sense rather than the other. For example,
in order to run a simulation, the ProjectData needs to contain at least one value for every TDVE table.
However, the TDVE table does _not_ need to contain values if all we want to do is add another key pop
Thus, the first stage of validation is the ProjectData constructor - if that runs, then users can
access methods like 'add_pop','remove_transfer' etc.
On the other hand, to actually run a simulation, the _contents_ of the databook need to satisfy various conditions
These tests are implemented here. The typical workflow would be that ProjectData.validate() should be used
if a simulation is going to be run. In the first instance, this can be done in `Project.load_databook` but
the FE might want to perform this check at a different point if the databook manipulation methods e.g.
`add_pop` are going to be exposed in the interface
This function throws an informative error if there are any problems identified or otherwise returns True
:param framework: A :class:`ProjectFramework` instance to validate the data against
:return: True if ProjectData is valid. An error will be raised otherwise
"""
# Make sure that all of the quantities the Framework says we should read in have been read in, and that
# those quantities all have some data values associated with them
for pop in self.pops.values():
if pop["type"] is None:
pop["type"] = self._pop_types[0]
assert pop["type"] in self._pop_types, 'Error in population "%s": population type "%s" not found in framework. If the framework defines a non-default population | |
        sample_weight (Tensor): the per-class weights; should be a 1D tensor whose length equals the
            number of classes.
        from_logits (bool): whether the output tensor is already normalized as a probability (sums to 1).
        ignore_index (int or list of int): class indices to exclude from the loss.
        cutoff (None or decimal): the cutoff point of probability for classification; should be None or a number
            less than 1.
        is_target_onehot (bool): whether the target tensor is in one-hot format.
        label_smooth (bool): whether to apply label smoothing.
        reduction (string): the method used to aggregate the loss. None means no aggregation, 'mean' means the average
            loss, 'sum' means the summation of losses, and 'batch_mean' means averaging the loss across the batch
            axis and then summing.
        normalized (bool): compute normalized focal loss (https://arxiv.org/pdf/1909.07829.pdf).
        threshold (float, optional): compute reduced focal loss (https://arxiv.org/abs/1903.01347).
    References:
        https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/loss/losses.py
Examples:
>>> FocalLoss(reduction='mean',axis=-1)(to_tensor([[0.1, 0.7 , 0.2],[0.3 , 0.6 , 0.1],[0.9 , 0.05 , 0.05],[0.3 , 0.4 , 0.3]]).float(),to_tensor([1,0,1,2]).long()).cpu()
tensor(1.1305)
"""
def __init__(self, alpha=0.5, gamma=2, normalized=False, threshold=None, axis=1, sample_weight=None, auto_balance=False, from_logits=False, ignore_index=-100, cutoff=None,
label_smooth=False, reduction='mean', enable_ohem=False, ohem_ratio=3.5, binding_dataset_symbol=None, input_names=None, output_names=None, name='FocalLoss'):
super().__init__(axis=axis, sample_weight=sample_weight, auto_balance=auto_balance, from_logits=from_logits, ignore_index=ignore_index, cutoff=cutoff,
                         label_smooth=label_smooth, reduction=reduction, enable_ohem=enable_ohem, ohem_ratio=ohem_ratio, binding_dataset_symbol=binding_dataset_symbol, input_names=input_names, output_names=output_names, name=name)
self.alpha = alpha
self.gamma = gamma
self.threshold = threshold
self.normalized = normalized
self.need_target_onehot = False
def calculate_loss(self, output, target, **kwargs):
"""
Args:
output: Tensor of arbitrary shape
target: Tensor of the same shape as input
Returns:
"""
alpha = torch.zeros(self.num_classes).to(output.device)
alpha[0] += self.alpha
alpha[1:] += (1 - self.alpha)
if ndim(output) > 2 and self.axis==1:
output = output.view(output.size(0), output.size(1), -1) # N,C,H,W => N,C,H*W
output = output.transpose(1, 2) # N,C,H*W => N,H*W,C
output = output.contiguous().view(-1, output.size(2)) # N,H*W,C => N*H*W,C
if self.is_target_onehot and target.dtype != Dtype.long:
target = argmax(target, self.axis)
self.is_target_onehot=False
target = target.view(-1, 1)
if not self.is_logsoftmax:
output=log_softmax(output)
self.is_logsoftmax=True
logpt=output
logpt = logpt.gather(1, target)
pt = logpt.data.exp()
#
loss=-1 * ((1 - pt) ** self.gamma) * logpt
alpha=alpha.gather(0, target.view(-1))
loss=loss*(alpha.view(-1,1))
return loss
# - \alpha(1 - softmax(x)[class ]) ^ gamma \log(softmax(x)[class])
#
# if self.is_logsoftmax:
# output = clip(exp(output), 1e-8, 1 - 1e-8)
# logpt = -F.cross_entropy(output, target, weight=self.sample_weight, ignore_index=self.ignore_index, reduction="none")
# pt = clip(exp(logpt), 1e-8, 1 - 1e-8)
#
# # compute the loss
# if self.threshold is None or self.threshold == 0:
# focal_term = (1 - pt).pow(self.gamma)
# else:
# focal_term = ((1.0 - pt) / self.threshold).pow(self.gamma)
# focal_term[pt < self.threshold] = 1
#
# loss = -focal_term * logpt
#
# if self.alpha is not None:
# loss = loss * (self.alpha * target + (1 - self.alpha) * (1 - target))
# if self.normalized:
# norm_factor = sum(focal_term)
# loss = loss / norm_factor
#
# return loss
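# Hedged illustration (added; not part of the trident API): a standalone restatement of the
# focal-loss math used in FocalLoss.calculate_loss above, i.e. loss = -alpha_t * (1 - pt)**gamma * log(pt),
# on plain PyTorch tensors. The helper name and the example values are assumptions for this sketch only.
def _focal_loss_sketch(logits, target, alpha=0.5, gamma=2.0):
    import torch
    import torch.nn.functional as F
    logpt = F.log_softmax(logits, dim=-1).gather(1, target.view(-1, 1))
    pt = logpt.exp()
    # class-dependent alpha: `alpha` for class 0 and `1 - alpha` for the remaining classes,
    # mirroring the alpha vector built at the top of calculate_loss
    alpha_vec = torch.full((logits.size(-1),), 1.0 - alpha)
    alpha_vec[0] = alpha
    alpha_t = alpha_vec.gather(0, target.view(-1)).view(-1, 1)
    return (-alpha_t * (1.0 - pt) ** gamma * logpt).mean()
# Example (assumed values): _focal_loss_sketch(torch.tensor([[0.1, 0.7, 0.2]]), torch.tensor([1]))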
class BCELoss(_ClassificationLoss):
def __init__(self, axis=1, sample_weight=None, auto_balance=False, from_logits=False, ignore_index=-100, cutoff=None, label_smooth=False, reduction='mean', enable_ohem=False,
ohem_ratio=3.5, binding_dataset_symbol=None, input_names=None, output_names=None, name='BCELoss'):
super().__init__(axis=axis, sample_weight=sample_weight, auto_balance=auto_balance, from_logits=from_logits, ignore_index=ignore_index, cutoff=cutoff,
label_smooth=label_smooth, reduction=reduction, enable_ohem=enable_ohem, ohem_ratio=ohem_ratio, binding_dataset_symbol=binding_dataset_symbol , input_names=input_names, output_names=output_names, name=name)
self._built = True
self.num_classes = None
self.is_logsoftmax = False
self.need_target_onehot = True
self.is_target_onehot = False
def calculate_loss(self, output, target, **kwargs):
"""
Args:
output ():
target ():
**kwargs ():
Returns:
"""
if self.is_logsoftmax:
output = exp(output)
loss = binary_cross_entropy(output, target, from_logits=self.from_logits)
return loss
class DiceLoss(_ClassificationLoss):
r"""This criterion combines :func:`nn.LogSoftmax` and :func:`nn.NLLLoss` in one single class.
It is useful when training a classification problem with `C` classes.
If provided, the optional argument :attr:`weight` should be a 1D `Tensor`
assigning weight to each of the classes.
This is particularly useful when you have an unbalanced training set.
Args:
        axis (int): the axis along which the classes are laid out.
        sample_weight (Tensor): the per-class weights; should be a 1D tensor whose length equals the
            number of classes.
        from_logits (bool): whether the output tensor is already normalized as a probability (sums to 1).
        ignore_index (int or list of int): class indices to exclude from the loss.
        cutoff (None or decimal): the cutoff point of probability for classification; should be None or a number
            less than 1.
        label_smooth (bool): whether to apply label smoothing.
        reduction (string): the method used to aggregate the loss. None means no aggregation, 'mean' means the average
            loss, 'sum' means the summation of losses, and 'batch_mean' means averaging the loss across the batch
            axis and then summing.
Examples:
>>> output=zeros((1,3,128,128))
>>> output[0,1,32:64,48:92]=1
>>> output[0,2,12:32,56:64]=1
>>> target=zeros((1,128,128)).long()
>>> target[0,33:63,50:9]=1
>>> target[0,13:35,52:65]=2
>>> DiceLoss(reduction='mean')(output,target).cpu()
tensor(0.8271)
>>> DiceLoss(ignore_index=0,reduction='mean')(output,target).cpu()
tensor(0.9829)
Reference:
https://arxiv.org/abs/1707.03237
"""
def __init__(self, smooth=1., axis=1, sample_weight=None, auto_balance=False, from_logits=False, ignore_index=-100, cutoff=None, label_smooth=False, reduction='mean',
enable_ohem=False, ohem_ratio=3.5, binding_dataset_symbol=None, input_names=None, output_names=None,name='DiceLoss'):
"""
Args:
axis (int): the axis where the class label is.
sample_weight ():
from_logits ():
ignore_index ():
cutoff ():
label_smooth ():
reduction (string):
            name (string):
"""
super().__init__(axis=axis, sample_weight=sample_weight, auto_balance=auto_balance, from_logits=from_logits, ignore_index=ignore_index, cutoff=cutoff,
label_smooth=label_smooth, reduction=reduction, enable_ohem=enable_ohem, ohem_ratio=ohem_ratio, binding_dataset_symbol=binding_dataset_symbol , input_names=input_names, output_names=output_names, name=name)
self.smooth = smooth
self.is_logsoftmax = False
self.need_target_onehot = True
self.is_multiselection = False
self._built = True
def calculate_loss(self, output, target, **kwargs):
"""
Args:
output ():
target ():
**kwargs ():
Returns:
"""
if self.is_logsoftmax:
output = exp(output)
reduce_axes = list(range(target.ndim))
axis = self.axis if self.axis >= 0 else target.ndim + self.axis
reduce_axes.remove(0)
sample_weight = expand_dims(self.sample_weight.to(get_device()) * self.ignore_index_weight.to(get_device()), 0)
n_ = ndim(output) - ndim(sample_weight)
for n in range(n_):
sample_weight = expand_dims(sample_weight, -1)
# for k in range(target.ndim-self.loss_weights.ndim):
# loss_weights=loss_weights.expand_dims(0)
intersection = reduce_sum(target * output * sample_weight, axis=reduce_axes)
den1 = reduce_sum(output * sample_weight, axis=reduce_axes)
den2 = reduce_sum(target * sample_weight, axis=reduce_axes)
dice = 1.0 - (2.0 * intersection + self.smooth) / (den1 + den2 + self.smooth)
return dice
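# Hedged illustration (added; not part of the trident API): the soft-Dice expression computed per sample
# in DiceLoss.calculate_loss above, written out for a probability map and a one-hot target without
# sample weights. The helper name and shapes are assumptions for this sketch only.
def _soft_dice_sketch(prob, target_onehot, smooth=1.0):
    import torch
    # reduce over every axis except the batch axis, as calculate_loss does via reduce_axes
    reduce_axes = list(range(1, prob.ndim))
    intersection = torch.sum(prob * target_onehot, dim=reduce_axes)
    denom = torch.sum(prob, dim=reduce_axes) + torch.sum(target_onehot, dim=reduce_axes)
    return 1.0 - (2.0 * intersection + smooth) / (denom + smooth)  # shape: (batch,)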
class KLDivergenceLoss(_ClassificationLoss):
def __init__(self, axis=1, sample_weight=None, auto_balance=False, from_logits=False, ignore_index=-100, cutoff=None, label_smooth=False, reduction='mean', enable_ohem=False,
ohem_ratio=3.5, binding_dataset_symbol=None, input_names=None, output_names=None, name='KLDivergenceLoss'):
super().__init__(axis=axis, sample_weight=sample_weight, auto_balance=auto_balance, from_logits=from_logits, ignore_index=ignore_index, cutoff=cutoff,
label_smooth=label_smooth, reduction=reduction, enable_ohem=enable_ohem, ohem_ratio=ohem_ratio, binding_dataset_symbol=binding_dataset_symbol , input_names=input_names, output_names=output_names, name=name)
self._built = True
self.num_classes = 1
self.is_logsoftmax = False
self.need_target_onehot = True
self.is_target_onehot = False
def calculate_loss(self, output, target, **kwargs):
"""
Args:
output ():
target ():
**kwargs ():
Returns:
"""
return nn.functional.kl_div(output, target, reduction='none', log_target=False)
class L1Loss(_PairwiseLoss):
r"""l1_loss(input, target, size_average=None, reduce=None, reduction='mean') -> Tensor
Function that takes the mean element-wise absolute value difference.
See :class:`~torch.nn.L1Loss` for details.
"""
def __init__(self, reduction='mean', enable_ohem=False, ohem_ratio=3.5, input_names=None, output_names=None,name='L1Loss'):
super(L1Loss, self).__init__(reduction=reduction, enable_ohem=enable_ohem, ohem_ratio=ohem_ratio , input_names=input_names, output_names=output_names, name=name)
self.name = name
self.reduction = reduction
def calculate_loss(self, output, target, **kwargs):
"""
Args:
output ():
target ():
**kwargs ():
Returns:
"""
batch = int_shape(output)[0]
return F.l1_loss(output.view(batch, -1), target.view(batch, -1), reduction='none')
class L2Loss(_PairwiseLoss):
r"""mse_loss(input, target, size_average=None, reduce=None, reduction='mean') -> Tensor
Measures the element-wise mean squared error.
See :class:`~torch.nn.MSELoss` for details.
"""
def __init__(self, reduction='mean', enable_ohem=False, ohem_ratio=3.5, input_names=None, output_names=None ,name='MSELoss'):
super(L2Loss, self).__init__(reduction=reduction, enable_ohem=enable_ohem, ohem_ratio=ohem_ratio, input_names=input_names, output_names=output_names,name=name)
self.name = name
self.reduction = reduction
def calculate_loss(self, output, target, **kwargs):
"""
Args:
output ():
target ():
**kwargs ():
Returns:
"""
batch = int_shape(output)[0]
return 0.5 * ((output.view(batch, -1)-target.view(batch, -1))**2)
class SmoothL1Loss(_PairwiseLoss):
r"""Function that uses a squared term if the absolute
element-wise error falls below 1 and an L1 term otherwise.
See :class:`~torch.nn.SmoothL1Loss` for details.
"""
def __init__(self, reduction='mean', enable_ohem=False, ohem_ratio=3.5, input_names=None, output_names=None , name='SmoothL1Loss'):
super(SmoothL1Loss, self).__init__(enable_ohem=enable_ohem, ohem_ratio=ohem_ratio, reduction=reduction,input_names=input_names, output_names=output_names, name=name)
self.name = name
self.reduction = reduction
self.huber_delta = 0.5
def calculate_loss(self, output, target, **kwargs):
"""
Args:
output ():
target ():
**kwargs ():
Returns:
"""
batch = int_shape(output)[0]
return F.smooth_l1_loss(output.view(batch, -1), target.view(batch, -1), reduction='none')
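# Hedged numeric check (added for illustration): F.smooth_l1_loss with the default beta of 1.0 behaves
# like 0.5*x**2 for |x| < 1 and like |x| - 0.5 otherwise, which is what calculate_loss above relies on.
# The helper name and sample values are assumptions for this sketch only.
def _smooth_l1_sketch(diff):
    import torch
    absdiff = diff.abs()
    return torch.where(absdiff < 1.0, 0.5 * absdiff ** 2, absdiff - 0.5)
# e.g. _smooth_l1_sketch(torch.tensor([0.2, 3.0])) -> tensor([0.0200, 2.5000])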
class MSELoss(_PairwiseLoss):
r"""mse_loss(input, target, size_average=None, reduce=None, reduction='mean') -> Tensor
Measures the element-wise mean squared error.
See :class:`~torch.nn.MSELoss` for details.
"""
def __init__(self, reduction='mean', enable_ohem=False, ohem_ratio=3.5, input_names=None, output_names=None,name='MSELoss'):
super(MSELoss, self).__init__(reduction=reduction, enable_ohem=enable_ohem, ohem_ratio=ohem_ratio , input_names=input_names, output_names=output_names, name=name)
self.name = name
self.reduction = reduction
def calculate_loss(self, output, target, **kwargs):
"""
Args:
output ():
target ():
**kwargs ():
Returns:
"""
batch = int_shape(output)[0]
return ((output.view(batch, -1)-target.view(batch, -1))**2)
class WingLoss(_PairwiseLoss):
def __init__(self, omega=10, epsilon=2, input_names=None, output_names=None,name='WingLoss'):
super(WingLoss, self).__init__( input_names=input_names, output_names=output_names, name=name)
self.name = name
self.omega = omega
self.epsilon = epsilon
def calculate_loss(self, output, target, **kwargs):
"""
Args:
output ():
target ():
**kwargs ():
Returns:
"""
delta_y = (target - output).abs()
c = self.omega * (1.0 - log(1.0 + self.omega / self.epsilon))
        # Wing loss (Feng et al., 2018): logarithmic branch for small residuals (|x| < omega),
        # linear branch (|x| - c) for large residuals, so the two pieces meet at |x| == omega.
        losses = where(
            greater(delta_y, self.omega),
            delta_y - c,
            self.omega * log(1.0 + delta_y / self.epsilon)
        )
return losses
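# Hedged numeric check (added for illustration): with the Wing Loss definition used above, the two
# branches meet at |x| == omega; for omega=10, epsilon=2 both sides evaluate to 10*ln(6) ~= 17.92.
# The helper name below is an assumption for this sketch only.
def _wing_loss_sketch(delta, omega=10.0, epsilon=2.0):
    import math
    c = omega * (1.0 - math.log(1.0 + omega / epsilon))
    if delta < omega:
        return omega * math.log(1.0 + delta / epsilon)
    return delta - c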
class AdaptiveWingLoss(_PairwiseLoss):
def __init__(self, omega=14, theta=0.5, epsilon=1, alpha=2.1, input_names=None, output_names=None,name='AdaptiveWingLoss'):
super(AdaptiveWingLoss, self).__init__( input_names=input_names, output_names=output_names, name=name)
self.name = name
self.omega = omega
self.theta = theta
self.epsilon = epsilon
self.alpha = alpha
def calculate_loss(self, output, target, **kwargs):
"""
Args:
            output ():
import argparse
import math
import random
import os
import numpy as np
import torch
from torch import nn, autograd, optim
from torch.nn import functional as F
from torch.utils import data
import torch.distributed as dist
from torchvision import transforms, utils
from tqdm import tqdm
import torchvision.datasets as dset
from dataset import MultiResolutionDataset
from distributed import (
get_rank,
synchronize,
reduce_loss_dict,
reduce_sum,
get_world_size,
)
from op import conv2d_gradfix
from non_leaking import augment, AdaptiveAugment
def data_sampler(dataset, shuffle, distributed):
if distributed:
return data.distributed.DistributedSampler(dataset, shuffle=shuffle)
if shuffle:
return data.RandomSampler(dataset)
else:
return data.SequentialSampler(dataset)
def requires_grad(model, flag=True):
for p in model.parameters():
p.requires_grad = flag
def accumulate(model1, model2, decay=0.999):
par1 = dict(model1.named_parameters())
par2 = dict(model2.named_parameters())
for k in par1.keys():
par1[k].data.mul_(decay).add_(par2[k].data, alpha=1 - decay)
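# Hedged usage note (added for illustration): `accumulate` keeps an exponential moving average of
# parameters, i.e. ema_param = decay * ema_param + (1 - decay) * live_param. In train() below the real
# call is accumulate(g_ema, g_module, accum); the tiny stand-in models here are assumptions.
def _accumulate_example():
    ema_model = nn.Linear(4, 4)
    live_model = nn.Linear(4, 4)
    accumulate(ema_model, live_model, decay=0.999)
    return ema_model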
def sample_data(loader):
while True:
for batch in loader:
yield batch
def d_logistic_loss(real_pred, fake_pred):
real_loss = F.softplus(-real_pred)
fake_loss = F.softplus(fake_pred)
return real_loss.mean() + fake_loss.mean()
def d_r1_loss(real_pred, real_img):
with conv2d_gradfix.no_weight_gradients():
grad_real, = autograd.grad(
outputs=real_pred.sum(), inputs=real_img, create_graph=True
)
grad_penalty = grad_real.pow(2).reshape(
grad_real.shape[0], -1).sum(1).mean()
return grad_penalty
def g_nonsaturating_loss(fake_pred):
loss = F.softplus(-fake_pred).mean()
return loss
def g_path_regularize(path_lengths,mean_path_length, decay=0.01):
path_mean = mean_path_length + decay * \
(path_lengths.mean() - mean_path_length)
path_penalty = (path_lengths - path_mean).pow(2).mean()
return path_penalty, path_mean.detach(), path_lengths
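# Hedged sketch (added for illustration): how the loss helpers above combine on dummy discriminator
# scores. The tensor shapes and values are assumptions for this example only.
def _loss_helpers_example():
    real_pred = torch.randn(8, 1)
    fake_pred = torch.randn(8, 1)
    d_loss = d_logistic_loss(real_pred, fake_pred)  # softplus(-real).mean() + softplus(fake).mean()
    g_loss = g_nonsaturating_loss(fake_pred)        # softplus(-fake).mean()
    return d_loss, g_loss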
def make_noise(batch, latent_dim, n_noise, device):
if n_noise == 1:
return torch.randn(batch, latent_dim, device=device)
noises = torch.randn(n_noise, batch, latent_dim, device=device).unbind(0)
return noises
def mixing_noise(batch, latent_dim, prob, device):
if prob > 0 and random.random() < prob:
return make_noise(batch, latent_dim, 2, device)
else:
return [make_noise(batch, latent_dim, 1, device)]
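# Hedged usage note (added for illustration): mixing_noise returns two latents (style mixing) with
# probability `prob`, otherwise a single latent. Batch and latent sizes below are assumptions.
def _mixing_noise_example(device="cpu"):
    z = mixing_noise(4, 512, 0.9, device)
    return len(z)  # 2 with probability 0.9, otherwise 1; each latent has shape [4, 512]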
def set_grad_none(model, targets):
for n, p in model.named_parameters():
if n in targets:
p.grad = None
def train(args, loader, generator, discriminator, g_optim, d_optim, g_ema, device):
'''
    Trains a family of generative models intended to be used downstream with the ILO inversion method for solving inverse problems.
    If args.rtil is True, a family of generative models is trained; otherwise a single model is trained with the standard GAN objective.
'''
loader = sample_data(loader)
pbar = range(args.iter)
if get_rank() == 0:
pbar = tqdm(pbar, initial=args.start_iter,
dynamic_ncols=True, smoothing=0.01)
if args.rtil==True:
mean_path_length1 = 0
mean_path_length2 = 0
mean_path_length3 = 0
mean_path_length4 = 0
mean_path_length5 = 0
else:
mean_path_length1 = 0
d_loss_val = 0
r1_loss = torch.tensor(0.0, device=device)
g_loss_val = 0
path_loss = torch.tensor(0.0, device=device)
path_lengths = torch.tensor(0.0, device=device)
mean_path_length_avg = 0
loss_dict = {}
if args.distributed:
g_module = generator.module
d_module = discriminator.module
else:
g_module = generator
d_module = discriminator
accum = 0.5 ** (32 / (10 * 1000))
ada_aug_p = args.augment_p if args.augment_p > 0 else 0.0
r_t_stat = 0
if args.augment and args.augment_p == 0:
ada_augment = AdaptiveAugment(
args.ada_target, args.ada_length, 8, device)
if args.rtil==True:
sample_z = torch.randn(args.n_sample, args.latent, device=device)
sample_z1 = torch.randn(args.n_sample, args.latent, device=device)
sample_z2 = torch.randn(args.n_sample, args.latent, device=device)
sample_z3 = torch.randn(args.n_sample, args.latent, device=device)
sample_z4 = torch.randn(args.n_sample, args.latent, device=device)
else:
sample_z = torch.randn(args.n_sample, args.latent, device=device)
for idx in pbar:
i = idx + args.start_iter
if i > args.iter:
print("Done!")
break
real_img = next(loader)
real_img = real_img[0].to(device)
requires_grad(generator, False)
requires_grad(discriminator, True)
if args.rtil==True:
noise = mixing_noise(args.batch, args.latent, args.mixing, device)
noise1 = mixing_noise(args.batch, args.latent, args.mixing, device)
noise2 = mixing_noise(args.batch, args.latent, args.mixing, device)
noise3 = mixing_noise(args.batch, args.latent, args.mixing, device)
noise4 = mixing_noise(args.batch, args.latent, args.mixing, device)
else:
noise = mixing_noise(args.batch, args.latent, args.mixing, device)
if args.rtil==True:
fake_img,_ = generator(noise, rtil=0)
fake_img1,_ = generator(noise1, rtil=1)
fake_img2,_ = generator(noise2, rtil=2)
fake_img3,_ = generator(noise3, rtil=3)
fake_img4,_ = generator(noise4,rtil=4)
else:
fake_img,_ = generator(noise, rtil=0)
if args.augment:
real_img_aug, _ = augment(real_img, ada_aug_p)
fake_img, _ = augment(fake_img, ada_aug_p)
else:
real_img_aug = real_img
if args.rtil==True:
fake_pred = discriminator(fake_img)
fake_pred1 = discriminator(fake_img1)
fake_pred2 = discriminator(fake_img2)
fake_pred3 = discriminator(fake_img3)
fake_pred4 = discriminator(fake_img4)
else:
fake_pred = discriminator(fake_img)
real_pred = discriminator(real_img_aug)
if args.rtil==True:
d_loss1 = d_logistic_loss(real_pred, fake_pred)
d_loss2 = d_logistic_loss(real_pred, fake_pred1)
d_loss3 = d_logistic_loss(real_pred, fake_pred2)
d_loss4 = d_logistic_loss(real_pred, fake_pred3)
d_loss5 = d_logistic_loss(real_pred, fake_pred4)
d_loss = 1/5 * (d_loss1 + d_loss2 + d_loss3 +
d_loss4 + d_loss5)
else:
d_loss = d_logistic_loss(real_pred, fake_pred)
loss_dict["d"] = d_loss
loss_dict["real_score"] = real_pred.mean()
loss_dict["fake_score"] = fake_pred.mean()
discriminator.zero_grad()
d_loss.backward()
d_optim.step()
if args.augment and args.augment_p == 0:
ada_aug_p = ada_augment.tune(real_pred)
r_t_stat = ada_augment.r_t_stat
d_regularize = i % args.d_reg_every == 0
if d_regularize:
real_img.requires_grad = True
if args.augment:
real_img_aug, _ = augment(real_img, ada_aug_p)
else:
real_img_aug = real_img
real_pred = discriminator(real_img_aug)
r1_loss = d_r1_loss(real_pred, real_img)
discriminator.zero_grad()
(args.r1 / 2 * r1_loss * args.d_reg_every +
0 * real_pred[0]).backward()
d_optim.step()
loss_dict["r1"] = r1_loss
requires_grad(generator, True)
requires_grad(discriminator, False)
if args.rtil==True:
noise = mixing_noise(args.batch, args.latent, args.mixing, device)
noise1 = mixing_noise(args.batch, args.latent, args.mixing, device)
noise2 = mixing_noise(args.batch, args.latent, args.mixing, device)
noise3 = mixing_noise(args.batch, args.latent, args.mixing, device)
noise4 = mixing_noise(args.batch, args.latent, args.mixing, device)
else:
noise = mixing_noise(args.batch, args.latent, args.mixing, device)
if args.rtil==True:
fake_img, _ = generator(noise, rtil=0)
fake_img1, _ = generator(noise1, rtil=1)
fake_img2, _ = generator(noise2, rtil=2)
fake_img3, _ = generator(noise3, rtil=3)
fake_img4, _ = generator(noise4, rtil=4)
else:
            fake_img, _ = generator(noise, rtil=0)
if args.augment:
fake_img, _ = augment(fake_img, ada_aug_p)
if args.rtil==True:
fake_pred = discriminator(fake_img)
fake_pred1 = discriminator(fake_img1)
fake_pred2 = discriminator(fake_img2)
fake_pred3 = discriminator(fake_img3)
fake_pred4 = discriminator(fake_img4)
g_loss1 = g_nonsaturating_loss(fake_pred)
g_loss2 = g_nonsaturating_loss(fake_pred1)
g_loss3 = g_nonsaturating_loss(fake_pred2)
g_loss4 = g_nonsaturating_loss(fake_pred3)
g_loss5 = g_nonsaturating_loss(fake_pred4)
g_loss = 1/5*(g_loss1+g_loss2+g_loss3+g_loss4+g_loss5)
else:
fake_pred = discriminator(fake_img)
g_loss = g_nonsaturating_loss(fake_pred)
g_regularize = i % args.g_reg_every == 0
loss_dict["g"] = g_loss
generator.zero_grad()
if g_regularize:
g_loss.backward(retain_graph=True)
else:
g_loss.backward()
g_optim.step()
if g_regularize:
if args.rtil==True:
path_batch_size=args.path_batch_shrink
noise1 = mixing_noise(
path_batch_size, args.latent, args.mixing, device)
noise2 = mixing_noise(
path_batch_size, args.latent, args.mixing, device)
noise3 = mixing_noise(
path_batch_size, args.latent, args.mixing, device)
noise4 = mixing_noise(
path_batch_size, args.latent, args.mixing, device)
fake_img1,p_l1 = generator(noise1,rtil=0, path_reg=True)
fake_img2,p_l2 = generator(noise2,rtil=1, path_reg=True)
fake_img3,p_l3 = generator(noise3,rtil=2, path_reg=True)
fake_img4,p_l4 = generator(noise4,rtil=3, path_reg=True)
fake_img5,p_l5 = generator(noise4,rtil=4, path_reg=True)
path_loss1, mean_path_length1, path_lengths1 = g_path_regularize(p_l1,mean_path_length1)
path_loss2, mean_path_length2, path_lengths2 = g_path_regularize(p_l2,mean_path_length2)
path_loss3, mean_path_length3, path_lengths3 = g_path_regularize(p_l3,mean_path_length3)
path_loss4, mean_path_length4, path_lengths4 = g_path_regularize(p_l4,mean_path_length4)
path_loss5, mean_path_length5, path_lengths5 = g_path_regularize(p_l5,mean_path_length5)
path_loss=(1/5)*(path_loss1+path_loss2+path_loss3+path_loss4+path_loss5)
mean_path_length=(1/5)*(mean_path_length1+ mean_path_length2 + mean_path_length3 + mean_path_length4 + mean_path_length5)
path_lengths=(1/5)*(path_lengths1+ path_lengths2 + path_lengths3 + path_lengths4 + path_lengths5)
generator.zero_grad()
weighted_path_loss1 = args.path_regularize * args.g_reg_every * path_loss1
weighted_path_loss2 = args.path_regularize * args.g_reg_every * path_loss2
weighted_path_loss3 = args.path_regularize * args.g_reg_every * path_loss3
weighted_path_loss4 = args.path_regularize * args.g_reg_every * path_loss4
weighted_path_loss5 = args.path_regularize * args.g_reg_every * path_loss5
if args.path_batch_shrink:
weighted_path_loss1 += 0 * fake_img1[0, 0, 0, 0]
weighted_path_loss2 += 0 * fake_img2[0, 0, 0, 0]
weighted_path_loss3 += 0 * fake_img3[0, 0, 0, 0]
weighted_path_loss4 += 0 * fake_img4[0, 0, 0, 0]
weighted_path_loss5 += 0 * fake_img5[0, 0, 0, 0]
weighted_path_loss= 1/5* (weighted_path_loss1 + weighted_path_loss2 + weighted_path_loss3 + weighted_path_loss4 + weighted_path_loss5)
else:
path_batch_size=args.path_batch_shrink
noise1 = mixing_noise(path_batch_size, args.latent, args.mixing, device)
                fake_img1, p_l1 = generator(noise1, rtil=0, path_reg=True)
path_loss1, mean_path_length1, path_lengths1 = g_path_regularize(p_l1,mean_path_length1)
path_loss=path_loss1
mean_path_length=mean_path_length1
path_lengths=path_lengths1
generator.zero_grad()
weighted_path_loss1 = args.path_regularize * args.g_reg_every * path_loss1
if args.path_batch_shrink:
weighted_path_loss1 += 0 * fake_img1[0, 0, 0, 0]
weighted_path_loss=weighted_path_loss1
weighted_path_loss.backward()
g_optim.step()
mean_path_length_avg = (
reduce_sum(mean_path_length).item() / get_world_size()
)
loss_dict["path"] = path_loss
loss_dict["path_length"] = path_lengths.mean()
accumulate(g_ema, g_module, accum)
loss_reduced = reduce_loss_dict(loss_dict)
d_loss_val = loss_reduced["d"].mean().item()
g_loss_val = loss_reduced["g"].mean().item()
r1_val = loss_reduced["r1"].mean().item()
path_loss_val = loss_reduced["path"].mean().item()
real_score_val = loss_reduced["real_score"].mean().item()
fake_score_val = loss_reduced["fake_score"].mean().item()
path_length_val = loss_reduced["path_length"].mean().item()
if get_rank() == 0:
pbar.set_description(
(
f"d: {d_loss_val:.4f}; g: {g_loss_val:.4f}; r1: {r1_val:.4f}; "
f"path: {path_loss_val:.4f}; mean path: {mean_path_length_avg:.4f}; "
f"augment: {ada_aug_p:.4f}"
)
)
if i % 1000 == 0:
if args.rtil==True:
with torch.no_grad():
g_ema.eval()
sample, _, = g_ema([sample_z], rtil=0)
sample1, _, = g_ema([sample_z1], rtil=1)
sample2, _, = g_ema([sample_z2], rtil=2)
sample3, _, = g_ema([sample_z3], rtil=3)
sample4, _, = g_ema([sample_z4], rtil=4)
utils.save_image(
sample,
f"rtil/sample/{str(i).zfill(6)}.png",
nrow=int(args.n_sample ** 0.5),
normalize=True,
range=(-1, 1),
)
utils.save_image(
sample1,
f"rtil/sample1/{str(i).zfill(6)}.png",
nrow=int(args.n_sample ** 0.5),
normalize=True,
range=(-1, 1),
)
utils.save_image(
sample2,
f"rtil/sample2/{str(i).zfill(6)}.png",
nrow=int(args.n_sample ** 0.5),
normalize=True,
range=(-1, 1),
)
utils.save_image(
sample3,
f"rtil/sample3/{str(i).zfill(6)}.png",
nrow=int(args.n_sample ** 0.5),
normalize=True,
range=(-1, 1),
)
utils.save_image(
sample4,
f"rtil/sample4/{str(i).zfill(6)}.png",
nrow=int(args.n_sample ** 0.5),
normalize=True,
range=(-1, 1),
)
else:
with torch.no_grad():
g_ema.eval()
sample, _, = g_ema([sample_z], rtil=0)
utils.save_image(sample,
f"van/sample/{str(i).zfill(6)}.png",
nrow=int(args.n_sample ** 0.5),
normalize=True,
range=(-1, 1),)
if i % 5000 == 0:
if args.rtil==True:
torch.save(
{
"g": g_module.state_dict(),
"d": d_module.state_dict(),
"g_ema": g_ema.state_dict(),
"g_optim": g_optim.state_dict(),
"d_optim": d_optim.state_dict(),
"args": args,
"ada_aug_p": ada_aug_p,
},
f"rtil/checkpoint/{str(i).zfill(6)}.pt",
)
np.save(f"rtil/loss_stat/{str(i).zfill(6)}.npy", loss_dict)
else:
torch.save(
{
"g": g_module.state_dict(),
"d": d_module.state_dict(),
"g_ema": g_ema.state_dict(),
"g_optim": g_optim.state_dict(),
"d_optim": d_optim.state_dict(),
"args": args,
"ada_aug_p": ada_aug_p,
},
f"van/checkpoint/{str(i).zfill(6)}.pt",
)
np.save(f"van/loss_stat/{str(i).zfill(6)}.npy", loss_dict)
if __name__ == "__main__":
device = "cuda"
parser = argparse.ArgumentParser(description="StyleGAN2 trainer")
parser.add_argument("--rtil",type=int,default=True,help="Type of Training Mehotd ",)
parser.add_argument(
"--path", type=str, default= './ffhq', help="path to the lmdb | |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Batching dataset transformations."""
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import structured_function
from tensorflow.python.data.util import convert
from tensorflow.python.data.util import nest
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.dense_to_ragged_batch")
def dense_to_ragged_batch(batch_size,
drop_remainder=False,
row_splits_dtype=dtypes.int64):
"""A transformation that batches ragged elements into `tf.RaggedTensor`s.
This transformation combines multiple consecutive elements of the input
dataset into a single element.
Like `tf.data.Dataset.batch`, the components of the resulting element will
have an additional outer dimension, which will be `batch_size` (or
`N % batch_size` for the last element if `batch_size` does not divide the
number of input elements `N` evenly and `drop_remainder` is `False`). If
your program depends on the batches having the same outer dimension, you
should set the `drop_remainder` argument to `True` to prevent the smaller
batch from being produced.
Unlike `tf.data.Dataset.batch`, the input elements to be batched may have
different shapes:
* If an input element is a `tf.Tensor` whose static `tf.TensorShape` is
fully defined, then it is batched as normal.
* If an input element is a `tf.Tensor` whose static `tf.TensorShape` contains
one or more axes with unknown size (i.e., `shape[i]=None`), then the output
will contain a `tf.RaggedTensor` that is ragged up to any of such
dimensions.
* If an input element is a `tf.RaggedTensor` or any other type, then it is
batched as normal.
Example:
>>> dataset = tf.data.Dataset.from_tensor_slices(np.arange(6))
>>> dataset = dataset.map(lambda x: tf.range(x))
>>> dataset.element_spec.shape
TensorShape([None])
>>> dataset = dataset.apply(
... tf.data.experimental.dense_to_ragged_batch(batch_size=2))
>>> for batch in dataset:
... print(batch)
<tf.RaggedTensor [[], [0]]>
<tf.RaggedTensor [[0, 1], [0, 1, 2]]>
<tf.RaggedTensor [[0, 1, 2, 3], [0, 1, 2, 3, 4]]>
Args:
batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements of this dataset to combine in a single batch.
drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
whether the last batch should be dropped in the case it has fewer than
`batch_size` elements; the default behavior is not to drop the smaller
batch.
row_splits_dtype: The dtype that should be used for the `row_splits` of any
new ragged tensors. Existing `tf.RaggedTensor` elements do not have their
row_splits dtype changed.
Returns:
Dataset: A `Dataset`.
"""
def _apply_fn(dataset):
ragged_dataset = _DenseToRaggedDataset(dataset, row_splits_dtype)
return dataset_ops.BatchDataset(
ragged_dataset, batch_size=batch_size, drop_remainder=drop_remainder)
return _apply_fn
@tf_export("data.experimental.dense_to_sparse_batch")
def dense_to_sparse_batch(batch_size, row_shape):
"""A transformation that batches ragged elements into `tf.sparse.SparseTensor`s.
Like `Dataset.padded_batch()`, this transformation combines multiple
consecutive elements of the dataset, which might have different
shapes, into a single element. The resulting element has three
components (`indices`, `values`, and `dense_shape`), which
comprise a `tf.sparse.SparseTensor` that represents the same data. The
`row_shape` represents the dense shape of each row in the
resulting `tf.sparse.SparseTensor`, to which the effective batch size is
prepended. For example:
```python
# NOTE: The following examples use `{ ... }` to represent the
# contents of a dataset.
a = { ['a', 'b', 'c'], ['a', 'b'], ['a', 'b', 'c', 'd'] }
a.apply(tf.data.experimental.dense_to_sparse_batch(
batch_size=2, row_shape=[6])) ==
{
([[0, 0], [0, 1], [0, 2], [1, 0], [1, 1]], # indices
['a', 'b', 'c', 'a', 'b'], # values
[2, 6]), # dense_shape
([[0, 0], [0, 1], [0, 2], [0, 3]],
['a', 'b', 'c', 'd'],
[1, 6])
}
```
Args:
batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements of this dataset to combine in a single batch.
row_shape: A `tf.TensorShape` or `tf.int64` vector tensor-like object
representing the equivalent dense shape of a row in the resulting
`tf.sparse.SparseTensor`. Each element of this dataset must have the same
rank as `row_shape`, and must have size less than or equal to `row_shape`
in each dimension.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return _DenseToSparseBatchDataset(dataset, batch_size, row_shape)
return _apply_fn
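# Hedged usage sketch (added for illustration only; not part of the public module): exercises the
# transformation above on a small dataset of variable-length 1-D rows, all shorter than `row_shape`.
def _dense_to_sparse_batch_example():
  import tensorflow as tf
  ds = tf.data.Dataset.from_tensor_slices(tf.range(1, 5)).map(tf.range)
  ds = ds.apply(tf.data.experimental.dense_to_sparse_batch(batch_size=2, row_shape=[4]))
  return list(ds)  # each element is a tf.sparse.SparseTensor with dense_shape [2, 4]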
@deprecation.deprecated(None, "Use `tf.data.experimental.map_and_batch()")
@tf_export(v1=["data.experimental.map_and_batch_with_legacy_function"])
def map_and_batch_with_legacy_function(map_func,
batch_size,
num_parallel_batches=None,
drop_remainder=False,
num_parallel_calls=None):
"""Fused implementation of `map` and `batch`.
NOTE: This is an escape hatch for existing uses of `map_and_batch` that do not
work with V2 functions. New uses are strongly discouraged and existing uses
  should migrate to `map_and_batch` as this method will be removed in V2.
Args:
map_func: A function mapping a nested structure of tensors to another
nested structure of tensors.
batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements of this dataset to combine in a single batch.
num_parallel_batches: (Optional.) A `tf.int64` scalar `tf.Tensor`,
representing the number of batches to create in parallel. On one hand,
higher values can help mitigate the effect of stragglers. On the other
hand, higher values can increase contention if CPU is scarce.
drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
whether the last batch should be dropped in case its size is smaller than
desired; the default behavior is not to drop the smaller batch.
num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
representing the number of elements to process in parallel. If not
specified, `batch_size * num_parallel_batches` elements will be processed
in parallel. If the value `tf.data.AUTOTUNE` is used, then
the number of parallel calls is set dynamically based on available CPU.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
Raises:
ValueError: If both `num_parallel_batches` and `num_parallel_calls` are
specified.
"""
if num_parallel_batches is None and num_parallel_calls is None:
num_parallel_calls = batch_size
elif num_parallel_batches is not None and num_parallel_calls is None:
num_parallel_calls = batch_size * num_parallel_batches
elif num_parallel_batches is not None and num_parallel_calls is not None:
raise ValueError(
"`map_and_batch_with_legacy_function` allows only one of "
"`num_parallel_batches` and "
"`num_parallel_calls` to be set, but "
f"`num_parallel_batches` was set to {num_parallel_batches} "
f"and `num_parallel_calls` as set to {num_parallel_calls}.")
def _apply_fn(dataset):
return _MapAndBatchDataset(dataset, map_func, batch_size,
num_parallel_calls, drop_remainder,
use_legacy_function=True)
return _apply_fn
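# Hedged illustration (added; not part of the public module): the fused transform is equivalent to a
# plain map followed by batch, which is also what the deprecation notice on `map_and_batch` below
# recommends. The toy dataset and map function are assumptions for this example only.
def _map_and_batch_equivalent_example():
  import tensorflow as tf
  ds = tf.data.Dataset.range(10)
  fused = ds.apply(tf.data.experimental.map_and_batch(lambda x: x * 2, batch_size=4))
  plain = ds.map(lambda x: x * 2, num_parallel_calls=tf.data.AUTOTUNE).batch(4)
  return fused, plain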
@deprecation.deprecated(
None,
"Use `tf.data.Dataset.map(map_func, num_parallel_calls)` followed by "
"`tf.data.Dataset.batch(batch_size, drop_remainder)`. Static tf.data "
"optimizations will take care of using the fused implementation.")
@tf_export("data.experimental.map_and_batch")
def map_and_batch(map_func,
batch_size,
num_parallel_batches=None,
drop_remainder=False,
num_parallel_calls=None):
"""Fused implementation of `map` and `batch`.
Maps `map_func` across `batch_size` consecutive elements of this dataset
and then combines them into a batch. Functionally, it is equivalent to `map`
followed by `batch`. This API is temporary and deprecated since input pipeline
optimization now fuses consecutive `map` and `batch` operations automatically.
Args:
map_func: A function mapping a nested structure of tensors to another
nested structure of tensors.
batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements of this dataset to combine in a single batch.
num_parallel_batches: (Optional.) A `tf.int64` scalar `tf.Tensor`,
representing the number of batches to create in parallel. On one hand,
higher values can help mitigate the effect of stragglers. On the other
hand, higher values can increase contention if CPU is scarce.
drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
whether the last batch should be dropped in case its size is smaller than
desired; the default behavior is not to drop the smaller batch.
num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
representing the number of elements to process in parallel. If not
specified, `batch_size * num_parallel_batches` elements will be processed
in parallel. If the value `tf.data.AUTOTUNE` is used, then
the number of parallel calls is set dynamically based on available CPU.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
Raises:
ValueError: If both `num_parallel_batches` and `num_parallel_calls` are
specified.
"""
if num_parallel_batches is None and num_parallel_calls is None:
num_parallel_calls = batch_size
elif num_parallel_batches is not None and num_parallel_calls is None:
num_parallel_calls = batch_size * num_parallel_batches
elif num_parallel_batches is not None and num_parallel_calls is not None:
    raise ValueError(
        "`map_and_batch` allows only one of `num_parallel_batches` and "
        "`num_parallel_calls` to be set, but "
        f"`num_parallel_batches` was set to {num_parallel_batches} "
        f"and `num_parallel_calls` was set to {num_parallel_calls}.")
            elif self.payment_type == 'outbound':
self.partner_type = 'supplier'
elif self.payment_type not in ('inbound', 'outbound'):
self.partner_type = False
# Set payment method domain
res = self._onchange_journal()
if not res.get('domain', {}):
res['domain'] = {}
jrnl_filters = self._compute_journal_domain_and_types()
journal_types = jrnl_filters['journal_types']
journal_types.update(['bank', 'cash'])
res['domain']['journal_id'] = jrnl_filters['domain'] + [('type', 'in', list(journal_types))]
return res
def _compute_journal_domain_and_types(self):
journal_type = ['bank', 'cash']
domain = []
if self.invoice_ids:
domain.append(('company_id', '=', self.invoice_ids[0].company_id.id))
if self.currency_id.is_zero(self.amount) and self.has_invoices:
# In case of payment with 0 amount, allow to select a journal of type 'general' like
# 'Miscellaneous Operations' and set this journal by default.
journal_type = ['general']
self.payment_difference_handling = 'reconcile'
else:
if self.payment_type == 'inbound':
domain.append(('at_least_one_inbound', '=', True))
else:
domain.append(('at_least_one_outbound', '=', True))
return {'domain': domain, 'journal_types': set(journal_type)}
@api.onchange('amount', 'currency_id')
def _onchange_amount(self):
jrnl_filters = self._compute_journal_domain_and_types()
journal_types = jrnl_filters['journal_types']
domain_on_types = [('type', 'in', list(journal_types))]
if self.invoice_ids:
domain_on_types.append(('company_id', '=', self.invoice_ids[0].company_id.id))
if self.journal_id.type not in journal_types or (self.invoice_ids and self.journal_id.company_id != self.invoice_ids[0].company_id):
self.journal_id = self.env['account.journal'].search(domain_on_types + [('company_id', '=', self.env.company.id)], limit=1)
return {'domain': {'journal_id': jrnl_filters['domain'] + domain_on_types}}
@api.onchange('currency_id')
def _onchange_currency(self):
self.amount = abs(self._compute_payment_amount(self.invoice_ids, self.currency_id, self.journal_id, self.payment_date))
if self.journal_id: # TODO: only return if currency differ?
return
# Set by default the first liquidity journal having this currency if exists.
domain = [('type', 'in', ('bank', 'cash')), ('currency_id', '=', self.currency_id.id)]
if self.invoice_ids:
domain.append(('company_id', '=', self.invoice_ids[0].company_id.id))
journal = self.env['account.journal'].search(domain, limit=1)
if journal:
return {'value': {'journal_id': journal.id}}
@api.model
def _compute_payment_amount(self, invoices, currency, journal, date):
'''Compute the total amount for the payment wizard.
:param invoices: Invoices on which compute the total as an account.invoice recordset.
:param currency: The payment's currency as a res.currency record.
:param journal: The payment's journal as an account.journal record.
:param date: The payment's date as a datetime.date object.
:return: The total amount to pay the invoices.
'''
company = journal.company_id
currency = currency or journal.currency_id or company.currency_id
date = date or fields.Date.today()
if not invoices:
return 0.0
self.env['account.move'].flush(['type', 'currency_id'])
self.env['account.move.line'].flush(['amount_residual', 'amount_residual_currency', 'move_id', 'account_id'])
self.env['account.account'].flush(['user_type_id'])
self.env['account.account.type'].flush(['type'])
self._cr.execute('''
SELECT
move.type AS type,
move.currency_id AS currency_id,
SUM(line.amount_residual) AS amount_residual,
SUM(line.amount_residual_currency) AS residual_currency
FROM account_move move
LEFT JOIN account_move_line line ON line.move_id = move.id
LEFT JOIN account_account account ON account.id = line.account_id
LEFT JOIN account_account_type account_type ON account_type.id = account.user_type_id
WHERE move.id IN %s
AND account_type.type IN ('receivable', 'payable')
GROUP BY move.id, move.type
''', [tuple(invoices.ids)])
query_res = self._cr.dictfetchall()
total = 0.0
for res in query_res:
move_currency = self.env['res.currency'].browse(res['currency_id'])
if move_currency == currency and move_currency != company.currency_id:
total += res['residual_currency']
else:
total += company.currency_id._convert(res['amount_residual'], currency, company, date)
return total
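    # Hedged sketch (added for illustration; independent of the ORM): the plain aggregation performed by
    # _compute_payment_amount above over the rows returned by its SQL query. `rows`, `convert` and the
    # dict keys are assumptions for this example only.
    @staticmethod
    def _payment_total_sketch(rows, payment_currency, company_currency, convert):
        total = 0.0
        for row in rows:
            if row['currency'] == payment_currency and payment_currency != company_currency:
                total += row['residual_currency']
            else:
                total += convert(row['amount_residual'])
        return total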
def name_get(self):
return [(payment.id, payment.name or _('Draft Payment')) for payment in self]
@api.model
def _get_move_name_transfer_separator(self):
return '§§'
@api.depends('move_line_ids.reconciled')
def _get_move_reconciled(self):
for payment in self:
rec = True
for aml in payment.move_line_ids.filtered(lambda x: x.account_id.reconcile):
if not aml.reconciled:
rec = False
break
payment.move_reconciled = rec
def open_payment_matching_screen(self):
# Open reconciliation view for customers/suppliers
move_line_id = False
for move_line in self.move_line_ids:
if move_line.account_id.reconcile:
move_line_id = move_line.id
break
if not self.partner_id:
raise UserError(_("Payments without a customer can't be matched"))
action_context = {'company_ids': [self.company_id.id], 'partner_ids': [self.partner_id.commercial_partner_id.id]}
if self.partner_type == 'customer':
action_context.update({'mode': 'customers'})
elif self.partner_type == 'supplier':
action_context.update({'mode': 'suppliers'})
if move_line_id:
action_context.update({'move_line_id': move_line_id})
return {
'type': 'ir.actions.client',
'tag': 'manual_reconciliation_view',
'context': action_context,
}
@api.depends('invoice_ids', 'payment_type', 'partner_type', 'partner_id')
def _compute_destination_account_id(self):
self.destination_account_id = False
for payment in self:
if payment.invoice_ids:
payment.destination_account_id = payment.invoice_ids[0].mapped(
'line_ids.account_id').filtered(
lambda account: account.user_type_id.type in ('receivable', 'payable'))[0]
elif payment.payment_type == 'transfer':
if not payment.company_id.transfer_account_id.id:
raise UserError(_('There is no Transfer Account defined in the accounting settings. Please define one to be able to confirm this transfer.'))
payment.destination_account_id = payment.company_id.transfer_account_id.id
elif payment.partner_id:
partner = payment.partner_id.with_context(force_company=payment.company_id.id)
if payment.partner_type == 'customer':
payment.destination_account_id = partner.property_account_receivable_id.id
else:
payment.destination_account_id = partner.property_account_payable_id.id
elif payment.partner_type == 'customer':
default_account = self.env['ir.property'].with_context(force_company=payment.company_id.id).get('property_account_receivable_id', 'res.partner')
payment.destination_account_id = default_account.id
elif payment.partner_type == 'supplier':
default_account = self.env['ir.property'].with_context(force_company=payment.company_id.id).get('property_account_payable_id', 'res.partner')
payment.destination_account_id = default_account.id
@api.depends('move_line_ids.matched_debit_ids', 'move_line_ids.matched_credit_ids')
def _compute_reconciled_invoice_ids(self):
for record in self:
reconciled_moves = record.move_line_ids.mapped('matched_debit_ids.debit_move_id.move_id')\
+ record.move_line_ids.mapped('matched_credit_ids.credit_move_id.move_id')
record.reconciled_invoice_ids = reconciled_moves.filtered(lambda move: move.is_invoice())
record.has_invoices = bool(record.reconciled_invoice_ids)
record.reconciled_invoices_count = len(record.reconciled_invoice_ids)
def action_register_payment(self):
active_ids = self.env.context.get('active_ids')
if not active_ids:
return ''
return {
'name': _('Register Payment'),
'res_model': len(active_ids) == 1 and 'account.payment' or 'account.payment.register',
'view_mode': 'form',
'view_id': len(active_ids) != 1 and self.env.ref('account.view_account_payment_form_multi').id or self.env.ref('account.view_account_payment_invoice_form').id,
'context': self.env.context,
'target': 'new',
'type': 'ir.actions.act_window',
}
def button_journal_entries(self):
return {
'name': _('Journal Items'),
'view_mode': 'tree,form',
'res_model': 'account.move.line',
'view_id': False,
'type': 'ir.actions.act_window',
'domain': [('payment_id', 'in', self.ids)],
}
def button_invoices(self):
return {
'name': _('Paid Invoices'),
'view_mode': 'tree,form',
'res_model': 'account.move',
'view_id': False,
'views': [(self.env.ref('account.view_move_tree').id, 'tree'), (self.env.ref('account.view_move_form').id, 'form')],
'type': 'ir.actions.act_window',
'domain': [('id', 'in', [x.id for x in self.reconciled_invoice_ids])],
'context': {'create': False},
}
def unreconcile(self):
""" Set back the payments in 'posted' or 'sent' state, without deleting the journal entries.
Called when cancelling a bank statement line linked to a pre-registered payment.
"""
for payment in self:
if payment.payment_reference:
payment.write({'state': 'sent'})
else:
payment.write({'state': 'posted'})
def cancel(self):
self.write({'state': 'cancelled'})
def unlink(self):
if any(bool(rec.move_line_ids) for rec in self):
raise UserError(_("You cannot delete a payment that is already posted."))
if any(rec.move_name for rec in self):
raise UserError(_('It is not allowed to delete a payment that already created a journal entry since it would create a gap in the numbering. You should create the journal entry again and cancel it thanks to a regular revert.'))
return super(account_payment, self).unlink()
def _prepare_payment_moves(self):
''' Prepare the creation of journal entries (account.move) by creating a list of python dictionary to be passed
to the 'create' method.
Example 1: outbound with write-off:
Account | Debit | Credit
---------------------------------------------------------
BANK | 900.0 |
RECEIVABLE | | 1000.0
WRITE-OFF ACCOUNT | 100.0 |
Example 2: internal transfer from BANK to CASH:
Account | Debit | Credit
---------------------------------------------------------
BANK | | 1000.0
TRANSFER | 1000.0 |
CASH | 1000.0 |
TRANSFER | | 1000.0
:return: A list of Python dictionary to be passed to env['account.move'].create.
'''
all_move_vals = []
for payment in self:
company_currency = payment.company_id.currency_id
move_names = payment.move_name.split(payment._get_move_name_transfer_separator()) if payment.move_name else None
# Compute amounts.
write_off_amount = payment.payment_difference_handling == 'reconcile' and -payment.payment_difference or 0.0
if payment.payment_type in ('outbound', 'transfer'):
counterpart_amount = payment.amount
liquidity_line_account = payment.journal_id.default_debit_account_id
else:
counterpart_amount = -payment.amount
liquidity_line_account = payment.journal_id.default_credit_account_id
# Manage currency.
if payment.currency_id == company_currency:
# Single-currency.
balance = counterpart_amount
write_off_balance = write_off_amount
counterpart_amount = write_off_amount = 0.0
currency_id = False
else:
# Multi-currencies.
balance = payment.currency_id._convert(counterpart_amount, company_currency, payment.company_id, payment.payment_date)
write_off_balance = payment.currency_id._convert(write_off_amount, company_currency, payment.company_id, payment.payment_date)
currency_id = payment.currency_id.id
# Manage custom currency on journal for liquidity line.
if payment.journal_id.currency_id and payment.currency_id != payment.journal_id.currency_id:
# Custom currency on journal.
if payment.journal_id.currency_id == company_currency:
# Single-currency
liquidity_line_currency_id = False
else:
liquidity_line_currency_id = payment.journal_id.currency_id.id
liquidity_amount = company_currency._convert(
balance, payment.journal_id.currency_id, payment.company_id, payment.payment_date)
else:
# Use the payment currency.
liquidity_line_currency_id = currency_id
liquidity_amount = counterpart_amount
# Compute 'name' to be used in receivable/payable line.
rec_pay_line_name = ''
if payment.payment_type == 'transfer':
rec_pay_line_name = payment.name
else:
if payment.partner_type == 'customer':
if payment.payment_type == 'inbound':
rec_pay_line_name += _("Customer Payment")
elif payment.payment_type == 'outbound':
rec_pay_line_name += _("Customer Credit Note")
elif payment.partner_type == 'supplier':
if payment.payment_type == 'inbound':
rec_pay_line_name += _("Vendor Credit Note")
elif payment.payment_type == 'outbound':
rec_pay_line_name += _("Vendor Payment")
if payment.invoice_ids:
rec_pay_line_name += ': %s' % ', '.join(payment.invoice_ids.mapped('name'))
# Compute 'name' to be used in liquidity line.
if payment.payment_type == 'transfer':
liquidity_line_name = _('Transfer to %s') % payment.destination_journal_id.name
else:
liquidity_line_name = payment.name
# ==== 'inbound' / 'outbound' ====
move_vals = {
'date': payment.payment_date,
'ref': payment.communication,
'journal_id': payment.journal_id.id,
'currency_id': payment.journal_id.currency_id.id or payment.company_id.currency_id.id,
'partner_id': payment.partner_id.id,
'line_ids': [
# Receivable / Payable / Transfer line.
(0, 0, {
'name': rec_pay_line_name,
'amount_currency': counterpart_amount + write_off_amount if currency_id else 0.0,
'currency_id': currency_id,
'debit': balance + write_off_balance > 0.0 and balance + write_off_balance or 0.0,
'credit': balance + write_off_balance < 0.0 and -balance - write_off_balance or 0.0,
'date_maturity': payment.payment_date,
'partner_id': payment.partner_id.commercial_partner_id.id,
'account_id': payment.destination_account_id.id,
'payment_id': payment.id,
}),
# Liquidity line.
(0, 0, {
'name': liquidity_line_name,
'amount_currency': -liquidity_amount if liquidity_line_currency_id else 0.0,
'currency_id': liquidity_line_currency_id,
'debit': balance < 0.0 and -balance or 0.0,
'credit': balance > 0.0 and balance or 0.0,
'date_maturity': payment.payment_date,
'partner_id': payment.partner_id.commercial_partner_id.id,
'account_id': liquidity_line_account.id,
'payment_id': payment.id,
}),
],
}
if write_off_balance:
# Write-off line.
move_vals['line_ids'].append((0, 0, {
'name': payment.writeoff_label,
'amount_currency': -write_off_amount,
'currency_id':
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import sys
sys.path.insert(0, '..')
from algorithms.dbscan_gmm import DBSCAN_GMM
from algorithms.grid_based_dbscan import GridBasedDBSCAN
from algorithms.grid_based_dbscan_gmm import GridBasedDBSCAN_GMM
from utilities.plot_utils import *
from utility import Skills, ScatterDetection
import pandas as pd
import datetime
from plots_report import *
from get_sd_data import *
from matplotlib.dates import date2num, num2date
from sma import ScatterTypeDetection
def estimate_kappa(l1, l2):
from sklearn.metrics import cohen_kappa_score
print(len(l1), len(l1[l1==0]), len(l1[l1==1]), len(l1[l1==-1]), len(l2[l2==0]), len(l2[l2==1]), len(l2[l2==-1]))
k = cohen_kappa_score(l1.astype(int), l2.astype(int))
print(k)
return k
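# Illustrative usage (sketch, not part of the original script); labels are assumed to be coded as
# -1 (unassigned), 0 (ionospheric scatter) and 1 (ground scatter):
# estimate_kappa(np.array([1, 0, 1, -1]), np.array([1, 0, 0, -1])) # prints counts and Cohen's kappa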
def estimate_skills(_dict_, labels):
V, W, L = [], [], []
for v, w, l in zip(_dict_["vel"], _dict_["wid"], labels):
V.extend(v.tolist())
W.extend(w.tolist())
L.extend(l.tolist())
V, W, L = np.array(V), np.array(W), np.array(L)
X = np.array([V.tolist(), W.tolist()]).T
sk = Skills(X, L)
return sk
def estimate_df_skills(df, labels):
V, W, L = [], [], []
V, W, L = np.array(df.v), np.array(df.w_l), np.array(df.labels)
X = np.array([V.tolist(), W.tolist()]).T
sk = Skills(X, L)
return sk
def _filter_by_time(start_time, end_time, data_dict):
time = data_dict['time']
start_i, end_i = None, None
start_time, end_time = date2num(start_time), date2num(end_time)
if start_time < time[0][0]: # Sometimes start time is a few seconds before the first scan
start_time = time[0][0]
for i, t in enumerate(time):
if np.sum(start_time >= t) > 0 and start_i == None:
start_i = i
if np.sum(end_time > t) > 0 and start_i != None:
end_i = i+1
data_dict['gate'] = data_dict['gate'][start_i:end_i]
data_dict['time'] = data_dict['time'][start_i:end_i]
data_dict['beam'] = data_dict['beam'][start_i:end_i]
data_dict['vel'] = data_dict['vel'][start_i:end_i]
data_dict['wid'] = data_dict['wid'][start_i:end_i]
data_dict['elv'] = data_dict['elv'][start_i:end_i]
data_dict['trad_gsflg'] = data_dict['trad_gsflg'][start_i:end_i]
return data_dict
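# _filter_by_time trims every per-scan array (gate, time, beam, vel, wid, elv, trad_gsflg) to the same
# [start_i:end_i] window so that only scans falling inside [start_time, end_time] are kept.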
def todf(dicts, keys=['gate', 'beam', 'vel', 'wid', 'time', 'trad_gsflg', 'elv', 'pow', 'clust_flg']):
df = pd.DataFrame()
_o = {}
print(dicts.keys())
for k in keys:
_o[k] = []
for x in dicts[k]:
_o[k].extend(x)
df = pd.DataFrame.from_records(_o)
df = df.rename(columns={'gate':"slist", 'beam':"bmnum", 'vel':'v', 'wid':"w_l",
'time':"time", 'pow':"p_l", 'clust_flg':"labels"})
return df
def sma_bbox(scans, sdur=5, idx=None, dbeam=15, window=7):
df = pd.DataFrame()
plot=False
for i in range(int(len(scans)/sdur)):
if (idx is not None) and (i == idx): plot=True
if i == 0: mlf = MiddleLatFilter(rad, scans=scans[i*sdur:(i+1)*sdur], plot=plot)
elif i == int(len(scans)/sdur)-1: mlf._reset_(rad, scans[i*sdur:], plot=plot)
else: mlf._reset_(rad, scans[i*sdur:(i+1)*sdur], plot=plot)
dx = mlf.doFilter(fdata, dbeam=dbeam, window=window)
slist = np.array(dx.slist)
labs = np.array(dx["labels"])
labs[labs<0] = np.nan
labs = labs + (10*i)
labs[np.isnan(labs)] = -1
dx["labels"] = labs
df = pd.concat([df, dx])
return df
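# Each block of 'sdur' scans is filtered separately; cluster labels from block i are offset by 10*i
# (with -1 reserved for noise) so that labels remain distinct when the per-block results are concatenated.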
def lower_range(df, gf=None):
u = df.copy()
slist = np.array(u.slist)
labs = np.array(u["labels"])
if gf is not None: labs[slist<8] = gf
u["labels"] = labs
return u
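# lower_range overwrites the cluster labels of echoes below range gate 8 with 'gf' (e.g. -1), which is
# used later in this script to mark close-range echoes as unclustered before scatter-type classification.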
# In[2]:
case = 3
if case == 0:
start_time = datetime.datetime(2017, 4, 4)
end_time = datetime.datetime(2017, 4, 5)
rad, bm = "cvw",7
#start_time = datetime.datetime(2015, 3, 17)
#end_time = datetime.datetime(2015, 3, 17, 12)
#rad, bm = "bks",13
db = DBSCAN_GMM(start_time, end_time, rad, BoxCox=True, load_model=False, save_model=False, run_gmm=False)
setattr(db, "skill", estimate_skills(db.data_dict, db.clust_flg))
dbgmm = DBSCAN_GMM(start_time, end_time, rad, BoxCox=True, load_model=False, save_model=True)
setattr(dbgmm, "skill", estimate_skills(dbgmm.data_dict, dbgmm.clust_flg))
gbdb = GridBasedDBSCAN(start_time, end_time, rad, load_model=False, save_model=True)
setattr(gbdb, "skill", estimate_skills(gbdb.data_dict, gbdb.clust_flg))
gbdbgmm = GridBasedDBSCAN_GMM(start_time, end_time, rad, load_model=False, save_model=True)
setattr(gbdbgmm, "skill", estimate_skills(gbdbgmm.data_dict, gbdbgmm.clust_flg))
rti = RangeTimePlot(110, np.unique(np.hstack(db.data_dict["time"])), "", num_subplots=7)
rti.addClusterPlot(db.data_dict, db.clust_flg, bm, "DBSCAN", label_clusters=True, skill=db.skill)
rti.addClusterPlot(dbgmm.data_dict, dbgmm.clust_flg, bm, "DBSCAN + GMM", label_clusters=True, skill=dbgmm.skill)
rti.addClusterPlot(gbdb.data_dict, gbdb.clust_flg, bm, "GB-DBSCAN", label_clusters=True, skill=gbdb.skill)
rti.addClusterPlot(gbdbgmm.data_dict, gbdbgmm.clust_flg, bm, "GB-DBSCAN + GMM ", label_clusters=True, xlabel="Time, UT",
skill=gbdbgmm.skill)
#rti.save("figs/rti.example.ii.png")
rti.save("figs/rti.example.png")
if case == 1:
plot_acfs(rad="kap")
plot_lims(False)
plot_lims(True)
plot_rad_acfs()
plot_hist_hr()
plot_hist_hrw()
probabilistic_curve()
if case == 2:
start_time = datetime.datetime(2015, 3, 17)
end_time = datetime.datetime(2015, 3, 17, 12)
rad, bm = "bks",15
db = DBSCAN_GMM(start_time, end_time, rad, BoxCox=True, load_model=False, save_model=True, run_gmm=False)
rti = RangeTimePlot(110, np.unique(np.hstack(db.data_dict["time"])), "", num_subplots=3)
rti.addClusterPlot(db.data_dict, db.clust_flg, bm, "DBSCAN", label_clusters=True, skill=None)
rti.addGSISPlot(db.data_dict, db.data_dict["trad_gsflg"], bm, "GS-ID:Traditional", show_closerange=True, xlabel='')
rti.addVelPlot(db.data_dict, bm, "Velocity", vel_max=200, vel_step=50, xlabel='Time UT')
rti.save("figs/dbscan.trad.png")
if case == 3:
start_time = datetime.datetime(2015, 3, 17)
start_time = datetime.datetime(2010, 1, 15)
end_time = datetime.datetime(2015, 3, 17, 12)
end_time = datetime.datetime(2010, 1, 16)
rads, bm, crj = ["bks"],7, 0
start_time = datetime.datetime(2017, 4, 4)
end_time = datetime.datetime(2017, 4, 5)
#kinds = ["dbscan", "dbscan-gmm", "gb-dbscan", "gb-dbscan-gmm"]
rads, bm, crj = ["cvw"],7, 0
#kinds = ["gb-dbscan-gmm"]
kinds = ["dbscan"]
for rad in rads:
for kind in kinds:
if len(kinds) == 1: dbx = DBSCAN_GMM(start_time, end_time, rad, BoxCox=True, load_model=False, save_model=True, run_gmm=False)
if kind == "dbscan": db = DBSCAN_GMM(start_time, end_time, rad, BoxCox=True, load_model=False, save_model=True, run_gmm=False)
if kind == "dbscan-gmm": db = DBSCAN_GMM(start_time, end_time, rad, BoxCox=True, load_model=False, save_model=True, run_gmm=True)
if kind == "gb-dbscan": db = GridBasedDBSCAN(start_time, end_time, rad, load_model=False, save_model=True)
if kind == "gb-dbscan-gmm": db = GridBasedDBSCAN_GMM(start_time, end_time, rad, load_model=False, save_model=True,
features=['beam', 'gate', 'time','vel','wid'], scan_eps=10)
sd = ScatterDetection(db.data_dict)
#rti = RangeTimePlot(110, np.unique(np.hstack(db.data_dict["time"])), "", num_subplots=6)
#rti.addClusterPlot(db.data_dict, db.clust_flg, bm, kind.upper(), label_clusters=True, skill=None)
#rti.addGSISPlot(db.data_dict, sd.run(kind=1, case=0), bm, "GS-ID:Median(Sudden)", show_closerange=True, xlabel='')
#rti.addGSISPlot(db.data_dict, sd.run(kind=1, case=1), bm, "GS-ID:Median(Blanchard 2006)", show_closerange=True, xlabel='')
#rti.addGSISPlot(db.data_dict, sd.run(kind=1, case=2), bm, "GS-ID:Median(Blanchard 2009)", show_closerange=True, xlabel='')
#rti.addGSISPlot(db.data_dict, sd.run(kind=1, case=3), bm, "GS-ID:Median(Proposed)", show_closerange=True, xlabel='')
#rti.addVelPlot(db.data_dict, bm, "Velocity", vel_max=200, vel_step=50, xlabel='Time UT')
#rti.save("figs/%s.median.png"%kind)
#rti = RangeTimePlot(110, np.unique(np.hstack(db.data_dict["time"])), "", num_subplots=6)
#rti.addClusterPlot(db.data_dict, db.clust_flg, bm, kind.upper(), label_clusters=True, skill=None)
#rti.addGSISPlot(db.data_dict, sd.run(kind=2, thresh=[0.1,0.9], case=0), bm, "GS-ID:Median(Sudden)", show_closerange=True, xlabel='')
#rti.addGSISPlot(db.data_dict, sd.run(kind=2, thresh=[0.1,0.9], case=1), bm, "GS-ID:Median(Blanchard 2006)",
# show_closerange=True, xlabel='')
#rti.addGSISPlot(db.data_dict, sd.run(kind=2, thresh=[0.1,0.9], case=2), bm, "GS-ID:Median(Blanchard 2009)",
# show_closerange=True, xlabel='')
#rti.addGSISPlot(db.data_dict, sd.run(kind=2, case=3), bm, "GS-ID:Median(Proposed)", show_closerange=True, xlabel='')
#rti.addVelPlot(db.data_dict, bm, "Velocity", vel_max=200, vel_step=50, xlabel='Time UT')
#rti.save("figs/%s.kde.png"%kind)
rti = RangeTimePlot(110, np.unique(np.hstack(db.data_dict["time"])), "", num_subplots=5)
rti.addGSISPlot(db.data_dict, db.data_dict["trad_gsflg"], bm, "GsI:[Traditional]", show_closerange=True, xlabel='', close_range_black=crj)
rti.addClusterPlot(db.data_dict, db.clust_flg, bm, kind.upper(), label_clusters=True, skill=None, close_range_black=crj)
rti.addGSISPlot(db.data_dict, sd.run(kind=1, case=0), bm, "GsI:[Sudden]", show_closerange=True, xlabel='', close_range_black=crj)
rti.addGSISPlot(db.data_dict, sd.run(kind=1, case=1), bm, "GsI:[Blanchard 2006]", show_closerange=True, xlabel='',close_range_black=crj)
rti.addGSISPlot(db.data_dict, sd.run(kind=1, case=2), bm, "GsI:[Blanchard 2009]", show_closerange=True, xlabel='Time, [UT]',close_range_black=crj)
#rti.addGSISPlot(db.data_dict, sd.run(kind=1, case=3), bm, "GsI:[Chakraborty]", show_closerange=True, xlabel='',close_range_black=8)
#rti.addVelPlot(db.data_dict, bm, "Velocity", vel_max=200, vel_step=50, xlabel='Time UT')
rti.save("figs/%s_%s.med.png"%(rad, kind))
if len(kinds) == 1:
rti = RangeTimePlot(110, np.unique(np.hstack(db.data_dict["time"])), "", num_subplots=4)
keys=['gate', 'beam', 'vel', 'wid', 'time', 'trad_gsflg', 'pow', 'clust_flg']
df = todf(dbx.data_dict, keys=keys)
sdf = ScatterTypeDetection(df)
#rti.addParamPlot(df, bm, "Velocity", p_max=100, p_min=-100, p_step=25, xlabel="", zparam="v", label='Velocity [m/s]')
#rti.addParamPlot(df, bm, "Spec. Width", p_max=100, p_min=0, p_step=10, xlabel="", zparam="w_l", label='Spec. Width [m/s]')
rti.addCluster(df, bm, kind.upper(), label_clusters=True, skill=None)#, close_range_black=crj)
rti.addGSIS(sdf.run(kind=0, case=0), bm, "GsI:[Sudden]", xlabel='')#, close_range_black=crj)
rti.addGSIS(sdf.run(kind=0, case=2), bm, "GsI:[Blanchard 2009]", xlabel="")#'Time [UT]',close_range_black=crj)
rti.addGSIS(sdf.run(kind=0, case=3, mod=False), bm, r"GsI:[Chakraborty]", xlabel='Time, UT')
dfx1, dfx2, dfx3 = sdf.run(kind=0, case=0).copy(), sdf.run(kind=0, case=2).copy(), sdf.run(kind=0, case=3, mod=True).copy()
dfx1, dfx2, dfx3 = dfx1[dfx1.bmnum==bm], dfx2[dfx2.bmnum==bm], dfx3[dfx3.bmnum==bm]
estimate_kappa(np.array(dfx1.gflg), np.array(dfx2.gflg))
estimate_kappa(np.array(dfx1.gflg), np.array(dfx3.gflg))
estimate_kappa(np.array(dfx2.gflg), np.array(dfx3.gflg))
rti.save("figs/%s_%s.med.png"%(rad, kind))
rti = RangeTimePlot(110, np.unique(np.hstack(db.data_dict["time"])), "", num_subplots=4)
rti.addGSIS(sdf.run(kind=0, case=0), bm, "GsI:[Sudden]", xlabel='')
rti.addGSIS(sdf.run(kind=11, case=3), bm, r"GsI:[Chakraborty]", xlabel='Time, UT')
rti.save("figs/%s_%s.group.png"%(rad, kind))
if case == 4:
pass
run = False
if run:
from sma import MiddleLatFilter
start_time = datetime.datetime(2017, 4, 4)
end_time = datetime.datetime(2017, 4, 5)
rad, bm = "cvw",7
fdata = FetchData( rad, [start_time, end_time] )
_, scans = fdata.fetch_data(by="scan", scan_prop={"dur": 2, "stype": "themis"})
print(" Total numbe of scans: ", len(scans))
import pickle
data_dict = pickle.load(open("../data/cvw_2017-04-04_scans.pickle", 'rb'))
data_dict = _filter_by_time(start_time, end_time, data_dict)
import os
os.system("rm figs/cvw*")
df = sma_bbox(scans, sdur=30, idx=None, dbeam=None, window=5)
from sma import ScatterTypeDetection
rti = RangeTimePlot(110, np.unique(np.hstack(data_dict["time"])), "", num_subplots=4)
rti.addParamPlot(df, bm, "Velocity", p_max=100, p_min=-100, p_step=25, xlabel="", zparam="v", label='Velocity [m/s]')
rti.addParamPlot(df, bm, "Power", p_max=30, p_min=3, p_step=3, xlabel="", zparam="p_l", label='Power [dB]')
rti.addParamPlot(df, bm, "Spec. Width", p_max=100, p_min=0, p_step=10, xlabel="", zparam="w_l", label='Spec. Width [m/s]')
rti.addCluster(lower_range(df, -1), bm, "BSC", label_clusters=True, skill=estimate_df_skills(df, df.labels), xlabel='Time, UT')
rti.save("figs/cvw_07_sma.png")
rti.close()
sd = ScatterTypeDetection(df)
rti = RangeTimePlot(110, np.unique(np.hstack(data_dict["time"])), "", num_subplots=5)
rti.addCluster(lower_range(df, -1), bm, "BSC", label_clusters=True, skill=estimate_df_skills(df, df.labels))
rti.addGSIS(sd.run(kind=1, case=0), bm, r"GsI:[Sudden]")
rti.addGSIS(sd.run(kind=1, case=1), bm, r"GsI:[Blanchard 2006]")
rti.addGSIS(sd.run(kind=1, case=2), bm, r"GsI:[Blanchard 2009]")
sd = ScatterTypeDetection(lower_range(df, -1))
rti.addGSIS(sd.run(kind=1, case=3, mod=True), bm, r"GsI:[Chakraborty]", xlabel='Time, UT')
rti.save("figs/cvw_07_sma_is.png")
rti.close()
# In[ ]:
run = False
if run:
from sma import MiddleLatFilter
start_time = datetime.datetime(2015, 3, 17)
end_time = datetime.datetime(2015, 3, 17, 12)
rad, bm = "bks",13
fdata = FetchData( rad, [start_time, end_time] )
_, scans = fdata.fetch_data(by="scan", scan_prop={"dur": 2, "stype": "themis"})
print(" Total numbe of scans: ", len(scans))
import pickle
data_dict = pickle.load(open("../data/bks_2015-03-17_scans.pickle", 'rb'))
data_dict = _filter_by_time(start_time, end_time, data_dict)
import os
os.system("rm figs/bks*")
df = sma_bbox(scans, sdur=30, idx=None, dbeam=15, window=5)
from sma import ScatterTypeDetection
rti = RangeTimePlot(110, np.unique(np.hstack(data_dict["time"])), "", num_subplots=4)
rti.addParamPlot(df, bm, "Velocity", p_max=100, p_min=-100, p_step=25, xlabel="", zparam="v", label='Velocity [m/s]')
rti.addParamPlot(df, bm, "Power", p_max=30, p_min=3, p_step=3, xlabel="", zparam="p_l", label='Power [dB]')
rti.addParamPlot(df, bm, "Spec. Width", p_max=100, p_min=0, p_step=10, xlabel="", zparam="w_l", label='Spec. Width [m/s]')
rti.addCluster(lower_range(df, -1), bm, "BSC", label_clusters=True, skill=estimate_df_skills(df, df.labels), xlabel='Time, UT')
rti.save("figs/bks_07_sma.png")
rti.close()
sd = ScatterTypeDetection(df)
rti = RangeTimePlot(110, np.unique(np.hstack(data_dict["time"])), "", num_subplots=5)
rti.addCluster(lower_range(df, -1), bm, "BSC", label_clusters=True, skill=estimate_df_skills(df, df.labels))
rti.addGSIS(sd.run(kind=1, case=0), bm, r"GsI:[Sudden]")
rti.addGSIS(sd.run(kind=1, case=1), bm, r"GsI:[Blanchard 2006]")
rti.addGSIS(sd.run(kind=1, case=2), bm, r"GsI:[Blanchard 2009]")
sd =
the operator is spin conserving. The result will
be accumulated to self
Args:
coeff (complex): scalar coefficient to be multiplied to the result
idata (FqeData): input FqeData to which the operators are applied
daga (List[int]): indices corresponding to the alpha creation \
operators in the Hamiltonian
undaga (List[int]): indices corresponding to the alpha annihilation \
operators in the Hamiltonian
dagb (List[int]): indices corresponding to the beta creation \
operators in the Hamiltonian
undagb (List[int]): indices corresponding to the beta annihilation \
operators in the Hamiltonian
"""
assert len(daga) == len(undaga) and len(dagb) == len(undagb)
ualphamap = numpy.zeros((self.lena(), 3), dtype=numpy.uint64)
ubetamap = numpy.zeros((self.lenb(), 3), dtype=numpy.uint64)
acount = self._core.make_mapping_each(ualphamap, True, daga, undaga)
if acount == 0:
return
bcount = self._core.make_mapping_each(ubetamap, False, dagb, undagb)
if bcount == 0:
return
ualphamap = ualphamap[:acount, :]
ubetamap = ubetamap[:bcount, :]
alphamap = numpy.zeros((acount, 3), dtype=numpy.int64)
sourceb_vec = numpy.zeros((bcount,), dtype=numpy.int64)
targetb_vec = numpy.zeros((bcount,), dtype=numpy.int64)
parityb_vec = numpy.zeros((bcount,), dtype=numpy.int64)
alphamap[:, 0] = ualphamap[:, 0]
for i in range(acount):
alphamap[i, 1] = self._core.index_alpha(ualphamap[i, 1])
alphamap[:, 2] = 1 - 2 * ualphamap[:, 2]
sourceb_vec[:] = ubetamap[:, 0]
for i in range(bcount):
targetb_vec[i] = self._core.index_beta(ubetamap[i, 1])
parityb_vec[:] = 1 - 2 * ubetamap[:, 2]
if fqe.settings.use_accelerated_code:
_apply_individual_nbody1_accumulate(coeff, self.coeff, idata.coeff,
alphamap, targetb_vec,
sourceb_vec, parityb_vec)
else:
FqeData._apply_individual_nbody1_accumulate_python(
coeff, self.coeff, idata.coeff, alphamap, targetb_vec,
sourceb_vec, parityb_vec)
@staticmethod
def _apply_individual_nbody1_accumulate_python(
coeff: 'Nparray', ocoeff: 'Nparray', icoeff: 'Nparray',
amap: 'Nparray', btarget: 'Nparray', bsource: 'Nparray',
bparity: 'Nparray') -> None:
"""
Python version of _apply_individual_nbody1_accumulate
ported from C from fqe_data.c for compatibility
"""
for sourcea, targeta, paritya in amap:
ocoeff[targeta, btarget] += coeff * paritya * numpy.multiply(
icoeff[sourcea, bsource], bparity)
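# For every alpha-string mapping (sourcea, targeta, paritya) the loop accumulates
# coeff * paritya * icoeff[sourcea, bsource] * bparity into ocoeff[targeta, btarget],
# i.e. it applies the precomputed alpha/beta string maps together with their fermionic sign factors.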
def rdm1(self, bradata: Optional['FqeData'] = None) -> Tuple['Nparray']:
"""
API for calculating 1-particle RDMs given a wave function. When bradata
is given, it calculates transition RDMs. Depending on the filling, the
code selects an optimal algorithm.
Args:
bradata (optional, FqeData): FqeData for the bra wavefunction. When \
not given, the ket function is also used for the bra wavefunction
Returns:
Tuple[Nparray]: tuple of length 1 that contains numpy array for 1RDM
"""
return self._rdm1_blocked(bradata)
def _rdm1_blocked(self,
bradata: Optional['FqeData'] = None,
max_states: int = 100) -> Tuple['Nparray']:
"""
API for calculating 1-particle RDMs given a wave function. When bradata
is given, it calculates transition RDMs. Depending on the filling, the
code selects an optimal algorithm.
"""
bradata = self if bradata is None else bradata
if fqe.settings.use_accelerated_code:
mappings = bradata._core._get_block_mappings(max_states=max_states)
norb = bradata.norb()
coeff_a = bradata.coeff
coeff_b = bradata.coeff.T.copy()
coeffconj = self.coeff.conj()
rdm = numpy.zeros((norb, norb), dtype=bradata._dtype)
for alpha_range, beta_range, alpha_maps, beta_maps in mappings:
dvec = _make_dvec_part(coeff_a, alpha_maps, alpha_range,
beta_range, norb, self.lena(),
self.lenb(), True)
dvec = _make_dvec_part(coeff_b,
beta_maps,
alpha_range,
beta_range,
norb,
self.lena(),
self.lenb(),
False,
out=dvec)
rdm[:, :] += numpy.tensordot(
dvec, coeffconj[alpha_range.start:alpha_range.
stop, beta_range.start:beta_range.stop])
return (numpy.transpose(rdm.conj()),)
else:
dvec2 = self.calculate_dvec_spatial()
return (numpy.transpose(
numpy.tensordot(dvec2.conj(), self.coeff,
axes=((2, 3), (0, 1)))),)
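# The accelerated path above assembles the 1-RDM from partial D-vectors built over limited ranges of
# alpha/beta strings (bounded by max_states, presumably to limit memory use), whereas the fallback
# contracts one full spatial D-vector in a single tensordot.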
def rdm12(self, bradata: Optional['FqeData'] = None
) -> Tuple['Nparray', 'Nparray']:
"""
API for calculating 1- and 2-particle RDMs given a wave function.
When bradata is given, it calculates transition RDMs. Depending on the
filling, the code selects an optimal algorithm.
Args:
bradata (optional, FqeData): FqeData for the bra wavefunction. When \
not given, the ket function is also used for the bra wavefunction
Returns:
Tuple[Nparray]: tuple of length 2 that contains numpy array for 1 \
and 2RDM
"""
norb = self.norb()
nalpha = self.nalpha()
nbeta = self.nbeta()
thresh = self._low_thresh
if nalpha < norb * thresh and nbeta < norb * thresh:
graphset = FciGraphSet(2, 2)
graphset.append(self._core)
if nalpha - 2 >= 0:
graphset.append(FciGraph(nalpha - 2, nbeta, norb))
if nalpha - 1 >= 0 and nbeta - 1 >= 0:
graphset.append(FciGraph(nalpha - 1, nbeta - 1, norb))
if nbeta - 2 >= 0:
graphset.append(FciGraph(nalpha, nbeta - 2, norb))
if fqe.settings.use_accelerated_code:
return self._rdm12_lowfilling(bradata)
else:
return self._rdm12_lowfilling_python(bradata)
return self._rdm12_halffilling(bradata)
def _rdm12_halffilling(self, bradata: Optional['FqeData'] = None
) -> Tuple['Nparray', 'Nparray']:
"""
Standard code for calculating 1- and 2-particle RDMs given a
wavefunction. When bradata is given, it calculates transition RDMs.
"""
if fqe.settings.use_accelerated_code:
return self._rdm12_halffilling_blocked(bradata)
else:
dvec = self.calculate_dvec_spatial()
dvec2 = dvec if bradata is None \
else bradata.calculate_dvec_spatial()
out1 = numpy.transpose(numpy.tensordot(dvec2.conj(), self.coeff))
out2 = numpy.transpose(numpy.tensordot(
dvec2.conj(), dvec, axes=((2, 3), (2, 3))),
axes=(1, 2, 0, 3)) * (-1.0)
for i in range(self.norb()):
out2[:, i, i, :] += out1[:, :]
return out1, out2
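# Interpretation (not stated in the original comments): the tensordot yields <p^+ q r^+ s>-type
# expectation values; adding the 1-RDM back on the repeated index (out2[:, i, i, :] += out1) supplies
# the anticommutation term needed to bring them into the conventional 2-RDM index ordering.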
def _rdm12_halffilling_blocked(self,
bradata: Optional['FqeData'] = None,
max_states: int = 100
) -> Tuple['Nparray', 'Nparray']:
"""
Standard code for calculating 1- and 2-particle RDMs given a
wavefunction. When bradata is given, it calculates transition RDMs.
"""
bradata = self if bradata is None else bradata
mappings = self._core._get_block_mappings(max_states=max_states)
norb = bradata.norb()
coeff_a = self.coeff
coeff_b = self.coeff.T.copy()
bcoeff_a = bradata.coeff
bcoeff_b = bradata.coeff.T.copy()
rdm1 = numpy.zeros((norb,) * 2, dtype=bradata._dtype)
rdm2 = numpy.zeros((norb,) * 4, dtype=bradata._dtype)
for alpha_range, beta_range, alpha_maps, beta_maps in mappings:
dvec = _make_dvec_part(coeff_a, alpha_maps, alpha_range, beta_range,
norb, self.lena(), self.lenb(), True)
dvec = _make_dvec_part(coeff_b,
beta_maps,
alpha_range,
beta_range,
norb,
self.lena(),
self.lenb(),
False,
out=dvec)
dvec2 = _make_dvec_part(bcoeff_a, alpha_maps,
alpha_range, beta_range, norb, self.lena(),
self.lenb(), True)
dvec2 = _make_dvec_part(bcoeff_b,
beta_maps,
alpha_range,
beta_range,
norb,
self.lena(),
self.lenb(),
False,
out=dvec2)
dvec2conj = dvec2.conj()
rdm1[:, :] += numpy.tensordot(
dvec2conj, self.coeff[alpha_range.start:alpha_range.
stop, beta_range.start:beta_range.stop])
rdm2[:, :, :, :] += \
numpy.tensordot(dvec2conj, dvec, axes=((2, 3), (2, 3)))
rdm2 = -rdm2.transpose(1, 2, 0, 3)
for i in range(self.norb()):
rdm2[:, i, i, :] += rdm1[:, :]
return (numpy.transpose(rdm1), rdm2)
def _rdm12_lowfilling_python(self, bradata: Optional['FqeData'] = None
) -> Tuple['Nparray', 'Nparray']:
"""
Low-filling specialization of the code for calculating 1- and 2-particle
RDMs given a wave function. When bradata is given, it calculates
transition RDMs.
"""
norb = self.norb()
nalpha = self.nalpha()
nbeta = self.nbeta()
lena = self.lena()
lenb = self.lenb()
nlt = norb * (norb + 1) // 2
outpack = numpy.zeros((nlt, nlt), dtype=self.coeff.dtype)
outunpack = numpy.zeros((norb, norb, norb, norb),
dtype=self.coeff.dtype)
if nalpha - 2 >= 0:
alpha_map, _ = self._core.find_mapping(-2, 0)
def compute_intermediate0(coeff):
tmp = numpy.zeros((nlt, int(binom(norb, nalpha - 2)), lenb),
dtype=self.coeff.dtype)
for i in range(norb):
for j in range(i + 1, norb):
for source, target, parity in alpha_map[(i, j)]:
tmp[i + j * (j + 1) //
2, target, :] += coeff[source, :] * parity
return tmp
inter = compute_intermediate0(self.coeff)
inter2 = inter if bradata is None else compute_intermediate0(
bradata.coeff)
outpack += numpy.tensordot(inter2.conj(),
inter,
axes=((1, 2), (1, 2)))
if self.nalpha() - 1 >= 0 and self.nbeta() - 1 >= 0:
alpha_map, beta_map = self._core.find_mapping(-1, -1)
def compute_intermediate1(coeff):
tmp = numpy.zeros((norb, norb, int(binom(
norb, nalpha - 1)), int(binom(norb, nbeta - 1))),
dtype=self.coeff.dtype)
for i in range(norb):
for j in range(norb):
for sourcea, targeta, paritya in alpha_map[(i,)]:
paritya *= (-1)**(nalpha - 1)
for sourceb, targetb, parityb in beta_map[(j,)]:
work = coeff[sourcea,
sourceb] * paritya * parityb
tmp[i, j, targeta, targetb] += work
return tmp
inter = compute_intermediate1(self.coeff)
inter2 = inter if bradata is None else compute_intermediate1(
bradata.coeff)
outunpack += numpy.tensordot(inter2.conj(),
inter,
axes=((2, 3), (2, 3)))
if self.nbeta() - 2 >= 0:
_, beta_map = self._core.find_mapping(0, -2)
def compute_intermediate2(coeff):
tmp = numpy.zeros((nlt, lena, int(binom(norb, nbeta - 2))),
dtype=self.coeff.dtype)
for i in range(norb):
for j in range(i + 1, norb):
for source, target, parity in beta_map[(i, j)]:
tmp[i + j * (j + 1) //
2, :, target] += coeff[:, source] * parity
return tmp
inter = compute_intermediate2(self.coeff)
inter2 = inter if bradata is None else compute_intermediate2(
bradata.coeff)
outpack += numpy.tensordot(inter2.conj(),
inter,
axes=((1, 2), (1, 2)))
out = numpy.zeros_like(outunpack)
for i in range(norb):
for j in range(norb):
ij = min(i, j) + max(i, j) * (max(i, j) + 1) // 2
parityij = 1.0 if i < j else -1.0
for k in range(norb):
for l in range(norb):
parity = parityij * (1.0 if k < l else -1.0)
out[i, j, k,
l] -= outunpack[i, j, k, l] + outunpack[j, i, l, k]
mnkl, mxkl = min(k, l), max(k, l)
work = outpack[ij, mnkl + mxkl * (mxkl + 1) // 2]
out[i, j, k, l] -= work *
running cannot both be true', owner='nbaker')
FACTORY_TUNABLES = {'description':'\n A test that verifies if any of the users of the selected participant are\n running a specific interaction.\n ',
'participant':TunableEnumEntry(description='\n The participant of the interaction used to fetch the users against\n which the test is run.\n ',
tunable_type=ParticipantType,
default=ParticipantType.Object),
'affordances':TunableList(TunableReference(description='\n If any of the participants are running any of these affordances,\n this test will pass.\n ',
manager=(services.affordance_manager()),
class_restrictions='SuperInteraction',
pack_safe=True)),
'affordance_lists':TunableList(description='\n If any of the participants are running any of the affordances in\n these lists, this test will pass.\n ',
tunable=snippets.TunableAffordanceListReference()),
'test_for_not_running':Tunable(description='\n Changes this test to check for the opposite case, as in verifying that this interaction is not running.\n ',
tunable_type=bool,
default=False),
'all_participants_running':Tunable(description='\n Returns true only if *all* valid particpants are running a valid \n affordance.\n \n Incompatible with test for not running being true',
tunable_type=bool,
default=False),
'verify_tunable_callback':_verify_tunable_callback}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.update_all_affordances()
def update_all_affordances(self):
self.all_affordances = set(self.affordances)
for affordance_list in self.affordance_lists:
self.all_affordances.update(affordance_list)
def get_expected_args(self):
return {'test_targets': self.participant}
def matching_interaction_in_si_state(self, si_state):
return any((si.get_interaction_type() in self.all_affordances for si in si_state))
@cached_test
def __call__(self, test_targets=()):
interaction_is_running = False
for target in test_targets:
if target.is_sim:
if target.get_sim_instance(allow_hidden_flags=ALL_HIDDEN_REASONS) is None:
return TestResult(False, '{} is not an instanced object', target, tooltip=(self.tooltip))
target = target.get_sim_instance(allow_hidden_flags=ALL_HIDDEN_REASONS)
if self.matching_interaction_in_si_state(target.si_state):
interaction_is_running = True
else:
if self.all_participants_running:
return TestResult(False, 'Target sim is not running one of {} and test specifies all participants running', (self.all_affordances), tooltip=(self.tooltip))
if target.is_part:
target = target.part_owner
for user in target.get_users(sims_only=True):
if self.matching_interaction_in_si_state(user.si_state):
interaction_is_running = True
if not self.all_participants_running:
break
elif self.all_participants_running:
return TestResult(False, 'user {} is not running one of {} and test specifies all participants running', user, (self.all_affordances), tooltip=(self.tooltip))
if interaction_is_running and not self.all_participants_running:
break
if self.test_for_not_running:
if interaction_is_running:
return TestResult(False, 'User is running one of {}', (self.all_affordances), tooltip=(self.tooltip))
return TestResult.TRUE
if interaction_is_running:
return TestResult.TRUE
return TestResult(False, 'No user found running one of {}', (self.all_affordances), tooltip=(self.tooltip))
class ParticipantRunningInteractionTest(HasTunableSingletonFactory, AutoFactoryInit, event_testing.test_base.BaseTest):
FACTORY_TUNABLES = {'participant':TunableEnumEntry(description='\n The participant of the interaction to test. The test will pass if any participant\n is running any of the affordances.\n ',
tunable_type=ParticipantType,
default=ParticipantType.Actor),
'affordances':TunableList(TunableReference(description='\n The affordances to test. The test will pass if any participant is running any of \n the affordances.\n ',
manager=(services.affordance_manager()),
class_restrictions='SuperInteraction',
pack_safe=True)),
'affordance_lists':TunableList(description='\n The affordances to test. The test will pass if any participant is running any of \n the affordances.\n ',
tunable=snippets.TunableAffordanceListReference()),
'test_for_not_running':Tunable(description='\n Changes this test to check for the opposite case, as in verifying that none of these \n affordances are being run by any of the participants.',
tunable_type=bool,
default=False)}
def get_expected_args(self):
return {'test_targets': self.participant}
@cached_test
def __call__(self, test_targets=()):
all_affordances = set(self.affordances)
for affordance_list in self.affordance_lists:
all_affordances.update(affordance_list)
found_sim = False
for sim_info in test_targets:
if not sim_info.is_sim:
continue
found_sim = True
sim = sim_info.get_sim_instance()
if sim is None:
continue
for interaction in sim.si_state:
if interaction.is_finishing:
continue
if interaction.get_interaction_type() in all_affordances:
if self.test_for_not_running:
return TestResult(False, 'Sim {} is running one of {}', sim, all_affordances, tooltip=(self.tooltip))
return TestResult.TRUE
transition_controller = sim.transition_controller
if transition_controller is not None:
if transition_controller.interaction is not None and transition_controller.interaction.get_interaction_type() in all_affordances:
if self.test_for_not_running:
return TestResult(False, 'Sim {} is transitioning to one of {}', sim, all_affordances, tooltip=(self.tooltip))
return TestResult.TRUE
if not found_sim:
return TestResult(False, 'No sim found in participant type: {}', test_targets, tooltip=(self.tooltip))
if self.test_for_not_running:
return TestResult.TRUE
return TestResult(False, 'No sim was running one of {}', all_affordances, tooltip=(self.tooltip))
class AchievementEarnedFactory(TunableFactory):
@staticmethod
def factory(sim, tooltip, unlocked, achievement, negate=False):
if achievement is None:
if hasattr(unlocked, 'aspiration_type'):
return TestResult(False,
'UnlockedTest: non-achievement object {} passed to AchievementEarnedFactory.',
unlocked,
tooltip=tooltip)
return TestResult.TRUE
milestone_unlocked = sim.account.achievement_tracker.milestone_completed(achievement)
if milestone_unlocked != negate:
return TestResult.TRUE
return TestResult(False, 'UnlockedTest: Sim has not unlocked achievement {} or unexpectedly did so.', achievement, tooltip=tooltip)
FACTORY_TYPE = factory
def __init__(self, **kwargs):
(super().__init__)(description='\n This option tests for completion of a tuned Achievement.\n ',
achievement=TunableReference(description='\n The achievement we want to test.\n ',
manager=(services.get_instance_manager(sims4.resources.Types.ACHIEVEMENT))),
negate=Tunable(description='\n If enabled, we will require that the achievement is NOT unlocked.\n ',
tunable_type=bool,
default=False), **kwargs)
class AspirationEarnedFactory(TunableFactory):
@staticmethod
def factory(sim_info, tooltip, unlocked, aspiration, negate=False):
if sim_info.aspiration_tracker is None:
return TestResult(False,
'UnlockedTest: aspiration tracker not present on Sim info {}.',
sim_info,
tooltip=tooltip)
milestone_unlocked = sim_info.aspiration_tracker.milestone_completed(aspiration)
if milestone_unlocked != negate:
return TestResult.TRUE
return TestResult(False, 'UnlockedTest: Sim has not unlocked aspiration {} or unexpectedly did so.', aspiration, tooltip=tooltip)
FACTORY_TYPE = factory
def __init__(self, **kwargs):
(super().__init__)(description='\n This option tests for completion of a tuned Aspiration.\n ',
aspiration=TunableReference(description='\n The aspiration we want to test.\n ',
manager=(services.get_instance_manager(sims4.resources.Types.ASPIRATION))),
negate=Tunable(description='\n If enabled, we will require that the aspiration is NOT unlocked.\n ',
tunable_type=bool,
default=False), **kwargs)
class TestHouseholdMilestoneEarned(HasTunableSingletonFactory, AutoFactoryInit):
FACTORY_TUNABLES = {'household_milestone':TunableReference(description='\n The household milestone to check unlock status against. \n ',
pack_safe=True,
manager=services.get_instance_manager(sims4.resources.Types.HOUSEHOLD_MILESTONE)),
'negate':Tunable(description="\n If checked then this test will pass if the milestone is not\n complete otherwise it will pass if it's been earned.\n ",
tunable_type=bool,
default=False)}
def __call__(self, sim_info, tooltip, unlocked):
if self.household_milestone is None:
return TestResult(False, 'Tuned milestone on {} cannot be None.', self, tooltip=tooltip)
if sim_info.household is not None:
if sim_info.household.household_milestone_tracker is not None:
milestone_unlocked = sim_info.household.household_milestone_tracker.milestone_completed(self.household_milestone)
if milestone_unlocked == self.negate:
return TestResult(False, 'UnlockedTest: milestone ({}) has an unlocked status of ({}), which is not the required value.', (self.household_milestone), milestone_unlocked, tooltip=tooltip)
return TestResult.TRUE
class TestAspirationUnlock(HasTunableSingletonFactory, AutoFactoryInit):
FACTORY_TUNABLES = {'check_aspiration_type':OptionalTunable(description='\n If enabled then we will check the aspiration type of the aspiration\n that was just unlocked.\n ',
tunable=TunableEnumEntry(description='\n The aspiration type that we are checking if it just completed.\n ',
tunable_type=AspriationType,
default=(AspriationType.FULL_ASPIRATION))),
'check_complete_only_in_sequence':OptionalTunable(description='\n If enabled then we will check that the aspiration that was just\n unlocked has a specific value of complete only in sequence set.\n ',
tunable=Tunable(description='\n If complete only in sequence should be also be set or not.\n ',
tunable_type=bool,
default=True))}
def __call__(self, sim_info, tooltip, unlocked):
if unlocked is None:
return TestResult(False, 'UnlockedTest: No aspiration Unlocked.',
tooltip=tooltip)
aspiration_type = getattr(unlocked, 'aspiration_type', None)
if aspiration_type is None:
return TestResult(False, 'UnlockedTest: non-aspiration object {} passed to TestAspirationUnlock.',
unlocked,
tooltip=tooltip)
else:
if self.check_aspiration_type is not None:
if aspiration_type != self.check_aspiration_type:
return TestResult(False, "UnlockedTest: aspiration object {} passed in isn't of type {}.",
unlocked,
(self.check_aspiration_type),
tooltip=tooltip)
if self.check_complete_only_in_sequence is not None and unlocked.do_not_register_events_on_load != self.check_complete_only_in_sequence:
return TestResult(False, 'UnlockedTest: aspiration object {} does not have do_not_register_events_on_load equal to {}.',
unlocked,
(self.check_complete_only_in_sequence),
tooltip=tooltip)
return TestResult.TRUE
class TestAspirationsAvailable(HasTunableSingletonFactory, AutoFactoryInit):
FACTORY_TUNABLES = {'negate': Tunable(description='\n If checked then this test will pass if all aspirations are\n complete otherwise it will pass if there is a still an aspiration\n that can be unlocked.\n ',
tunable_type=bool,
default=False)}
def __call__(self, sim_info, tooltip, unlocked):
if sim_info.is_toddler_or_younger:
return TestResult(False, "Todders and below can't have primary aspirations.",
tooltip=tooltip)
aspiration_tracker = sim_info.aspiration_tracker
for aspiration_track in services.get_instance_manager(sims4.resources.Types.ASPIRATION_TRACK).types.values():
if sim_info.is_child != aspiration_track.is_child_aspiration_track:
continue
for aspiration in aspiration_track.aspirations.values():
if aspiration_tracker.milestone_completed(aspiration) or self.negate:
return TestResult(False, 'TestAspirationsAvailable: There is an aspiration {} that has not been completed.',
aspiration,
tooltip=tooltip)
return TestResult.TRUE
if self.negate:
return TestResult.TRUE
return TestResult(False, 'TestAspirationsAvailable: There are no aspirations still to unlock.',
tooltip=tooltip)
class UnlockedTest(event_testing.test_base.BaseTest):
test_events = (
TestEvent.UnlockEvent,)
USES_EVENT_DATA = True
@TunableFactory.factory_option
def unlock_type_override(allow_achievment=True):
kwargs = {}
default = 'aspiration'
kwargs['aspiration'] = AspirationEarnedFactory()
kwargs['aspiration_unlocked'] = TestAspirationUnlock.TunableFactory()
kwargs['aspirations_available'] = TestAspirationsAvailable.TunableFactory()
kwargs['household_milestone_earned'] = TestHouseholdMilestoneEarned.TunableFactory()
if allow_achievment:
default = 'achievement'
kwargs['achievement'] = AchievementEarnedFactory()
return {'unlock_to_test':TunableVariant(description='\n The unlocked aspiration, career, achievement, or milestone want to test for.\n ',
default=default, **kwargs),
'participant':TunableEnumEntry(ParticipantType, ParticipantType.Actor, description='The subject of this test.')}
def __init__(self, *, unlock_to_test, participant, **kwargs):
super().__init__(**kwargs)
self.unlock_to_test = unlock_to_test
self.participant = participant
def get_expected_args(self):
return {'sims':self.participant,
'unlocked':event_testing.test_constants.FROM_EVENT_DATA}
@cached_test
def __call__(self, sims=None, unlocked=None):
for sim in sims:
return self.unlock_to_test(sim, self.tooltip, unlocked)
TunableUnlockedTest = TunableSingletonFactory.create_auto_factory(UnlockedTest)
class DayTimeTest(event_testing.test_base.BaseTest):
FACTORY_TUNABLES = {'description':'\n Test to see if the current time falls within the tuned range\n and/or is on a valid day.\n ',
'days_available':OptionalTunable(scheduler_utils.TunableDayAvailability()),
'time_range':OptionalTunable(TunableTuple(description='\n The time the test is valid. If days_available is tuned and the\n time range spans across two days with the second day tuned as\n unavailable, the test will pass for that day until time range is\n invalid. Example: Time range 20:00 - 4:00, Monday is valid,\n Tuesday is invalid. Tuesday at 2:00 | |
cenrace11cats * votingage", "detailed")
geos_qs_props_dict[CC.QUERIESPROP][level] = (Fr(1,1024),Fr(1,1024),Fr(1,1024),Fr(1,1024),
Fr(1,1024),Fr(1,1024),Fr(1018,1024))
return geos_qs_props_dict
class Strategy2b_ST_CTY_B_isoTot_Ordering:
@staticmethod
def make(levels):
# levels = USGeolevelsNoTractGroup.getLevels()
us_ordering = {
CC.L2_QUERY_ORDERING: {
0: {
0: ('hispanic * cenrace11cats', 'votingage', 'hhinstlevels', 'hhgq',
'hispanic * cenrace11cats * votingage', 'detailed'),
},
},
CC.L2_CONSTRAIN_TO_QUERY_ORDERING:{
0: {
0: ('hispanic * cenrace11cats', 'votingage', 'hhinstlevels', 'hhgq',
'hispanic * cenrace11cats * votingage', 'detailed'),
},
},
CC.ROUNDER_QUERY_ORDERING: {
0: {
0: ('total', 'hhgq', 'hhgq * hispanic', 'hhgq * hispanic * cenrace', 'hhgq * votingage * hispanic * cenrace',
'detailed'),
},
},
}
st_cty_b_ordering = {
CC.L2_QUERY_ORDERING: {
0: {
0: ('total',),
1: ('hispanic * cenrace11cats', 'votingage', 'hhinstlevels', 'hhgq',
'hispanic * cenrace11cats * votingage', 'detailed'),
},
},
CC.L2_CONSTRAIN_TO_QUERY_ORDERING: {
0: {
0: ('total',),
1: ('hispanic * cenrace11cats', 'votingage', 'hhinstlevels', 'hhgq',
'hispanic * cenrace11cats * votingage', 'detailed'),
},
},
CC.ROUNDER_QUERY_ORDERING: {
0: {
0: ('total',),
1: ('hhgq', 'hhgq * hispanic', 'hhgq * hispanic * cenrace', 'hhgq * votingage * hispanic * cenrace',
'detailed'),
},
},
}
default_ordering = {
CC.L2_QUERY_ORDERING: {
0: {
0: ('total', 'hispanic * cenrace11cats', 'votingage', 'hhinstlevels', 'hhgq',
'hispanic * cenrace11cats * votingage', 'detailed'),
},
},
CC.L2_CONSTRAIN_TO_QUERY_ORDERING: {
0: {
0: ('total', 'hispanic * cenrace11cats', 'votingage', 'hhinstlevels', 'hhgq',
'hispanic * cenrace11cats * votingage', 'detailed'),
},
},
CC.ROUNDER_QUERY_ORDERING: {
0: {
0: ('total', 'hhgq', 'hhgq * hispanic', 'hhgq * hispanic * cenrace', 'hhgq * votingage * hispanic * cenrace',
'detailed'),
},
},
}
query_ordering = {}
for geolevel in levels:
if geolevel == "US":
query_ordering[geolevel] = us_ordering
elif geolevel in ("State", "County", "Block"):
query_ordering[geolevel] = st_cty_b_ordering
else:
query_ordering[geolevel] = default_ordering
return query_ordering
class Strategy2b_ST_CTY_BG_isoTot_Ordering:
@staticmethod
def make(levels):
# levels = USGeolevelsNoTractGroup.getLevels()
us_ordering = {
CC.L2_QUERY_ORDERING: {
0: {
0: ('hispanic * cenrace11cats', 'votingage', 'hhinstlevels', 'hhgq',
'hispanic * cenrace11cats * votingage', 'detailed'),
},
},
CC.L2_CONSTRAIN_TO_QUERY_ORDERING:{
0: {
0: ('hispanic * cenrace11cats', 'votingage', 'hhinstlevels', 'hhgq',
'hispanic * cenrace11cats * votingage', 'detailed'),
},
},
CC.ROUNDER_QUERY_ORDERING: {
0: {
0: ('total', 'hhgq', 'hhgq * hispanic', 'hhgq * hispanic * cenrace', 'hhgq * votingage * hispanic * cenrace',
'detailed'),
},
},
}
st_cty_bg_ordering = {
CC.L2_QUERY_ORDERING: {
0: {
0: ('total',),
1: ('hispanic * cenrace11cats', 'votingage', 'hhinstlevels', 'hhgq',
'hispanic * cenrace11cats * votingage', 'detailed'),
},
},
CC.L2_CONSTRAIN_TO_QUERY_ORDERING: {
0: {
0: ('total',),
1: ('hispanic * cenrace11cats', 'votingage', 'hhinstlevels', 'hhgq',
'hispanic * cenrace11cats * votingage', 'detailed'),
},
},
CC.ROUNDER_QUERY_ORDERING: {
0: {
0: ('total',),
1: ('hhgq', 'hhgq * hispanic', 'hhgq * hispanic * cenrace', 'hhgq * votingage * hispanic * cenrace',
'detailed'),
},
},
}
default_ordering = {
CC.L2_QUERY_ORDERING: {
0: {
0: ('total', 'hispanic * cenrace11cats', 'votingage', 'hhinstlevels', 'hhgq',
'hispanic * cenrace11cats * votingage', 'detailed'),
},
},
CC.L2_CONSTRAIN_TO_QUERY_ORDERING: {
0: {
0: ('total', 'hispanic * cenrace11cats', 'votingage', 'hhinstlevels', 'hhgq',
'hispanic * cenrace11cats * votingage', 'detailed'),
},
},
CC.ROUNDER_QUERY_ORDERING: {
0: {
0: ('total', 'hhgq', 'hhgq * hispanic', 'hhgq * hispanic * cenrace', 'hhgq * votingage * hispanic * cenrace',
'detailed'),
},
},
}
query_ordering = {}
for geolevel in levels:
if geolevel == "US":
query_ordering[geolevel] = us_ordering
elif geolevel in ("State", "County", "Block_Group"):
query_ordering[geolevel] = st_cty_bg_ordering
else:
query_ordering[geolevel] = default_ordering
return query_ordering
class Strategy2b_St_Cty_isoTot_Ordering:
@staticmethod
def make(levels):
# levels = USGeolevelsNoTractGroup.getLevels()
us_ordering = {
CC.L2_QUERY_ORDERING: {
0: {
0: ('hispanic * cenrace11cats', 'votingage', 'hhinstlevels', 'hhgq',
'hispanic * cenrace11cats * votingage', 'detailed'),
},
},
CC.L2_CONSTRAIN_TO_QUERY_ORDERING:{
0: {
0: ('hispanic * cenrace11cats', 'votingage', 'hhinstlevels', 'hhgq',
'hispanic * cenrace11cats * votingage', 'detailed'),
},
},
CC.ROUNDER_QUERY_ORDERING: {
0: {
0: ('total', 'hhgq', 'hhgq * hispanic', 'hhgq * hispanic * cenrace', 'hhgq * votingage * hispanic * cenrace',
'detailed'),
},
},
}
st_cty_ordering = {
CC.L2_QUERY_ORDERING: {
0: {
0: ('total',),
1: ('hispanic * cenrace11cats', 'votingage', 'hhinstlevels', 'hhgq',
'hispanic * cenrace11cats * votingage', 'detailed'),
},
},
CC.L2_CONSTRAIN_TO_QUERY_ORDERING: {
0: {
0: ('total',),
1: ('hispanic * cenrace11cats', 'votingage', 'hhinstlevels', 'hhgq',
'hispanic * cenrace11cats * votingage', 'detailed'),
},
},
CC.ROUNDER_QUERY_ORDERING: {
0: {
0: ('total',),
1: ('hhgq', 'hhgq * hispanic', 'hhgq * hispanic * cenrace', 'hhgq * votingage * hispanic * cenrace',
'detailed'),
},
},
}
subCounty_ordering = {
CC.L2_QUERY_ORDERING: {
0: {
0: ('total', 'hispanic * cenrace11cats', 'votingage', 'hhinstlevels', 'hhgq',
'hispanic * cenrace11cats * votingage', 'detailed'),
},
},
CC.L2_CONSTRAIN_TO_QUERY_ORDERING: {
0: {
0: ('total', 'hispanic * cenrace11cats', 'votingage', 'hhinstlevels', 'hhgq',
'hispanic * cenrace11cats * votingage', 'detailed'),
},
},
CC.ROUNDER_QUERY_ORDERING: {
0: {
0: ('total', 'hhgq', 'hhgq * hispanic', 'hhgq * hispanic * cenrace', 'hhgq * votingage * hispanic * cenrace',
'detailed'),
},
},
}
query_ordering = {}
for geolevel in levels:
if geolevel == "US":
query_ordering[geolevel] = us_ordering
elif geolevel in ("State", "County"):
query_ordering[geolevel] = st_cty_ordering
else:
query_ordering[geolevel] = subCounty_ordering
return query_ordering
class Strategy2b(PL94Strategy, USLevelStrategy):
@staticmethod
def make(levels):
strategy2b = defaultdict(lambda: defaultdict(dict))
strategy2b.update({
CC.GEODICT_GEOLEVELS: levels,
CC.DPQUERIES + "default": Strategies2.getDPQNames(),
CC.QUERIESPROP + "default": tuple(Fr(num, 1024) for num in (1,) * 6 + (1018,)),
})
for level in strategy2b[CC.GEODICT_GEOLEVELS]:
strategy2b[CC.DPQUERIES][level] = strategy2b[CC.DPQUERIES + "default"]
strategy2b[CC.QUERIESPROP][level] = strategy2b[CC.QUERIESPROP + "default"]
return strategy2b
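# Strategy2b uses the same budget split at every geolevel: each of the six non-detailed queries gets
# 1/1024 of the per-geolevel budget and the 'detailed' query gets the remaining 1018/1024.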
class RedistrictingStrategiesRegularOrdering1a:
@staticmethod
def make(levels):
# levels = USGeolevelsNoTractGroup.getLevels()
ordering = {
CC.L2_QUERY_ORDERING: {
0: {
0: ('total',
'cenrace',
'hispanic',
'votingage',
'hhinstlevels',
'hhgq',
'votingage * hispanic'),
1: ('hhgq', 'hispanic * cenrace', 'votingage * cenrace', 'votingage * hispanic'),
2: ("votingage * hispanic * cenrace",),
3: ("detailed",),
},
},
CC.L2_CONSTRAIN_TO_QUERY_ORDERING: {
0: {
0: ('total', 'cenrace', 'hispanic', 'votingage', 'hhinstlevels'),
1: ('hhgq', 'hispanic * cenrace', 'votingage * cenrace', 'votingage * hispanic'),
2: ("votingage * hispanic * cenrace",),
3: ("detailed",),
},
},
CC.ROUNDER_QUERY_ORDERING: StandardRedistrictingRounderOrdering.get()
}
query_ordering = {}
for geolevel in levels:
query_ordering[geolevel] = {
CC.L2_QUERY_ORDERING: ordering[CC.L2_QUERY_ORDERING],
CC.L2_CONSTRAIN_TO_QUERY_ORDERING: ordering[CC.L2_CONSTRAIN_TO_QUERY_ORDERING],
CC.ROUNDER_QUERY_ORDERING: ordering[CC.ROUNDER_QUERY_ORDERING]
}
return query_ordering
class RedistrictingStrategiesRegularOrdering1b:
@staticmethod
def make(levels):
# levels = USGeolevelsNoTractGroup.getLevels()
ordering = {
CC.L2_QUERY_ORDERING: {
0: {
0: Strategies1.getDPQNames()
},
},
CC.L2_CONSTRAIN_TO_QUERY_ORDERING: None,
CC.ROUNDER_QUERY_ORDERING: StandardRedistrictingRounderOrdering.get()
}
query_ordering = {}
for geolevel in levels:
query_ordering[geolevel] = {
CC.L2_QUERY_ORDERING: ordering[CC.L2_QUERY_ORDERING],
CC.L2_CONSTRAIN_TO_QUERY_ORDERING: ordering[CC.L2_CONSTRAIN_TO_QUERY_ORDERING],
CC.ROUNDER_QUERY_ORDERING: ordering[CC.ROUNDER_QUERY_ORDERING]
}
return query_ordering
class RedistrictingStrategiesRegularOrdering2a:
@staticmethod
def make(levels):
# levels = USGeolevelsNoTractGroup.getLevels()
ordering = {
CC.L2_QUERY_ORDERING: {
0: {
0: Strategies2.getDPQNames()
},
},
CC.L2_CONSTRAIN_TO_QUERY_ORDERING: None,
CC.ROUNDER_QUERY_ORDERING: StandardRedistrictingRounderOrdering.get()
}
query_ordering = {}
for geolevel in levels:
query_ordering[geolevel] = {
CC.L2_QUERY_ORDERING: ordering[CC.L2_QUERY_ORDERING],
CC.L2_CONSTRAIN_TO_QUERY_ORDERING: ordering[CC.L2_CONSTRAIN_TO_QUERY_ORDERING],
CC.ROUNDER_QUERY_ORDERING: ordering[CC.ROUNDER_QUERY_ORDERING]
}
return query_ordering
class RedistrictingStrategiesRegularOrdering2b:
@staticmethod
def make(levels):
# levels = USGeolevelsNoTractGroup.getLevels()
ordering = {
CC.L2_QUERY_ORDERING: {
0: {
0: ('total', 'hispanic * cenrace11cats', 'votingage', 'hhinstlevels', 'hhgq'),
1: ('hhgq', ),
2: ("hispanic * cenrace11cats * votingage",),
3: ("detailed",),
},
},
CC.L2_CONSTRAIN_TO_QUERY_ORDERING: {
0: {
0: ('total', 'hispanic * cenrace11cats', 'votingage', 'hhinstlevels'),
1: ('hhgq',),
2: ("hispanic * cenrace11cats * votingage",),
3: ("detailed",),
},
},
CC.ROUNDER_QUERY_ORDERING: StandardRedistrictingRounderOrdering.get()
}
query_ordering = {}
for geolevel in levels:
query_ordering[geolevel] = {
CC.L2_QUERY_ORDERING: ordering[CC.L2_QUERY_ORDERING],
CC.L2_CONSTRAIN_TO_QUERY_ORDERING: ordering[CC.L2_CONSTRAIN_TO_QUERY_ORDERING],
CC.ROUNDER_QUERY_ORDERING: ordering[CC.ROUNDER_QUERY_ORDERING]
}
return query_ordering
class DetailedOnly(USLevelStrategy):
schema = CC.SCHEMA_PL94  # Can be used with any schema; this attribute exists only for unit tests of impact gaps, and the detailed query has no impact gaps in any schema.
@staticmethod
def make(levels):
test_strategy = defaultdict(lambda: defaultdict(dict)) # So as to avoid having to specify empty dicts and defaultdicts
test_strategy.update({
CC.GEODICT_GEOLEVELS: levels,
CC.DPQUERIES + "default": (
"detailed",),
CC.QUERIESPROP + "default": (1,),
})
for level in test_strategy[CC.GEODICT_GEOLEVELS]:
test_strategy[CC.DPQUERIES][level] = test_strategy[CC.DPQUERIES + "default"]
test_strategy[CC.QUERIESPROP][level] = test_strategy[CC.QUERIESPROP + "default"]
return test_strategy
class DetailedOnlyQueryOrderingOuterPass:
@staticmethod
def make(levels):
ordering = {
CC.L2_QUERY_ORDERING: {
0: {
0: ("detailed",),
},
},
CC.L2_CONSTRAIN_TO_QUERY_ORDERING: {
0: {
0: ("detailed",),
},
},
CC.ROUNDER_QUERY_ORDERING: {
0: {
0: ("detailed",),
},
}
}
query_ordering = {}
for geolevel in levels:
query_ordering[geolevel] = {
CC.L2_QUERY_ORDERING: ordering[CC.L2_QUERY_ORDERING],
CC.L2_CONSTRAIN_TO_QUERY_ORDERING: ordering[CC.L2_CONSTRAIN_TO_QUERY_ORDERING],
CC.ROUNDER_QUERY_ORDERING: ordering[CC.ROUNDER_QUERY_ORDERING]
}
return query_ordering
class DetailedOnlyQueryOrdering:
@staticmethod
def make(levels):
ordering = {
CC.L2_QUERY_ORDERING: {0: ("detailed",)},
CC.L2_CONSTRAIN_TO_QUERY_ORDERING: None,
CC.ROUNDER_QUERY_ORDERING: {0: ("detailed",)},
}
query_ordering = {}
for geolevel in levels:
query_ordering[geolevel] = {
CC.L2_QUERY_ORDERING: ordering[CC.L2_QUERY_ORDERING],
CC.L2_CONSTRAIN_TO_QUERY_ORDERING: ordering[CC.L2_CONSTRAIN_TO_QUERY_ORDERING],
CC.ROUNDER_QUERY_ORDERING: ordering[CC.ROUNDER_QUERY_ORDERING]
}
return query_ordering
class StrategySelector:
strategies = {
'strategy1a': Strategy1a,
'strategy1b': Strategy1b,
'strategy2a': Strategy2a,
'strategy2b': Strategy2b,
'test_strategy': TestStrategy,
'DetailedOnly': DetailedOnly,
'Strategy1a_St_Cty_isoTot' : Strategy1a_St_Cty_isoTot,
'Strategy1b_St_Cty_isoTot' : Strategy1b_St_Cty_isoTot,
'Strategy2a_St_Cty_isoTot' : Strategy2a_St_Cty_isoTot,
'Strategy2b_St_Cty_isoTot' : Strategy2b_St_Cty_isoTot,
'Strategy1b_St_Cty_BG_optSpine_ppmfCandidate' : Strategy1b_St_Cty_BG_optSpine_ppmfCandidate,
'ProductionCandidate20210517US' : ProductionCandidate20210517US,
'ProductionCandidate20210527US_mult05' : ProductionCandidate20210527US_mult05,
'ProductionCandidate20210527US_mult1' : ProductionCandidate20210527US_mult1,
'ProductionCandidate20210527US_mult2' : ProductionCandidate20210527US_mult2,
'ProductionCandidate20210527US_mult4' : ProductionCandidate20210527US_mult4,
'ProductionCandidate20210527US_mult8' : ProductionCandidate20210527US_mult8,
'ProductionCandidate20210527US_mult8_add005_dsepJune3' : ProductionCandidate20210527US_mult8_add005_dsepJune3,
'ProductionCandidate20210527US_mult8_add01_dsepJune3' : ProductionCandidate20210527US_mult8_add01_dsepJune3,
'ProductionCandidate20210527US_mult8_add02_dsepJune3' : ProductionCandidate20210527US_mult8_add02_dsepJune3,
'ProductionCandidate20210527US_mult8_add03_dsepJune3' : ProductionCandidate20210527US_mult8_add03_dsepJune3,
'ProductionCandidate20210527US_mult8_add04_dsepJune3' : ProductionCandidate20210527US_mult8_add04_dsepJune3,
| |
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import numpy as np
import torch
def to_tensor(data):
"""
Convert numpy array to PyTorch tensor. For complex arrays, the real and imaginary parts
are stacked along the last dimension.
Args:
data (np.array): Input numpy array
Returns:
torch.Tensor: PyTorch version of data
"""
if np.iscomplexobj(data):
data = np.stack((data.real, data.imag), axis=-1)
return torch.from_numpy(data)
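# Illustrative usage (sketch, not part of the original module):
# kspace = np.array([[1 + 2j, 3 - 1j]], dtype=np.complex64)
# to_tensor(kspace).shape # -> torch.Size([1, 2, 2]); the last dimension holds (real, imag)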
def to_numpy(data):
"""
Convert a PyTorch tensor to a numpy array. For complex tensors with two channels in the last dimension, a complex numpy array is returned.
Args:
data (torch.Tensor): Input torch tensor
Returns:
np.array numpy arrays
"""
if data.shape[-1] == 2:
out = np.zeros(data.shape[:-1], dtype=np.complex64)
real = data[..., 0].numpy()
imag = data[..., 1].numpy()
out.real = real
out.imag = imag
else:
out = data.numpy()
return out
def apply_mask(data, mask_func, seed=None):
"""
Subsample given k-space by multiplying with a mask.
Args:
data (torch.Tensor): The input k-space data. This should have at least 3 dimensions, where
dimensions -3 and -2 are the spatial dimensions, and the final dimension has size
2 (for complex values).
mask_func (callable): A function that takes a shape (tuple of ints) and a random
number seed and returns a mask.
seed (int or 1-d array_like, optional): Seed for the random number generator.
Returns:
(tuple): tuple containing:
masked data (torch.Tensor): Subsampled k-space data
mask (torch.Tensor): The generated mask
"""
shape = np.array(data.shape)
shape[:-3] = 1
mask = mask_func(shape, seed)
return data * mask, mask
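# shape[:-3] is set to 1 so the mask is generated with singleton batch/coil dimensions and broadcasts
# across them when multiplied with the k-space data; only the two spatial dimensions and the trailing
# complex dimension keep their original sizes.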
def fft2(data, normalized=True):
"""
Apply centered 2 dimensional Fast Fourier Transform.
Args:
data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
-3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
assumed to be batch dimensions.
Returns:
torch.Tensor: The FFT of the input.
"""
assert data.size(-1) == 2
data = ifftshift(data, dim=(-3, -2))
data = torch.fft(data, 2, normalized=normalized)
data = fftshift(data, dim=(-3, -2))
return data
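# The ifftshift / fft / fftshift sandwich implements a "centered" FFT: the input is assumed to have its
# origin at the image center, and the output spectrum is returned with the zero-frequency component at
# the center as well.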
def rfft2(data):
"""
Apply centered 2 dimensional real-to-complex Fast Fourier Transform.
Args:
data (torch.Tensor): Real valued input data containing at least 2 dimensions: dimensions
-2 & -1 are spatial dimensions. All other dimensions are assumed to be batch dimensions.
Returns:
torch.Tensor: The FFT of the input, with an extra final dimension of size 2 holding the real and imaginary parts.
"""
data = ifftshift(data, dim=(-2, -1))
data = torch.rfft(data, 2, normalized=True, onesided=False)
data = fftshift(data, dim=(-3, -2))
return data
def ifft2(data, normalized=True):
"""
Apply centered 2-dimensional Inverse Fast Fourier Transform.
Args:
data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
-3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
assumed to be batch dimensions.
Returns:
torch.Tensor: The IFFT of the input.
"""
assert data.size(-1) == 2
data = ifftshift(data, dim=(-3, -2))
data = torch.ifft(data, 2, normalized=normalized)
data = fftshift(data, dim=(-3, -2))
return data
def irfft2(data):
"""
Apply centered 2-dimensional Inverse Fast Fourier Transform.
Args:
data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
-3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
assumed to be batch dimensions.
Returns:
torch.Tensor: The real-valued IFFT of the input.
"""
data = ifftshift(data, dim=(-3, -2))
data = torch.irfft(data, 2, normalized=True, onesided=False)
data = fftshift(data, dim=(-2, -1))
return data
def complex_to_mag_phase(data):
"""
:param data (torch.Tensor): A complex valued tensor, where the size of the third last dimension should be 2
:return: Mag and Phase (torch.Tensor): tensor of same size as input
"""
assert data.size(-3) == 2
mag = (data ** 2).sum(dim=-3).sqrt()
phase = torch.atan2(data[:, 1, :, :], data[:, 0, :, :])
return torch.stack((mag, phase), dim=-3)
def mag_phase_to_complex(data):
"""
:param data (torch.Tensor): Mag and Phase (torch.Tensor):
:return: A complex valued tensor, where the size of the third last dimension is 2
"""
assert data.size(-3) == 2
real = data[:, 0, :, :] * torch.cos(data[:, 1, :, :])
imag = data[:, 0, :, :] * torch.sin(data[:, 1, :, :])
return torch.stack((real, imag), dim=-3)
def partial_fourier(data):
"""
Placeholder for partial Fourier handling (not implemented).
"""
def complex_abs(data):
"""
Compute the absolute value of a complex valued input tensor.
Args:
data (torch.Tensor): A complex valued tensor, where the size of the final dimension
should be 2.
Returns:
torch.Tensor: Absolute value of data
"""
assert data.size(-1) == 2 or data.size(-3) == 2
return (data ** 2).sum(dim=-1).sqrt() if data.size(-1) == 2 else (data ** 2).sum(dim=-3).sqrt()
def root_sum_of_squares(data, dim=0):
"""
Compute the Root Sum of Squares (RSS) transform along a given dimension of a tensor.
Args:
data (torch.Tensor): The input tensor
        dim (int): The dimension along which to apply the RSS transform
Returns:
torch.Tensor: The RSS value
"""
return torch.sqrt((data ** 2).sum(dim))
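# Coil-combination sketch (illustrative only): take the magnitude of per-coil complex images
# and combine them with root-sum-of-squares along the coil dimension. Shapes are assumptions.
#
#     coil_images = torch.randn(15, 320, 320, 2)          # (coils, height, width, real/imag)
#     magnitudes = complex_abs(coil_images)                # (coils, height, width)
#     combined = root_sum_of_squares(magnitudes, dim=0)    # (height, width)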
def center_crop(data, shape):
"""
Apply a center crop to the input real image or batch of real images.
Args:
data (torch.Tensor): The input tensor to be center cropped. It should have at
least 2 dimensions and the cropping is applied along the last two dimensions.
shape (int, int): The output shape. The shape should be smaller than the
corresponding dimensions of data.
Returns:
torch.Tensor: The center cropped image
"""
assert 0 < shape[0] <= data.shape[-2]
assert 0 < shape[1] <= data.shape[-1]
w_from = (data.shape[-2] - shape[0]) // 2
h_from = (data.shape[-1] - shape[1]) // 2
w_to = w_from + shape[0]
h_to = h_from + shape[1]
return data[..., w_from:w_to, h_from:h_to]
def complex_center_crop(data, shape):
"""
Apply a center crop to the input image or batch of complex images.
Args:
data (torch.Tensor): The complex input tensor to be center cropped. It should
have at least 3 dimensions and the cropping is applied along dimensions
-3 and -2 and the last dimensions should have a size of 2.
shape (int, int): The output shape. The shape should be smaller than the
corresponding dimensions of data.
Returns:
torch.Tensor: The center cropped image
"""
assert 0 < shape[0] <= data.shape[-3]
assert 0 < shape[1] <= data.shape[-2]
w_from = (data.shape[-3] - shape[0]) // 2
h_from = (data.shape[-2] - shape[1]) // 2
w_to = w_from + shape[0]
h_to = h_from + shape[1]
return data[..., w_from:w_to, h_from:h_to, :]
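# Cropping sketch (illustrative only): center-crop a real image and a complex image to 320x320.
# Input shapes are assumptions.
#
#     real_img = torch.randn(1, 640, 368)
#     cropped_real = center_crop(real_img, (320, 320))                 # (1, 320, 320)
#     complex_img = torch.randn(1, 640, 368, 2)
#     cropped_complex = complex_center_crop(complex_img, (320, 320))   # (1, 320, 320, 2)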
def normalize(data, mean, stddev, eps=0.):
"""
Normalize the given tensor using:
(data - mean) / (stddev + eps)
Args:
data (torch.Tensor): Input data to be normalized
mean (float): Mean value
stddev (float): Standard deviation
eps (float): Added to stddev to prevent dividing by zero
Returns:
torch.Tensor: Normalized tensor
"""
return (data - mean) / (stddev + eps)
def normalize_instance(data, eps=0.):
"""
Normalize the given tensor using:
(data - mean) / (stddev + eps)
where mean and stddev are computed from the data itself.
Args:
data (torch.Tensor): Input data to be normalized
eps (float): Added to stddev to prevent dividing by zero
    Returns:
        (tuple): tuple containing the normalized tensor, the computed mean, and the computed
            standard deviation
"""
mean = data.mean()
std = data.std()
return normalize(data, mean, std, eps), mean, std
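# Normalization sketch (illustrative only): normalize an image by its own statistics and
# undo the transform afterwards. The shape is an assumption.
#
#     image = torch.randn(1, 320, 320) * 3.0 + 7.0
#     normed, mean, std = normalize_instance(image, eps=1e-11)
#     restored = normed * (std + 1e-11) + mean    # recovers `image` up to numerical error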
def normalize_volume(data, mean, std, eps=0.):
"""
Normalize the given tensor using:
(data - mean) / (stddev + eps)
where mean and stddev are provided and computed from volume.
    Args:
        data (torch.Tensor): Input data to be normalized
        mean (float): Mean of the whole volume
        std (float): Standard deviation of the whole volume
        eps (float): Added to stddev to prevent dividing by zero
    Returns:
        (tuple): tuple containing the normalized tensor, the mean, and the standard deviation
"""
return normalize(data, mean, std, eps), mean, std
def normalize_complex(data, eps=0.):
"""
Normalize the given complex tensor using:
(data - mean) / (stddev + eps)
where mean and stddev are computed from magnitude of data.
Note that data is centered by complex mean so that the result centered data have average zero magnitude.
    Args:
        data (torch.Tensor): Input data to be normalized (*, 2)
        eps (float): Added to stddev to prevent dividing by zero
    Returns:
        (tuple): tuple containing:
            torch.Tensor: Normalized complex tensor with 2 channels (*, 2)
            mag_mean: Mean of the image magnitude
            mag_std: Standard deviation of the image magnitude
    """
mag = complex_abs(data)
mag_mean = mag.mean()
mag_std = mag.std()
temp = mag_mean/mag
mean_real = data[..., 0] * temp
mean_imag = data[..., 1] * temp
mean_complex = torch.stack((mean_real, mean_imag), dim=-1)
stddev = mag_std
    return (data - mean_complex) / (stddev + eps), mag_mean, mag_std
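# Complex-normalization sketch (illustrative only): the data is centered by a complex "mean"
# that points in the direction of each sample, so the result has roughly zero average magnitude.
# The shape below is an assumption.
#
#     kspace = torch.randn(320, 320, 2)
#     normed, mag_mean, mag_std = normalize_complex(kspace)
#     # normed has the same shape as kspace; mag_mean and mag_std are scalars of the magnitude image.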
<filename>sedenoss/sedenoss/models.py
from torchtools.optim import Ranger
from torchtools.nn import Mish
import pytorch_lightning as pl
from sedenoss.utils import *
from sedenoss.loss import SI_SDR_Loss
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
EPS = 1e-8
class Conv2d(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True):
"""Custom Conv2d module. Refer to nn.Conv2d documentation
"""
super(Conv2d, self).__init__(in_channels, out_channels, kernel_size, stride,
padding, dilation, groups, bias)
def forward(self, x):
weight = self.weight
weight_mean = weight.mean(dim=1, keepdim=True).mean(dim=2,
keepdim=True).mean(dim=3, keepdim=True)
weight = weight - weight_mean
std = weight.view(weight.size(0), -1).std(dim=1).view(-1, 1, 1, 1) + 1e-5
weight = weight / std.expand_as(weight)
return F.conv2d(x, weight, self.bias, self.stride,
self.padding, self.dilation, self.groups)
class Conv1d(nn.Conv1d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True):
"""Custom Conv1d module. Refer to nn.Conv1d documentation
"""
super(Conv1d, self).__init__(in_channels, out_channels, kernel_size, stride,
padding, dilation, groups, bias)
def forward(self, x):
weight = self.weight
weight_mean = weight.mean(dim=1, keepdim=True).mean(dim=2,
keepdim=True)
weight = weight - weight_mean
std = weight.view(weight.size(0), -1).std(dim=1).view(-1, 1, 1) + 1e-5
weight = weight / std.expand_as(weight)
return F.conv1d(x, weight, self.bias, self.stride,
self.padding, self.dilation, self.groups)
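# Weight-standardization sketch (illustrative only): each output filter is zero-centered and
# scaled to unit standard deviation before the convolution is applied. Values are assumptions.
#
#     conv = Conv1d(in_channels=4, out_channels=8, kernel_size=3, padding=1)
#     x = torch.randn(2, 4, 100)
#     y = conv(x)                              # (2, 8, 100)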
# Based on https://github.com/ShiZiqiang/dual-path-RNNs-DPRNNs-based-speech-separation implementation
class Encoder(nn.Module):
"""Estimation of the nonnegative mixture weight by a 1-D conv layer.
Args:
mixture: [B, T], B is batch size, T is #samples
Returns:
mixture_w: [B, N, L], where L = (T-W)/(W/2)+1 = 2T/W-1
L is the number of time steps
"""
def __init__(self, W=2, N=64):
super(Encoder, self).__init__()
# Hyper-parameter
self.W, self.N = W, N
# Components
        # 1-D conv encoder with 50% overlap between frames, followed by Mish activation
        self.conv1d_U = nn.Conv1d(1, N, kernel_size=W, stride=W // 2, bias=False)
        self.mish = Mish()
def forward(self, mixture):
mixture = torch.unsqueeze(mixture, 1) # [B, 1, T]
mixture_w = self.mish(self.conv1d_U(mixture)) # [B, N, L]
return mixture_w
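# Shape sketch (illustrative only): with W=2 and N=64 the encoder maps a waveform of T samples
# to a (B, N, L) feature map with L = 2*T/W - 1. Values below are assumptions.
#
#     enc = Encoder(W=2, N=64)
#     mixture = torch.randn(8, 16000)          # (batch, samples)
#     mixture_w = enc(mixture)                 # (8, 64, 15999)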
class Decoder(nn.Module):
"""
Decoder module.
Args:
mixture_w: [B, E, L]
est_mask: [B, C, E, L]
Returns:
est_source: [B, C, T]
"""
def __init__(self, E, W):
super(Decoder, self).__init__()
# Hyper-parameter
self.E, self.W = E, W
# Components
self.basis_signals = nn.Linear(E, W, bias=False)
def forward(self, mixture_w, est_mask):
source_w = torch.unsqueeze(mixture_w, 1) * est_mask # [B, C, E, L]
source_w = torch.transpose(source_w, 2, 3) # [B, C, L, E]
est_source = self.basis_signals(source_w) # [B, C, L, W]
est_source = overlap_and_add(est_source, self.W // 2) # B x C x T
return est_source
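# Shape sketch (illustrative only): the decoder turns masked encoder features back into C
# estimated waveforms via a linear basis and overlap-and-add. Values below are assumptions,
# and `overlap_and_add` is expected to come from sedenoss.utils.
#
#     dec = Decoder(E=64, W=2)
#     mixture_w = torch.randn(8, 64, 15999)        # (B, E, L)
#     est_mask = torch.rand(8, 2, 64, 15999)       # (B, C, E, L)
#     est_source = dec(mixture_w, est_mask)        # (B, C, T), here (8, 2, 16000)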
class SingleRNN(nn.Module):
"""
Container module for a single RNN layer.
args:
rnn_type: string, select from 'RNN', 'LSTM' and 'GRU'.
input_size: int, dimension of the input feature. The input should have shape
(batch, seq_len, input_size).
hidden_size: int, dimension of the hidden state.
dropout: float, dropout ratio. Default is 0.
bidirectional: bool, whether the RNN layers are bidirectional. Default is False.
"""
def __init__(self, rnn_type, input_size, hidden_size, dropout=0.2, bidirectional=False):
super(SingleRNN, self).__init__()
self.rnn_type = rnn_type
self.input_size = input_size
self.hidden_size = hidden_size
self.num_direction = int(bidirectional) + 1
self.rnn = getattr(nn, rnn_type)(input_size, hidden_size, 1, dropout=dropout, batch_first=True,
bidirectional=bidirectional)
# linear projection layer
self.proj = nn.Linear(hidden_size * self.num_direction, input_size)
def forward(self, input):
# input shape: batch, seq, dim
output = input
rnn_output, hidden = self.rnn(output)
rnn_output = self.proj(rnn_output.contiguous().view(-1, rnn_output.shape[2])).view(output.shape)
return rnn_output
# dual-path RNN
class DPRNN(nn.Module):
"""
Deep duaL-path RNN.
args:
rnn_type: string, select from 'RNN', 'LSTM' and 'GRU'.
input_size: int, dimension of the input feature. The input should have shape
(batch, seq_len, input_size).
hidden_size: int, dimension of the hidden state.
output_size: int, dimension of the output size.
dropout: float, dropout ratio. Default is 0.
num_layers: int, number of stacked RNN layers. Default is 1.
bidirectional: bool, whether the RNN layers are bidirectional. Default is False.
"""
def __init__(self, rnn_type, input_size, hidden_size, output_size,
dropout=0, num_layers=1, bidirectional=True):
super(DPRNN, self).__init__()
self.input_size = input_size
self.output_size = output_size
self.hidden_size = hidden_size
# dual-path RNN
self.row_rnn = nn.ModuleList([])
self.col_rnn = nn.ModuleList([])
self.row_norm = nn.ModuleList([])
self.col_norm = nn.ModuleList([])
for i in range(num_layers):
self.row_rnn.append(SingleRNN(rnn_type, input_size, hidden_size, dropout,
bidirectional=True)) # intra-segment RNN is always noncausal
self.col_rnn.append(SingleRNN(rnn_type, input_size, hidden_size, dropout, bidirectional=bidirectional))
self.row_norm.append(nn.GroupNorm(1, input_size, eps=1e-8))
# default is to use noncausal LayerNorm for inter-chunk RNN. For causal setting change it to causal normalization techniques accordingly.
self.col_norm.append(nn.GroupNorm(1, input_size, eps=1e-8))
# output layer
self.output = nn.Sequential(Mish(),
nn.Conv2d(input_size, output_size, 1))
def forward(self, input):
# input shape: batch, N, dim1, dim2
# apply RNN on dim1 first and then dim2
# output shape: B, output_size, dim1, dim2
batch_size, _, dim1, dim2 = input.shape
output = input
for i in range(len(self.row_rnn)):
row_input = output.permute(0, 3, 2, 1).contiguous().view(batch_size * dim2, dim1, -1) # B*dim2, dim1, N
# row_input = self.attn(row_input.permute(0,2,1)).view(batch_size * dim2, dim1, -1)
row_output = self.row_rnn[i](row_input) # B*dim2, dim1, H
row_output = row_output.view(batch_size, dim2, dim1, -1).permute(0, 3, 2,
1).contiguous() # B, N, dim1, dim2
row_output = self.row_norm[i](row_output)
output = output + row_output
col_input = output.permute(0, 2, 3, 1).contiguous().view(batch_size * dim1, dim2, -1) # B*dim1, dim2, N
# col_input = self.attn(col_input.permute(1,2,0)).view(batch_size * dim1, dim2, -1)
col_output = self.col_rnn[i](col_input) # B*dim1, dim2, H
col_output = col_output.view(batch_size, dim1, dim2, -1).permute(0, 3, 1,
2).contiguous() # B, N, dim1, dim2
col_output = self.col_norm[i](col_output)
output = output + col_output
output = self.output(output) # B, output_size, dim1, dim2
return output
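# Shape sketch (illustrative only): DPRNN keeps the two chunk dimensions and changes only the
# channel dimension. Values below are assumptions.
#
#     dprnn = DPRNN('LSTM', input_size=64, hidden_size=128, output_size=128, num_layers=2)
#     x = torch.randn(4, 64, 100, 50)          # (B, N, dim1, dim2)
#     y = dprnn(x)                             # (4, 128, 100, 50)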
# base module for deep DPRNN
class DPRNN_base(nn.Module):
def __init__(self, input_dim, feature_dim, hidden_dim, num_spk=2,
layer=4, segment_size=100, bidirectional=True, rnn_type='LSTM'):
"""DPRNN base module.
Args:
input_dim: int
feature_dim: int
hidden_dim: int
num_spk: int, refers to number of speakers
layer: int, refers to number of layers,
segment_size: int,
bidirectional: bool,
rnn_type: str, e.g. 'LSTM'
"""
super(DPRNN_base, self).__init__()
self.input_dim = input_dim
self.feature_dim = feature_dim
self.hidden_dim = hidden_dim
self.layer = layer
self.segment_size = segment_size
self.num_spk = num_spk
self.eps = 1e-8
# bottleneck
self.BN = nn.Conv1d(self.input_dim, self.feature_dim, 1, bias=False)
# DPRNN model
self.DPRNN = DPRNN(rnn_type, self.feature_dim, self.hidden_dim,
self.feature_dim * self.num_spk,
num_layers=layer, bidirectional=bidirectional)
def pad_segment(self, input, segment_size):
# input is the features: (B, N, T)
batch_size, dim, seq_len = input.shape
segment_stride = segment_size // 2
rest = segment_size - (segment_stride + seq_len % segment_size) % segment_size
if rest > 0:
pad = Variable(torch.zeros(batch_size, dim, rest)).type(input.type())
input = torch.cat([input, pad], 2)
pad_aux = Variable(torch.zeros(batch_size, dim, segment_stride)).type(input.type())
input = torch.cat([pad_aux, input, pad_aux], 2)
return input, rest
def split_feature(self, input, segment_size):
# split the feature into chunks of segment size
# input is the features: (B, N, T)
input, rest = self.pad_segment(input, segment_size)
batch_size, dim, seq_len = input.shape
segment_stride = segment_size // 2
segments1 = input[:, :, :-segment_stride].contiguous().view(batch_size, dim, -1, segment_size)
segments2 = input[:, :, segment_stride:].contiguous().view(batch_size, dim, -1, segment_size)
segments = torch.cat([segments1, segments2], 3).view(batch_size, dim, -1, segment_size).transpose(2, 3)
return segments.contiguous(), rest
def merge_feature(self, input, rest):
# merge the splitted features into full utterance
# input is the features: (B, N, L, K)
batch_size, dim, segment_size, _ = input.shape
segment_stride = segment_size // 2
input = input.transpose(2, 3).contiguous().view(batch_size, dim, -1, segment_size * 2) # B, N, K, L
input1 = input[:, :, :, :segment_size].contiguous().view(batch_size, dim, -1)[:, :, segment_stride:]
input2 = input[:, :, :, segment_size:].contiguous().view(batch_size, dim, -1)[:, :, :-segment_stride]
output = input1 + input2
if rest > 0:
output = output[:, :, :-rest]
return output.contiguous() # B, N, T
def forward(self, input):
pass
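# Split/merge sketch (illustrative only): split_feature pads and chunks a (B, N, T) feature map
# into overlapping segments of length `segment_size`, and merge_feature overlap-adds them back.
# Values below are assumptions.
#
#     base = DPRNN_base(input_dim=64, feature_dim=64, hidden_dim=128, segment_size=100)
#     feats = torch.randn(2, 64, 1234)                                 # (B, N, T)
#     segments, rest = base.split_feature(feats, base.segment_size)    # (B, N, 100, K)
#     merged = base.merge_feature(segments, rest)                      # (B, N, 1234)
#     # With 50% overlap each time step is covered by two segments, so an identity pass is
#     # expected to return 2 * feats.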
# DPRNN for beamforming filter estimation
class BF_module(DPRNN_base):
"""Beamforming module
"""
def __init__(self, *args, **kwargs):
super(BF_module, self).__init__(*args, **kwargs)
# gated output layer
self.output = nn.Sequential(nn.Conv1d(self.feature_dim, self.feature_dim, 1),
nn.Tanh()
)
self.output_gate = nn.Sequential(nn.Conv1d(self.feature_dim, self.feature_dim, 1),
nn.Sigmoid()
)
def forward(self, input):
# input = input.to(device)
# input: (B, E, T)
batch_size, E, seq_length = input.shape
enc_feature = self.BN(input) # (B, E, L)-->(B, N, L)
# split the encoder output into overlapped, longer segments
enc_segments, enc_rest = self.split_feature(enc_feature, self.segment_size) # B, N, L, K: L is the segment_size
# print('enc_segments.shape {}'.format(enc_segments.shape))
# pass to DPRNN
output = self.DPRNN(enc_segments).view(batch_size * self.num_spk, self.feature_dim, self.segment_size,
-1) # B*nspk, N, L, K
# overlap-and-add of the outputs
output = self.merge_feature(output, enc_rest) # B*nspk, N, T
# output = self.attn(output)
# gated output layer for filter generation
bf_filter = self.output(output) * self.output_gate(output) # B*nspk, K, T
bf_filter = bf_filter.transpose(1, 2).contiguous().view(batch_size, self.num_spk, -1,
self.feature_dim) # B, nspk, T, N
return bf_filter
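# Filter-estimation sketch (illustrative only): BF_module maps encoder features to one mask-like
# filter per speaker. Values below are assumptions.
#
#     bf = BF_module(input_dim=64, feature_dim=64, hidden_dim=128, num_spk=2, layer=2, segment_size=50)
#     enc_out = torch.randn(4, 64, 999)            # (B, E, T)
#     bf_filter = bf(enc_out)                      # (B, num_spk, T, feature_dim) = (4, 2, 999, 64)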
class FaSNet_base(pl.LightningModule):
"""Model module used for the study
Args:
enc_dim: int, Encoder dimensions
feature_dim: int, Feature dimensions
hidden_dim: int, Hidden dimensions
layer: int, number of layers to use
        segment_size: int,