ext (string, 9 classes) | sha (string, 40 chars) | content (string, 3 to 1.04M chars) |
---|---|---|
py | 1a535d8a613aa9c558a4b7afc28071ba8a2ea246 | import ast
import json
import os
import time
from pathlib import Path
from typing import Optional
import pandas as pd
from gensim import corpora
from gensim.models import LdaModel
from txtai.embeddings import Embeddings
def get_raw_data(datadir: str) -> Optional[pd.DataFrame]:
return get_data(get_raw_data_path(datadir))
def get_raw_data_path(datadir: str) -> Path:
return get_data_path(datadir, "0_raw", "data.csv", True)
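# Resolve a path inside the data directory as datadir[/stage[/filename]], raising FileNotFoundError for a missing directory (and, optionally, a missing file).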
def get_data_path(
dir: str,
stage: Optional[str] = None,
filename: Optional[str] = None,
file_exists: bool = False,
) -> Path:
path = Path(dir)
if not path.is_dir():
raise FileNotFoundError(f"Data directory not found: {path}")
if not stage:
return path
path = path.joinpath(Path(stage))
if not path.is_dir():
raise FileNotFoundError(f"Directory not found: {path}")
if not filename:
return path
path = path.joinpath(Path(filename))
if file_exists and not path.is_file():
raise FileNotFoundError(f"File not found: {path}")
return path
def get_data(filename: Path, kwargs: dict = {}) -> Optional[pd.DataFrame]:
try:
return pd.read_csv(filename, **kwargs) # type: ignore
except FileNotFoundError:
return None
def get_clean_data(datadir: str) -> Optional[pd.DataFrame]:
return get_data(
get_clean_data_path(datadir), dict(converters=dict(tags=ast.literal_eval))
)
def get_clean_data_path(datadir: str) -> Path:
return get_data_path(datadir, "1_interim", "cleaned.csv")
def get_scraped_data_path(datadir: str) -> Path:
return get_data_path(datadir, "0_external", "scraped.json")
def get_extracted_data(datadir: str) -> Optional[pd.DataFrame]:
return get_data(get_extracted_data_path(datadir))
def get_extracted_data_path(datadir: str) -> Path:
return get_data_path(datadir, "1_interim", "extracted.csv")
def get_raw_inventory_data_path(datadir: str) -> Path:
return get_data_path(datadir, "0_raw", "inventory.xlsx")
def get_inventory_data_path(datadir: str) -> Path:
return get_data_path(datadir, "1_interim", "inventory.txt")
def get_transformed_data(datadir: str) -> Optional[pd.DataFrame]:
return get_data(
get_transformed_data_path(datadir),
dict(
converters=dict(
tags=ast.literal_eval, url=ast.literal_eval, entities=ast.literal_eval
)
),
)
def get_transformed_data_path(datadir: str) -> Path:
return get_data_path(datadir, "1_interim", "transformed.csv")
def get_descriptions_data_path(datadir: str) -> Path:
return get_data_path(datadir, "1_interim", "descriptions.txt")
def get_text_corpus(datadir: str) -> list[list[str]]:
return load_data(get_text_corpus_path(datadir))
def get_text_corpus_path(datadir: str) -> Path:
return get_data_path(datadir, "1_interim", "corpus_text.json")
def get_dict_corpus(datadir: str) -> corpora.Dictionary:
return corpora.Dictionary.load_from_text(get_dict_corpus_path(datadir))
def get_dict_corpus_path(datadir: str) -> Path:
return get_data_path(datadir, "1_interim", "corpus_dict.txt")
def get_bow_corpus(datadir: str) -> list[list[tuple[int, int]]]:
return load_data(get_bow_corpus_path(datadir))
def get_bow_corpus_path(datadir: str) -> Path:
return get_data_path(datadir, "1_interim", "corpus_bow.json")
def get_model(datadir: str, suffix: str) -> LdaModel:
return LdaModel.load(get_model_path(datadir, suffix).as_posix())
def get_model_path(datadir: str, suffix: str) -> Path:
return get_data_path(datadir, "2_final", f"topic_model_{suffix}")
def get_model_id2word(datadir: str, suffix: str) -> corpora.Dictionary:
return corpora.Dictionary.load(
get_model_path(datadir, f"{suffix}.id2word").as_posix()
)
def get_final_data(datadir: str, suffix: str) -> Optional[pd.DataFrame]:
return get_data(
get_final_data_path(datadir, suffix),
dict(
converters=dict(
tags=ast.literal_eval, url=ast.literal_eval, entities=ast.literal_eval
)
),
)
def get_final_data_path(datadir: str, suffix: str) -> Path:
return get_data_path(datadir, "2_final", f"data_{suffix}.csv")
def list_final_data(datadir: str):
final_data = [
(os.path.getmtime(p), p.stem, lastModified(p))
for p in get_data_path(datadir, "2_final").glob("*.csv")
]
final_data.sort(key=lambda x: x[0], reverse=True)
return [f"{f[1]}, {f[2]}" for f in final_data]
def dump_data(filepath: Path, data):
with open(filepath, "w") as f:
json.dump(data, f, indent=2)
def load_data(filepath: Path):
with open(filepath, "r") as f:
return json.load(f)
def lastModified(filepath: Path) -> str:
modified = os.path.getmtime(filepath)
return (
f"last modified: {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(modified))}"
)
def get_embeddings_path(datadir: str) -> Path:
return get_data_path(datadir, "2_final", "embeddings")
|
py | 1a535dcc39a9f7e87780f3bc9b6c0c69c26859a4 | from django.conf.urls import url
from . import views
app_name = 'prospecting'
urlpatterns = [
# ex: /prospecting/
url(r'^$', views.IndexView.as_view(), name='index'),
# ex: /prospecting/5/
url(r'^(?P<pk>[0-9]+)/$', views.DetailView.as_view(), name='detail'),
# ex: /prospecting/5/results/
url(r'^(?P<pk>[0-9]+)/results/$', views.ResultsView.as_view(), name='results'),
# ex: /prospecting/dashboard/
url(r'^dashboard/$', views.dashboard, name='dashboard'),
# ex: /prospecting/5/prospect_view/
url(r'^(?P<account_id>[0-9]+)/prospect_view/$', views.ProspectViewView, name='prospect_view'),
# page for adding a new account
url(r'^new_account/$', views.new_account, name='new_account'),
# page for adding a new prospect
url(r'^new_prospect/(?P<account_id>\d+)/$', views.new_prospect, name='new_prospect'),
# editing account information view
url(r'^edit_account/(?P<account_id>\d+)/$', views.edit_account, name='edit_account'),
# editing prospect information view
url(r'^edit_prospect/(?P<prospect_id>\d+)/$', views.edit_prospect, name='edit_prospect'),
]
|
py | 1a535f4ba090178e02163a38994ad2a7eee9d2df | from NekoGram import Neko, Bot
import pytest
neko = Neko(bot=Bot(token='0:0', validate_token=False), validate_text_names=False)
@pytest.mark.asyncio
async def test_build_response():
raw_json = '{"x": {"text": "hello"} }'
neko.add_texts(texts=raw_json, lang='en')
data = await neko.build_text(text='x', user='en')
assert data.data.text == 'hello'
|
py | 1a535fe5d0899378534bb6c39c0a5a528ce59d04 | # Code listing #15
""" Module A (a.py) - Provides string processing functions """
# Note - This is the rewritten version of a_text.py, hence named a_text2.py
def ntimes(string, char):
""" Return number of times character 'char'
occurs in string """
return string.count(char)
def common(string1, string2):
""" Return common words across strings1 1 & 2 """
s1 = set(string1.lower().split())
s2 = set(string2.lower().split())
return s1.intersection(s2)
# String version
def common_words(text1, text2):
""" Return common words across text1 and text2 """
# A text is a collection of strings split using newlines
strings1 = text1.split("\n")
strings2 = text2.split("\n")
common_w = []
for string1 in strings1:
for string2 in strings2:
common_w += common(string1, string2)
return list(set(common_w))
|
py | 1a53609b0b7adb45f2bf549d54f7657ee1e1c846 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-03-18 18:00
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
import taggit.managers
class Migration(migrations.Migration):
dependencies = [
('taggit', '0002_auto_20150616_2121'),
('v1', '0150_add_excluded_updates_to_home_page'),
]
operations = [
migrations.CreateModel(
name='PortalCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('heading', models.CharField(blank=True, max_length=255)),
('heading_es', models.CharField(blank=True, max_length=255)),
],
options={
'verbose_name_plural': 'portal categories',
},
),
migrations.CreateModel(
name='PortalCategoryTag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content_object', modelcluster.fields.ParentalKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='tagged_portal_category', to='v1.PortalCategory')),
('tag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='v1_portalcategorytag_items', to='taggit.Tag')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='PortalTopic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('heading', models.CharField(blank=True, max_length=255)),
('heading_es', models.CharField(blank=True, max_length=255)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='PortalTopicTag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content_object', modelcluster.fields.ParentalKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='tagged_portal_topic', to='v1.PortalTopic')),
('tag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='v1_portaltopictag_items', to='taggit.Tag')),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='portaltopic',
name='tags',
field=taggit.managers.TaggableManager(blank=True, help_text='Tags are used to identify and organize portal topic pages.', through='v1.PortalTopicTag', to='taggit.Tag', verbose_name='Tags'),
),
migrations.AddField(
model_name='portalcategory',
name='tags',
field=taggit.managers.TaggableManager(blank=True, help_text='Tags are used to identify and organize portal see-all pages.', through='v1.PortalCategoryTag', to='taggit.Tag', verbose_name='Tags'),
),
]
|
py | 1a5361bf343941e8e356a422828528def494456d | import torch
import torch.nn as nn
from torch.nn import init
from torchvision import models
from torch.autograd import Variable
import pretrainedmodels
from ghost_net import ghost_net
from torchreid.models import resnet
######################################################################
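# Kaiming initialisation for Conv/Linear weights; BatchNorm1d weights are drawn from N(1.0, 0.02) and biases are set to zero.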
def weights_init_kaiming(m):
classname = m.__class__.__name__
# print(classname)
if classname.find('Conv') != -1:
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in') # For old pytorch, you may use kaiming_normal.
elif classname.find('Linear') != -1:
init.kaiming_normal_(m.weight.data, a=0, mode='fan_out')
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm1d') != -1:
init.normal_(m.weight.data, 1.0, 0.02)
init.constant_(m.bias.data, 0.0)
def weights_init_classifier(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
init.normal_(m.weight.data, std=0.001)
init.constant_(m.bias.data, 0.0)
# Defines the new fc layer and classification layer
# |--Linear--|--bn--|--relu--|--Linear--|
class ClassBlock(nn.Module):
def __init__(self, input_dim, class_num, droprate, relu=False, bnorm=True, num_bottleneck=512, linear=True, return_f = False):
super(ClassBlock, self).__init__()
self.return_f = return_f
add_block = []
if linear:
add_block += [nn.Linear(input_dim, num_bottleneck)]
else:
num_bottleneck = input_dim
if bnorm:
add_block += [nn.BatchNorm1d(num_bottleneck)]
if relu:
add_block += [nn.LeakyReLU(0.1)]
if droprate>0:
add_block += [nn.Dropout(p=droprate)]
add_block = nn.Sequential(*add_block)
add_block.apply(weights_init_kaiming)
classifier = []
classifier += [nn.Linear(num_bottleneck, class_num)]
classifier = nn.Sequential(*classifier)
classifier.apply(weights_init_classifier)
self.add_block = add_block
self.classifier = classifier
def forward(self, x):
x = self.add_block(x)
if self.return_f:
f = x
x = self.classifier(x)
return x,f
else:
x = self.classifier(x)
return x
# Define the ResNet50-based Model
class ft_net(nn.Module):
def __init__(self, class_num, droprate=0.5, stride=2):
super(ft_net, self).__init__()
model_ft = models.resnet50(pretrained=True)
# avg pooling to global pooling
if stride == 1:
model_ft.layer4[0].downsample[0].stride = (1,1)
model_ft.layer4[0].conv2.stride = (1,1)
model_ft.avgpool = nn.AdaptiveAvgPool2d((1,1))
self.model = model_ft
self.classifier = ClassBlock(2048, class_num, droprate)
def forward(self, x):
x = self.model.conv1(x)
x = self.model.bn1(x)
x = self.model.relu(x)
x = self.model.maxpool(x)
x = self.model.layer1(x)
x = self.model.layer2(x)
x = self.model.layer3(x)
x = self.model.layer4(x)
x = self.model.avgpool(x)
x = x.view(x.size(0), x.size(1))
x = self.classifier(x)
return x
# Define the DenseNet121-based Model
class ft_net_dense(nn.Module):
def __init__(self, class_num, droprate=0.5):
super().__init__()
model_ft = models.densenet121(pretrained=True)
model_ft.features.avgpool = nn.AdaptiveAvgPool2d((1,1))
model_ft.fc = nn.Sequential()
self.model = model_ft
# For DenseNet, the feature dim is 1024
self.classifier = ClassBlock(1024, class_num, droprate)
def forward(self, x):
x = self.model.features(x)
x = x.view(x.size(0), x.size(1))
x = self.classifier(x)
return x
# Define the NAS-based Model
class ft_net_NAS(nn.Module):
def __init__(self, class_num, droprate=0.5):
super().__init__()
model_name = 'nasnetalarge'
# pip install pretrainedmodels
model_ft = pretrainedmodels.__dict__[model_name](num_classes=1000, pretrained='imagenet')
model_ft.avg_pool = nn.AdaptiveAvgPool2d((1,1))
model_ft.dropout = nn.Sequential()
model_ft.last_linear = nn.Sequential()
self.model = model_ft
# For NASNet-A Large, the feature dim is 4032
self.classifier = ClassBlock(4032, class_num, droprate)
def forward(self, x):
x = self.model.features(x)
x = self.model.avg_pool(x)
x = x.view(x.size(0), x.size(1))
x = self.classifier(x)
return x
# Define the ResNet50-based Model (Middle-Concat)
# In the spirit of "The Devil is in the Middle: Exploiting Mid-level Representations for Cross-Domain Instance Matching." Yu, Qian, et al. arXiv:1711.08106 (2017).
class ft_net_middle(nn.Module):
def __init__(self, class_num, droprate=0.5):
super(ft_net_middle, self).__init__()
model_ft = models.resnet50(pretrained=True)
# avg pooling to global pooling
model_ft.avgpool = nn.AdaptiveAvgPool2d((1,1))
self.model = model_ft
self.classifier = ClassBlock(2048+1024, class_num, droprate)
def forward(self, x):
x = self.model.conv1(x)
x = self.model.bn1(x)
x = self.model.relu(x)
x = self.model.maxpool(x)
x = self.model.layer1(x)
x = self.model.layer2(x)
x = self.model.layer3(x)
# x0 n*1024*1*1
x0 = self.model.avgpool(x)
x = self.model.layer4(x)
# x1 n*2048*1*1
x1 = self.model.avgpool(x)
x = torch.cat((x0,x1),1)
x = x.view(x.size(0), x.size(1))
x = self.classifier(x)
return x
# Part Model proposed in Yifan Sun etal. (2018)
class PCB(nn.Module):
def __init__(self, class_num ):
super(PCB, self).__init__()
self.part = 6 # We cut the pool5 to 6 parts
model_ft = models.resnet50(pretrained=True)
self.model = model_ft
self.avgpool = nn.AdaptiveAvgPool2d((self.part,1))
self.dropout = nn.Dropout(p=0.5)
# remove the final downsample
self.model.layer4[0].downsample[0].stride = (1,1)
self.model.layer4[0].conv2.stride = (1,1)
# define 6 classifiers
for i in range(self.part):
name = 'classifier'+str(i)
setattr(self, name, ClassBlock(2048, class_num, droprate=0.5, relu=False, bnorm=True, num_bottleneck=256))
def forward(self, x):
x = self.model.conv1(x)
x = self.model.bn1(x)
x = self.model.relu(x)
x = self.model.maxpool(x)
x = self.model.layer1(x)
x = self.model.layer2(x)
x = self.model.layer3(x)
x = self.model.layer4(x)
x = self.avgpool(x)
x = self.dropout(x)
part = {}
predict = {}
# get six part feature batchsize*2048*6
for i in range(self.part):
part[i] = torch.squeeze(x[:,:,i])
name = 'classifier'+str(i)
c = getattr(self,name)
predict[i] = c(part[i])
# sum prediction
#y = predict[0]
#for i in range(self.part-1):
# y += predict[i+1]
y = []
for i in range(self.part):
y.append(predict[i])
return y
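# Test-time wrapper for PCB: reuses the trained backbone and returns the six part features (batch x 2048 x 6) instead of class logits.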
class PCB_test(nn.Module):
def __init__(self,model):
super(PCB_test,self).__init__()
self.part = 6
self.model = model.model
self.avgpool = nn.AdaptiveAvgPool2d((self.part,1))
# remove the final downsample
self.model.layer4[0].downsample[0].stride = (1,1)
self.model.layer4[0].conv2.stride = (1,1)
def forward(self, x):
x = self.model.conv1(x)
x = self.model.bn1(x)
x = self.model.relu(x)
x = self.model.maxpool(x)
x = self.model.layer1(x)
x = self.model.layer2(x)
x = self.model.layer3(x)
x = self.model.layer4(x)
x = self.avgpool(x)
y = x.view(x.size(0),x.size(1),x.size(2))
return y
# Define the ghost_net-based Model
class GhostNet(nn.Module):
def __init__(self, class_num, droprate=0.5, stride=2):
super(GhostNet, self).__init__()
model_gh = ghost_net(width_mult=1.0)
model_gh.avgpool = nn.AdaptiveAvgPool2d((1,1))
#model_gh.fc = nn.Sequential()
self.model = model_gh
self.classifier = ClassBlock(960, class_num, droprate)
def forward(self, x):
x = self.model.features(x)
x = self.model.squeeze(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
'''
# debug model structure
# Run this code with:
python model.py
'''
if __name__ == '__main__':
# Here I left a simple forward function.
# Test the model, before you train it.
net = GhostNet(751, stride=1)
net.classifier = nn.Sequential()
print(net)
input = Variable(torch.FloatTensor(8, 3, 256, 128))
output = net(input)
print('net output size:')
print(output.shape)
|
py | 1a5363b17c59da74c461150b2dfd54005fa43416 | # Copyright 2013 Nebula Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Object client"""
import logging
from openstackclient.common import utils
LOG = logging.getLogger(__name__)
DEFAULT_OBJECT_API_VERSION = '1'
API_VERSION_OPTION = 'os_object_api_version'
API_NAME = 'object'
API_VERSIONS = {
'1': 'openstackclient.object.client.ObjectClientv1',
}
def make_client(instance):
"""Returns an object service client."""
object_client = utils.get_client_class(
API_NAME,
instance._api_version[API_NAME],
API_VERSIONS)
if instance._url:
endpoint = instance._url
else:
endpoint = instance.get_endpoint_for_service_type(API_NAME)
LOG.debug('instantiating object client')
client = object_client(
endpoint=endpoint,
token=instance._token,
)
return client
def build_option_parser(parser):
"""Hook to add global options"""
parser.add_argument(
'--os-object-api-version',
metavar='<object-api-version>',
default=utils.env(
'OS_OBJECT_API_VERSION',
default=DEFAULT_OBJECT_API_VERSION),
help='Object API version, default=' +
DEFAULT_OBJECT_API_VERSION +
' (Env: OS_OBJECT_API_VERSION)')
return parser
class ObjectClientv1(object):
def __init__(
self,
endpoint_type='publicURL',
endpoint=None,
token=None,
):
self.endpoint_type = endpoint_type
self.endpoint = endpoint
self.token = token
|
py | 1a5363b7b22c56254abdbcbad9f97ac18bebe3ca | # Generated by Django 3.0.8 on 2020-08-06 07:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('patientInformation', '0004_auto_20200806_0052'),
]
operations = [
migrations.AlterField(
model_name='patientinformation',
name='slug',
field=models.SlugField(blank=True, null=True, unique=True),
),
]
|
py | 1a5363c3a9af5658afb42df1795e6c1eb108a891 | import re
import sys
RE = re.compile(r'-?\d+')
ingredients = []
for line in open(sys.argv[1]).readlines():
ingredients.append([int(v) for v in RE.findall(line)])
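# Cookie score: product over every property except the last (calories) of the weighted sums; any negative component makes the result non-positive.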
def score(amounts):
negative = False
product = 1
for i in range(len(ingredients[0])-1):
iscore = sum([amounts[j]*ingredients[j][i] for j in range(len(ingredients))])
product *= abs(iscore)
if iscore < 0:
negative = True
return -product if negative else product
maxscore = 0
iter = 0
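# Brute-force every way of splitting 100 teaspoons across the four ingredients.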
for a in range(101):
for b in range(101):
for c in range(101):
for d in range(101):
iter += 1
#if iter % 1000000 == 0:
# print(iter, maxscore)
if a+b+c+d != 100:
continue
maxscore = max(score([a,b,c,d]), maxscore)
print(maxscore)
|
py | 1a53669b950c580e78c03df2a4bbd4d427c42c4b | from .test_sorting import *
from .test_graph import *
from .test_dp import *
from .util import *
|
py | 1a5366fa87007a79506d9b457d1d97d1702acd41 | from django.http import HttpResponse
from django.test import RequestFactory, TestCase
from test.utils import encode_jwt
class TestDecorators(TestCase):
def setUp(self):
self.factory = RequestFactory()
self.private_key = b"""-----BEGIN RSA PRIVATE KEY-----
MIICWwIBAAKBgQDdlatRjRjogo3WojgGHFHYLugdUWAY9iR3fy4arWNA1KoS8kVw
33cJibXr8bvwUAUparCwlvdbH6dvEOfou0/gCFQsHUfQrSDv+MuSUMAe8jzKE4qW
+jK+xQU9a03GUnKHkkle+Q0pX/g6jXZ7r1/xAK5Do2kQ+X5xK9cipRgEKwIDAQAB
AoGAD+onAtVye4ic7VR7V50DF9bOnwRwNXrARcDhq9LWNRrRGElESYYTQ6EbatXS
3MCyjjX2eMhu/aF5YhXBwkppwxg+EOmXeh+MzL7Zh284OuPbkglAaGhV9bb6/5Cp
uGb1esyPbYW+Ty2PC0GSZfIXkXs76jXAu9TOBvD0ybc2YlkCQQDywg2R/7t3Q2OE
2+yo382CLJdrlSLVROWKwb4tb2PjhY4XAwV8d1vy0RenxTB+K5Mu57uVSTHtrMK0
GAtFr833AkEA6avx20OHo61Yela/4k5kQDtjEf1N0LfI+BcWZtxsS3jDM3i1Hp0K
Su5rsCPb8acJo5RO26gGVrfAsDcIXKC+bQJAZZ2XIpsitLyPpuiMOvBbzPavd4gY
6Z8KWrfYzJoI/Q9FuBo6rKwl4BFoToD7WIUS+hpkagwWiz+6zLoX1dbOZwJACmH5
fSSjAkLRi54PKJ8TFUeOP15h9sQzydI8zJU+upvDEKZsZc/UhT/SySDOxQ4G/523
Y0sz/OZtSWcol/UMgQJALesy++GdvoIDLfJX5GBQpuFgFenRiRDabxrE9MNUZ2aP
FaFp+DyAe+b4nDwuJaW2LURbr8AEZga7oQj0uYxcYw==
-----END RSA PRIVATE KEY-----"""
self.public_key = b"""-----BEGIN PUBLIC KEY-----
MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDdlatRjRjogo3WojgGHFHYLugd
UWAY9iR3fy4arWNA1KoS8kVw33cJibXr8bvwUAUparCwlvdbH6dvEOfou0/gCFQs
HUfQrSDv+MuSUMAe8jzKE4qW+jK+xQU9a03GUnKHkkle+Q0pX/g6jXZ7r1/xAK5D
o2kQ+X5xK9cipRgEKwIDAQAB
-----END PUBLIC KEY-----"""
self.settings = {
'audience': 'audience',
'issuer': 'issuer',
}
from ridi.django_jwt.config import configure
configure(
key=self.public_key,
audience='audience',
issuer='issuer',
algorithm='RS256',
)
def test_jwt_required(self):
from ridi.django_jwt.decorators import jwt_required
@jwt_required()
def view(request):
return HttpResponse('view')
encoded_jwt = encode_jwt({
'aud': self.settings['audience'],
'iss': self.settings['issuer'],
}, self.private_key)
request = self.factory.get('/', HTTP_AUTHORIZATION='Bearer ' + encoded_jwt)
response = view(request)
self.assertEqual(response.status_code, 200)
def test_jwt_required_mixin(self):
from ridi.django_jwt.decorators import JWTRequiredMixin
from django.views.generic import View
class IndexView(JWTRequiredMixin, View):
def get(self, request):
return HttpResponse()
encoded_jwt = encode_jwt({
'aud': self.settings['audience'],
'iss': self.settings['issuer'],
}, self.private_key)
request = self.factory.get('/', HTTP_AUTHORIZATION='Bearer ' + encoded_jwt)
response = IndexView.as_view()(request)
self.assertEqual(response.status_code, 200)
|
py | 1a5367005df674a7d5c76634c7b931103172d987 | import numpy as np
from openea.modules.finding.alignment import greedy_alignment
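# Evaluate entity alignment by greedy nearest-neighbour matching between the two embedding sets, optionally applying the mapping matrix to the first set.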
def valid(embeds1, embeds2, mapping, top_k, threads_num, metric='inner', normalize=False, csls_k=0, accurate=False):
if mapping is None:
_, hits1_12, mr_12, mrr_12 = greedy_alignment(embeds1, embeds2, top_k, threads_num,
metric, normalize, csls_k, accurate)
else:
test_embeds1_mapped = np.matmul(embeds1, mapping)
_, hits1_12, mr_12, mrr_12 = greedy_alignment(test_embeds1_mapped, embeds2, top_k, threads_num,
metric, normalize, csls_k, accurate)
return hits1_12, mrr_12
def test(embeds1, embeds2, mapping, top_k, threads_num, metric='inner', normalize=False, csls_k=0, accurate=True):
if mapping is None:
alignment_rest_12, hits1_12, mr_12, mrr_12 = greedy_alignment(embeds1, embeds2, top_k, threads_num,
metric, normalize, csls_k, accurate)
else:
test_embeds1_mapped = np.matmul(embeds1, mapping)
alignment_rest_12, hits1_12, mr_12, mrr_12 = greedy_alignment(test_embeds1_mapped, embeds2, top_k, threads_num,
metric, normalize, csls_k, accurate)
return alignment_rest_12, hits1_12, mrr_12
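# Signal early stopping when the validation metric has not improved for two consecutive evaluations (flag <= flag2 <= flag1).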
def early_stop(flag1, flag2, flag):
if flag <= flag2 <= flag1:
print("\n == should early stop == \n")
return flag2, flag, True
else:
return flag2, flag, False
|
py | 1a53670b62c352782a14825c6b5fb07b1b9caab8 | import pickle
import sys, os.path
parent_dir = os.path.dirname(os.path.dirname(__file__))
sys.path.insert(0, parent_dir)
from blinker._utilities import symbol
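# symbol() interns names: calling it twice with the same name must return the identical object.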
def test_symbols():
foo = symbol('foo')
assert foo.name == 'foo'
assert foo is symbol('foo')
bar = symbol('bar')
assert foo is not bar
assert foo != bar
assert not foo == bar
assert repr(foo) == 'foo'
def test_pickled_symbols():
foo = symbol('foo')
for protocol in 0, 1, 2:
roundtrip = pickle.loads(pickle.dumps(foo))
assert roundtrip is foo
|
py | 1a5367acb37b31d07d9c17d118220fbc944376dd | import os
import sys
import glob
import numpy as np
import torch
import logging
import argparse
import torch.nn as nn
import genotypes
import torch.utils
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from thop import profile
sys.path.insert(0, '../darts-minus')
from src import utils
from src.eval.model import NetworkCIFAR as Network
parser = argparse.ArgumentParser("cifar")
parser.add_argument('--data', type=str, default='../data', help='location of the data corpus')
parser.add_argument('--batch_size', type=int, default=96, help='batch size')
parser.add_argument('--report_freq', type=float, default=50, help='report frequency')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--init_channels', type=int, default=36, help='num of init channels')
parser.add_argument('--layers', type=int, default=20, help='total number of layers')
parser.add_argument('--model_path', type=str, default='EXP/model.pt', help='path of pretrained model')
parser.add_argument('--auxiliary', action='store_true', default=False, help='use auxiliary tower')
parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
parser.add_argument('--drop_path_prob', type=float, default=0.2, help='drop path probability')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument('--arch', type=str, default='DARTS_MINUS_CIFAR10_S5_Best', help='which architecture to use')
args = parser.parse_args()
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
CIFAR_CLASSES = 10
def main():
if not torch.cuda.is_available():
logging.info('no gpu device available')
sys.exit(1)
np.random.seed(args.seed)
torch.cuda.set_device(args.gpu)
cudnn.benchmark = True
torch.manual_seed(args.seed)
cudnn.enabled=True
torch.cuda.manual_seed(args.seed)
logging.info('gpu device = %d' % args.gpu)
logging.info("args = %s", args)
genotype = eval("genotypes.%s" % args.arch)
model = Network(args.init_channels, CIFAR_CLASSES, args.layers, args.auxiliary, genotype)
model.drop_path_prob = 0.0  # disable drop path while profiling FLOPs (this script defines no --epochs argument)
flops, params = profile(model, inputs=(torch.randn(1, 3, 32, 32),), verbose=False)
logging.info('flops = %fM', flops / 1e6)
logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
model = model.cuda()
utils.load(model, args.model_path)
criterion = nn.CrossEntropyLoss()
criterion = criterion.cuda()
_, test_transform = utils._data_transforms_cifar10(args)
test_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=test_transform)
test_queue = torch.utils.data.DataLoader(
test_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2)
model.drop_path_prob = args.drop_path_prob
with torch.no_grad():
test_acc, test_obj = infer(test_queue, model, criterion)
logging.info('test_acc %f', test_acc)
def infer(test_queue, model, criterion):
objs = utils.AverageMeter()
top1 = utils.AverageMeter()
top5 = utils.AverageMeter()
model.eval()
for step, (input, target) in enumerate(test_queue):
input = Variable(input).cuda()
target = Variable(target).cuda(non_blocking=True)  # 'async' is a reserved word in Python 3.7+; PyTorch >= 0.4 uses non_blocking
logits, _ = model(input)
loss = criterion(logits, target)
prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
n = input.size(0)
objs.update(loss.item(), n)
top1.update(prec1.item(), n)
top5.update(prec5.item(), n)
if step % args.report_freq == 0:
logging.info('test %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
return top1.avg, objs.avg
if __name__ == '__main__':
main()
|
py | 1a53698be3186e2e02d4c37de3b7f71987c12916 | from collections import defaultdict
from typing import List
import config
def number_game(numbers: str, max_turns: int = 2020) -> int:
"""Simulate the number game.
Args:
numbers (str): comma-separated starting numbers for the game
max_turns (int): number of turns to simulate
Returns:
int: the last number spoken on turn max_turns
"""
last_turns = defaultdict(list)
times_spoken = defaultdict(int)
numbers = numbers.split(',')
starting_turn = len(numbers) + 1
for turn, number in enumerate(numbers, start=1):
last_number = int(number)
last_turns[last_number].append(turn)
times_spoken[last_number] += 1
for turn in range(starting_turn, max_turns + 1):
if times_spoken[last_number] == 1:
last_number = 0
else:
last_number = (
last_turns[last_number][-1] - last_turns[last_number][-2])
last_turns[last_number].append(turn)
times_spoken[last_number] += 1
return last_number
def main() -> None:
"""Simulate the number game described in day 15."""
# Part A
test_answer = 436
file = config.TestFile(test_answer)
test = number_game(file.contents[0])
file.test(test)
# Part B
file.answer = 175594
test = number_game(file.contents[0], max_turns=30000000)
file.test(test)
# Part A
file = config.File()
result = number_game(file.contents[0])
config.log_part_info('A', result)
# Part B
result = number_game(file.contents[0], max_turns=30000000)
config.log_part_info('B', result)
if __name__ == '__main__':
main()
|
py | 1a536ae18097c7be351a70ae8e7dc2b0c2910b87 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .client import RemarketingActionServiceClient
__all__ = ("RemarketingActionServiceClient",)
|
py | 1a536c6e94b0ace7ea684582eb196b03a9b5227a | """
Utility functions.
"""
import sys
import os
import logging
from glob import glob
def add_pyspark_path_if_needed():
"""Add PySpark to the library path based on the value of SPARK_HOME if
pyspark is not already in our path"""
try:
from pyspark import context
except ImportError:
# We need to add PySpark, try findspark if we can but it has an
# undeclared IPython dep.
try:
import findspark
findspark.init()
except ImportError:
add_pyspark_path()
def add_pyspark_path():
"""Add PySpark to the library path based on the value of SPARK_HOME."""
try:
spark_home = os.environ['SPARK_HOME']
sys.path.append(os.path.join(spark_home, 'python'))
py4j_src_zip = glob(os.path.join(spark_home, 'python',
'lib', 'py4j-*-src.zip'))
if len(py4j_src_zip) == 0:
raise ValueError('py4j source archive not found in %s'
% os.path.join(spark_home, 'python', 'lib'))
else:
py4j_src_zip = sorted(py4j_src_zip)[::-1]
sys.path.append(py4j_src_zip[0])
except KeyError:
print("""SPARK_HOME was not set. please set it. e.g.
SPARK_HOME='/home/...' ./bin/pyspark [program]""")
exit(-1)
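# Raise the py4j logger to INFO so its verbose debug output does not flood test logs.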
def quiet_py4j():
logger = logging.getLogger('py4j')
logger.setLevel(logging.INFO)
|
py | 1a536d3d52fe1dcc693cb07b16ae08797f45c21c | from django.urls import path
from . import views
urlpatterns = [
path('home',views.index,name="index"),
path('',views.landing,name="landing"),
path('register',views.register,name="register"),
path('logout',views.logout,name="logout"),
path('profile/edit', views.profile, name="profile"),
path('profile/<int:id>/', views.myprofile, name="myprofile"),
path('search', views.SearchResultsView.as_view(),name="search")
]
|
py | 1a536d580a614528a24362b1c2f0b6044829314a | from .a2c import A2C
from pprint import pprint
|
py | 1a536e87a1c98e1a9b98230dd99dc13a433df6f7 | # coding=utf-8
import datetime
import os
import gym
import numpy
import matplotlib.pyplot as plt
import pandas
from dateutil import relativedelta
from gym import spaces
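# Gym environment simulating EUR/USD trading on 1-minute candles with fixed take-profit / stop-loss tickets.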
class FxEnv(gym.Env):
metadata = {'render.modes': ['human', 'ohlc_array']}
def __init__(self):
# Constants
self.STAY = 0
self.BUY = 1
self.SELL = 2
self.CLOSE = 3
# Maximum value of the target currency pair
self.MAX_VALUE = 2
# Initial account balance
self.initial_balance = 10000
# CSV file paths (at least 4 months of data, in ascending order)
self.csv_file_paths = []
now = datetime.datetime.now()
for _ in range(4):
now = now - relativedelta.relativedelta(months=1)
filename = 'DAT_MT_EURUSD_M1_{}.csv'.format(now.strftime('%Y%m'))
if not os.path.exists(filename):
print('File does not exist. Please download it from the link below.', filename)
print('http://www.histdata.com/download-free-forex-historical-data/?/metatrader/1-minute-bar-quotes/EURUSD/')
else:
self.csv_file_paths.append(filename)
# Spread
self.spread = 0.5
# Point (value of 1 pip)
self.point = 0.0001
# Take-profit pips
self.take_profit_pips = 30
# Stop-loss pips
self.stop_loss_pips = 15
# Lot size
self.lots = 0.01
# Lot base (units per lot)
self.lot_base = 100000
# Actions 0-3; details are given in the constants above
self.action_space = gym.spaces.Discrete(4)
# Number of visible bars in the observation
self.visible_bar = 32
# Build the 1-minute, 5-minute, 30-minute and 4-hour time series for the visible number of bars
self._reset()
self.observation_space = spaces.Box(low=0, high=self.MAX_VALUE, shape=numpy.shape(self.make_obs('ohlc_array')))
self.m5 = []
self.m30 = []
self.h4 = []
def _reset(self):
self.info = AccountInformation(self.initial_balance)
# Read the CSV files
self.data = pandas.DataFrame()
for path in self.csv_file_paths:
csv = pandas.read_csv(path,
names=['date', 'time', 'open', 'high', 'low', 'close', 'v'],
parse_dates={'datetime': ['date', 'time']},
)
csv.index = csv['datetime']
csv = csv.drop('datetime', axis=1)
csv = csv.drop('v', axis=1)
self.data = self.data.append(csv)
# Use the index where the last-loaded CSV begins as the starting index
self.read_index = len(self.data) - len(csv)
# Randomly shift the start position from there (keep 5 days / 7220 minutes of data)
# self.read_index += numpy.random.randint(0, (len(csv) - 7220))
# List of open tickets
self.tickets = []
return self.make_obs('ohlc_array')
def _step(self, action):
current_data = self.data.iloc[self.read_index]
ask = current_data['close'] + self.spread * self.point
bid = current_data['close'] - self.spread * self.point
if action == self.STAY:
for ticket in self.tickets:
if ticket.order_type == self.BUY:
if bid > ticket.take_profit:
# Take profit on the buy ticket
profit = (ticket.take_profit - ticket.open_price) * ticket.lots * self.lot_base
self.info.balance += profit
self.info.total_pips_buy += profit
elif bid < ticket.stop_loss:
# Stop loss on the buy ticket
profit = (ticket.stop_loss - ticket.open_price) * ticket.lots * self.lot_base
self.info.balance += profit
self.info.total_pips_buy += profit
elif ticket.order_type == self.SELL:
if ask < ticket.take_profit:
# Take profit on the sell ticket
profit = (ticket.open_price - ticket.take_profit) * ticket.lots * self.lot_base
self.info.balance += profit
self.info.total_pips_sell += profit
elif ask > ticket.stop_loss:
# Stop loss on the sell ticket
profit = (ticket.open_price - ticket.stop_loss) * ticket.lots * self.lot_base
self.info.balance += profit
self.info.total_pips_sell += profit
elif action == self.BUY:
ticket = Ticket(self.BUY, ask, ask + self.take_profit_pips * self.point,
ask - self.stop_loss_pips * self.point, self.lots)
self.tickets.append(ticket)
pass
elif action == self.SELL:
ticket = Ticket(self.SELL, bid, bid - self.take_profit_pips * self.point,
bid + self.stop_loss_pips * self.point, self.lots)
self.tickets.append(ticket)
pass
elif action == self.CLOSE:
for ticket in self.tickets:
if ticket.order_type == self.BUY:
# Close the buy ticket
profit = (bid - ticket.open_price) * ticket.lots * self.lot_base
self.info.balance += profit
self.info.total_pips_buy += profit
elif ticket.order_type == self.SELL:
# Close the sell ticket
profit = (ticket.open_price - ask) * ticket.lots * self.lot_base
self.info.balance += profit
self.info.total_pips_sell += profit
# done = self.info.balance <= 0 or self.read_index >= len(self.data)
# reward = 0.00001 * (self.info.total_pips_buy + self.info.total_pips_sell)
done = self.info.balance > self.info.fixed_balance * 2
reward = self.info.balance
# Increment the index
self.read_index += 1
# Return obs, reward, done, info
return self.make_obs('ohlc_array'), reward, done, self.info
def _render(self, mode='human', close=False):
return self.make_obs(mode)
def make_obs(self, mode):
"""
Create 64 bars' worth of the four time series: 1-minute, 5-minute, 30-minute and 4-hour candles
:return:
"""
target = self.data.iloc[self.read_index - 60 * 4 * 70: self.read_index]
if mode == 'human':
m1 = numpy.array(target.iloc[-1 * self.visible_bar:][target.columns])
m5 = numpy.array(target.resample('5min').agg({'open': 'first',
'high': 'max',
'low': 'min',
'close': 'last'}).dropna().iloc[-1 * self.visible_bar:][target.columns])
m30 = numpy.array(target.resample('30min').agg({'open': 'first',
'high': 'max',
'low': 'min',
'close': 'last'}).dropna().iloc[-1 * self.visible_bar:][target.columns])
h4 = numpy.array(target.resample('4H').agg({'open': 'first',
'high': 'max',
'low': 'min',
'close': 'last'}).dropna().iloc[-1 * self.visible_bar:][target.columns])
return numpy.array([m1, m5, m30, h4])
# For 'human' mode, create a chart image with matplotlib instead?
# fig = plt.figure(figsize=(10, 4))
# # A candlestick's full width is 1.0; divide by the number of displayed bars, then use 1/3 of that as the candle width
# width = 1.0 / 64 / 3
# # 1-minute bars
# ax = plt.subplot(2, 2, 1)
# # Disable offset notation on the y-axis.
# ax.get_yaxis().get_major_formatter().set_useOffset(False)
# data = target.iloc[-1 * self.visible_bar:].values
# mpf.candlestick_ohlc(ax, data, width=width, colorup='g', colordown='r')
# # 5-minute bars
# ax = plt.subplot(2, 2, 2)
# ax.get_yaxis().get_major_formatter().set_useOffset(False)
# data = target['close'].resample('5min').ohlc().dropna().iloc[-1 * self.visible_bar:].values
# mpf.candlestick_ohlc(ax, data, width=width, colorup='g', colordown='r')
# # 30-minute bars
# ax = plt.subplot(2, 2, 3)
# ax.get_yaxis().get_major_formatter().set_useOffset(False)
# data = target['close'].resample('30min').ohlc().dropna().iloc[-1 * self.visible_bar:].values
# mpf.candlestick_ohlc(ax, data, width=width, colorup='g', colordown='r')
# # 4-hour bars
# ax = plt.subplot(2, 2, 4)
# ax.get_yaxis().get_major_formatter().set_useOffset(False)
# data = target['close'].resample('4H').ohlc().dropna().iloc[-1 * self.visible_bar:].values
# mpf.candlestick_ohlc(ax, data, width=width, colorup='g', colordown='r')
# return fig.canvas.buffer_rgba()
elif mode == 'ohlc_array':
m1 = numpy.array(target.iloc[-1 * self.visible_bar:][target.columns])
m5 = numpy.array(target.resample('5min').agg({'open': 'first',
'high': 'max',
'low': 'min',
'close': 'last'}).dropna().iloc[-1 * self.visible_bar:][target.columns])
m30 = numpy.array(target.resample('30min').agg({'open': 'first',
'high': 'max',
'low': 'min',
'close': 'last'}).dropna().iloc[-1 * self.visible_bar:][target.columns])
h4 = numpy.array(target.resample('4H').agg({'open': 'first',
'high': 'max',
'low': 'min',
'close': 'last'}).dropna().iloc[-1 * self.visible_bar:][target.columns])
return numpy.array([m1, m5, m30, h4])
class AccountInformation(object):
"""
Account information class
"""
def __init__(self, initial_balance):
# Account balance (including unrealized P/L)
self.balance = initial_balance
# Account balance (realized only)
self.fixed_balance = initial_balance
# Total pips gained on buys
self.total_pips_buy = 0
# Total pips gained on sells
self.total_pips_sell = 0
def items(self):
'''
Called from rl\core.py line 172
:return: account information
'''
return [('balance', self.balance), ('fixed_balance', self.fixed_balance), ('total_pips_buy', self.total_pips_buy), ('total_pips_sell', self.total_pips_sell)]
class Ticket(object):
"""
Ticket (an open trade)
"""
def __init__(self, order_type, open_price, take_profit, stop_loss, lots):
# Order type
self.order_type = order_type
# Open (execution) price
self.open_price = open_price
# Take-profit price
self.take_profit = take_profit
# Stop-loss price
self.stop_loss = stop_loss
# Lot size
self.lots = lots
|
py | 1a536eab82d37bf7d48d470e7e6f3f313eed35a6 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
import tushare_easy
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [
'arrow',
'unipath',
'pandas',
'lxml',
'tushare',
]
setup_requirements = [
'lxml',
'pandas',
]
test_requirements = [
'lxml',
'pandas',
'flake8',
'tox',
]
setup(
name='tushare_easy',
version=tushare_easy.__version__,
description='make tushare easyer',
long_description=readme + '\n\n' + history,
author='yingnn',
author_email='[email protected]',
url='https://github.com/yingnn/tushare_easy',
packages=find_packages(include=['tushare_easy']),
include_package_data=True,
install_requires=requirements,
license="MIT license",
zip_safe=False,
keywords='tushare_easy, tushare',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
],
test_suite='tests',
tests_require=test_requirements,
setup_requires=setup_requirements,
scripts = ['scripts/tushare_easy'],
)
|
py | 1a536ffba68d08b0e4401a28e73cb39363cbdd6f | import unittest
from esdlvalidator.validation.tests import get_test_schema_data, get_test_schema_id, get_test_dataset_ameland, get_test_dataset_hybrid
from esdlvalidator.validation.validator_esdl import EsdlValidator
class TestValidator(unittest.TestCase):
"""Tests for the validator"""
@classmethod
def setUpClass(cls):
super(TestValidator, cls).setUpClass()
cls.schemaOne = get_test_schema_id(get_test_schema_data("testdata/schema_test_1.json"))
cls.schemaTwo = get_test_schema_id(get_test_schema_data("testdata/schema_test_2.json"))
cls.esdlAmeland = get_test_dataset_ameland()
cls.esdlHybrid = get_test_dataset_hybrid()
def test_validate_schema_1(self):
"""test running the validator for test schema 1 and ameland test esdl"""
# prepare
validator = EsdlValidator()
# execute, validate against 1 schema
result = validator.validate(self.esdlAmeland, [self.schemaOne])
validationAreaScope = result.schemas[0].validations[0]
# assert
self.assertEqual(validationAreaScope.checked, 8, "there should be 8 checked")
self.assertEqual(len(validationAreaScope.warnings), 1, "there should be 1 warning")
self.assertEqual(validationAreaScope.warnings[0], "Area does not have a scope: scope cannot be null for entity BU00600007", "Warning should say: Area does not have a scope: scope cannot be null for entity BU00600007")
def test_validate_schema_2(self):
"""test running the validator on test schema 2 on dynamic test esdl with a real world scenario, multiple validations including and + or"""
# prepare
validator = EsdlValidator()
# execute, validate against 1 schema
result = validator.validate(self.esdlHybrid, [self.schemaTwo])
validationProducer = result.schemas[0].validations[0]
validationStorage = result.schemas[0].validations[1]
validationGasHeater = result.schemas[0].validations[2]
validationHeatpump = result.schemas[0].validations[3]
validationCostsInRange = result.schemas[0].validations[4]
# assert
self.assertEqual(validationProducer.checked, 3, "there should be 3 checked since there are only 3 producers")
self.assertEqual(len(validationProducer.errors), 2, "there should be 2 errors since 1 producer validates ok")
self.assertEqual(validationProducer.errors[0], "Consumer missing power and marginal costs or no energy profile connected: property port.profile value is None", "Warning should say: Consumer missing power and marginal costs or no energy profile connected: property port.profile value is None")
self.assertEqual(validationStorage.checked, 1, "there should be 1 checked storage")
self.assertEqual(len(validationStorage.errors), 0, "there should be 0 errors, storage should be correct")
self.assertEqual(validationGasHeater.checked, 1, "there should be 1 checked GasHeater")
self.assertEqual(len(validationGasHeater.warnings), 0, "there should be 0 warnings, gasheater should be correct")
self.assertEqual(validationHeatpump.checked, 1, "there should be 1 checked HeatPump")
self.assertEqual(len(validationHeatpump.warnings), 1, "there should be 1 warnings, heatpump should be missing a control strategy")
self.assertEqual(validationCostsInRange.checked, 3, "there should be 3 checked costs")
self.assertEqual(len(validationCostsInRange.warnings), 1, "there should be 1 warnings")
def test_validate_multiple_schemas(self):
"""Test if the validator works with checking multiple schemas"""
# prepare
validator = EsdlValidator()
# execute, validate against 2 schemas
result = validator.validate(self.esdlHybrid, [self.schemaOne, self.schemaTwo])
# assert
self.assertEqual(len(result.schemas), 2, "there should be 2 schemas in the result")
self.assertEqual(result.valid, False, "There should be errors in the schema's, valid should be false")
self.assertEqual(result.errorCount, 2, "There should be a total of 2 errors")
self.assertEqual(result.warningCount, 3, "There should be 3 warnings in total")
|
py | 1a537020e58a018ec3e04ebd840e7cd349eb51d7 | # Copyright (c) OpenMMLab. All rights reserved.
from .class_names import (cityscapes_classes, coco_classes, dataset_aliases,
get_classes, imagenet_det_classes,
imagenet_vid_classes, voc_classes,
person_classes, triage_classes)
from .eval_hooks import DistEvalHook, EvalHook
from .mean_ap import average_precision, eval_map, print_map_summary
from .recall import (eval_recalls, plot_iou_recall, plot_num_recall,
print_recall_summary)
__all__ = [
'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes',
'coco_classes', 'cityscapes_classes', 'dataset_aliases', 'get_classes',
'DistEvalHook', 'EvalHook', 'average_precision', 'eval_map',
'print_map_summary', 'eval_recalls', 'print_recall_summary',
'plot_num_recall', 'plot_iou_recall', 'person_classes', 'triage_classes'
]
|
py | 1a53709b026c6bbdef404a5926a22e5726488a5a | # ******************************************************************************
# Copyright 2017-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
"""Factory functions for all ngraph ops."""
import numpy as np
from ngraph.impl import AxisSet, AxisVector, Coordinate, CoordinateDiff, Function, Node, \
Shape, Strides
from ngraph.impl.op import Abs, Acos, Add, And, Asin, ArgMax, ArgMin, Atan, AvgPool, \
BatchNormTraining, BatchNormInference, Broadcast, Ceiling, Concat, Constant, Convert, \
Convolution, ConvolutionBackpropData, Cos, Cosh, Divide, Dot, Equal, Exp, Floor, \
GetOutputElement, Greater, GreaterEq, Less, LessEq, Log, LRN, Max, Maximum, MaxPool, \
Min, Minimum, Multiply, Negative, Not, NotEqual, OneHot, Or, Pad, Parameter, Product, \
Power, Relu, ReplaceSlice, Reshape, Reverse, Select, Sign, Sin, Sinh, Slice, Softmax, \
Sqrt, Subtract, Sum, Tan, Tanh, TopK
from typing import Callable, Iterable, List, Union
from ngraph.utils.broadcasting import get_broadcast_axes
from ngraph.utils.decorators import nameable_op, binary_op, unary_op
from ngraph.utils.input_validation import assert_list_of_ints
from ngraph.utils.reduction import get_reduction_axes
from ngraph.utils.types import NumericType, NumericData, TensorShape, make_constant_node, \
NodeInput, ScalarData
from ngraph.utils.types import get_element_type
@nameable_op
def parameter(shape, dtype=np.float32, name=None):
# type: (TensorShape, NumericType, str) -> Parameter
"""Return an ngraph Parameter object."""
assert_list_of_ints(shape, 'Parameter shape must be a list of integer values.')
element_type = get_element_type(dtype)
return Parameter(element_type, Shape(shape))
@nameable_op
def constant(value, dtype=None, name=None): # type: (NumericData, NumericType, str) -> Constant
"""Create a Constant node from provided value.
:param value: One of: array of values or scalar to initialize node with.
:param dtype: The data type of provided data.
:param name: Optional name for output node.
:return: The Constant node initialized with provided data.
"""
return make_constant_node(value, dtype)
# Unary ops
@unary_op
def absolute(node, name=None): # type: (NodeInput, str) -> Node
"""Return node which applies f(x) = abs(x) to the input node element-wise.
:param node: One of: input node, array or scalar.
:param name: Optional new name for output node.
:return: New node with Abs operation applied on it.
"""
return Abs(node)
@unary_op
def acos(node, name=None): # type: (NodeInput, str) -> Node
"""Apply inverse cosine function on the input node element-wise.
:param node: One of: input node, array or scalar.
:param name: Optional new name for output node.
:return: New node with arccos operation applied on it.
"""
return Acos(node)
@unary_op
def asin(node, name=None): # type: (NodeInput, str) -> Node
"""Apply inverse sine function on the input node element-wise.
:param node: One of: input node, array or scalar.
:param name: Optional new name for output node.
:return: New node with arcsin operation applied on it.
"""
return Asin(node)
@unary_op
def atan(node, name=None): # type: (NodeInput, str) -> Node
"""Apply inverse tangent function on the input node element-wise.
:param node: One of: input node, array or scalar.
:param name: Optional new name for output node.
:return: New node with arctan operation applied on it.
"""
return Atan(node)
@unary_op
def cos(node, name=None): # type: (NodeInput, str) -> Node
"""Apply cosine function on the input node element-wise.
:param node: One of: input node, array or scalar.
:param name: Optional new name for output node.
:return: New node with cos operation applied on it.
"""
return Cos(node)
@unary_op
def cosh(node, name=None): # type: (NodeInput, str) -> Node
"""Apply hyperbolic cosine function on the input node element-wise.
:param node: One of: input node, array or scalar.
:param name: Optional new name for output node.
:return: New node with cosh operation applied on it.
"""
return Cosh(node)
@unary_op
def sqrt(node, name=None): # type: (NodeInput, str) -> Node
"""Return node which applies square root to the input node element-wise.
:param node: One of: input node, array or scalar.
:param name: Optional new name for output node.
:return: The new node with sqrt operation applied element-wise.
"""
return Sqrt(node)
@unary_op
def exp(node, name=None): # type: (NodeInput, str) -> Node
"""Return node which applies exp to the input node element-wise.
:param node: The node providing data for operation.
:param name: The optional name for new output node.
:return: The new node performing natural exponential operation.
"""
return Exp(node)
@unary_op
def log(node, name=None): # type: (NodeInput, str) -> Node
"""Return node which applies natural logarithm to the input node element-wise.
:param node: The input node providing data for operation.
:param name: The optional new name for output node.
:return: The new node performing log operation element-wise.
"""
return Log(node)
@unary_op
def negative(node, name=None): # type: (NodeInput, str) -> Node
"""Return node which applies f(x) = -x to the input node elementwise."""
return Negative(node)
@unary_op
def floor(node, name=None): # type: (NodeInput, str) -> Node
"""Return node which applies floor to the input node element-wise.
:param node: The input node providing data.
:param name: The optional name for new output node.
:return: The node performing element-wise floor operation.
"""
return Floor(node)
@unary_op
def ceiling(node, name=None): # type: (NodeInput, str) -> Node
"""Return node which applies ceiling to the input node element-wise.
:param node: The node providing data to ceiling operation.
:param name: Optional name for output node.
:return: The node performing element-wise ceiling.
"""
return Ceiling(node)
@unary_op
def reshape(node, output_shape, input_order=None, name=None):
# type: (Node, List[int], List[int], str) -> Node
"""Return reshaped node according to provided parameters.
:param node: The tensor we want to reshape.
:param input_order: The order in which to iterate over input axes of input tensor.
:param output_shape: The new shape for input tensor.
"""
if input_order is None:
input_order = list(range(len(node.shape)))
return Reshape(node, AxisVector(input_order), Shape(output_shape))
@unary_op
def relu(node, name=None): # type: (NodeInput, str) -> Node
"""Perform rectified linear unit operation on input node element-wise.
:param node: One of: input node, array or scalar.
:param name: The optional output node name.
:return: The new node performing relu operation on its input element-wise.
"""
return Relu(node)
@unary_op
def sign(node, name=None): # type: (NodeInput, str) -> Node
"""Perform element-wise sign operation.
:param node: One of: input node, array or scalar.
:param name: The optional new name for output node.
:return: The node with mapped elements of the input tensor to -1 (if it is negative),
0 (if it is zero), or 1 (if it is positive).
"""
return Sign(node)
@unary_op
def sin(node, name=None): # type: (NodeInput, str) -> Node
"""Apply sine function on the input node element-wise.
:param node: One of: input node, array or scalar.
:param name: Optional new name for output node.
:return: New node with sin operation applied on it.
"""
return Sin(node)
@unary_op
def sinh(node, name=None): # type: (NodeInput, str) -> Node
"""Apply hyperbolic sine function on the input node element-wise.
:param node: One of: input node, array or scalar.
:param name: Optional new name for output node.
:return: New node with sinh operation applied on it.
"""
return Sinh(node)
@unary_op
def tan(node, name=None): # type: (NodeInput, str) -> Node
"""Apply tangent function on the input node element-wise.
:param node: One of: input node, array or scalar.
:param name: Optional new name for output node.
:return: New node with tan operation applied on it.
"""
return Tan(node)
# Binary ops
@binary_op
def divide(left_node, right_node, name=None): # type: (NodeInput, NodeInput, str) -> Node
"""Return node which applies f(x) = A/B to the input nodes element-wise.
:param left_node: The node providing dividend data.
:param right_node: The node providing divisor data.
:param name: Optional name for output node.
:return: The node performing element-wise division.
"""
return Divide(left_node, right_node)
@binary_op
def multiply(left_node, right_node, name=None): # type: (NodeInput, NodeInput, str) -> Node
"""Return node which applies f(x) = A*B to the input nodes elementwise."""
return Multiply(left_node, right_node)
@binary_op
def subtract(left_node, right_node, name=None): # type: (NodeInput, NodeInput, str) -> Node
"""Return node which applies f(x) = A-B to the input nodes element-wise.
:param left_node: The node providing data for left hand side of operator.
:param right_node: The node providing data for right hand side of operator.
:param name: The optional name for output node.
:return: The new output node performing subtraction operation on both tensors element-wise.
"""
return Subtract(left_node, right_node)
@binary_op
def add(left_node, right_node, name=None): # type: (NodeInput, NodeInput, str) -> Node
"""Return node which applies f(x) = A+B to the input nodes element-wise."""
return Add(left_node, right_node)
@binary_op
def minimum(left_node, right_node, name=None): # type: (NodeInput, NodeInput, str) -> Node
"""Return node which applies the minimum operation to input nodes elementwise."""
return Minimum(left_node, right_node)
@binary_op
def maximum(left_node, right_node, name=None): # type: (NodeInput, NodeInput, str) -> Node
"""Return node which applies the maximum operation to input nodes elementwise."""
return Maximum(left_node, right_node)
@binary_op
def power(left_node, right_node, name=None): # type: (NodeInput, NodeInput, str) -> Node
"""Return node which perform element-wise exponentiation operation.
:param left_node: The node providing the base of operation.
:param right_node: The node providing the exponent of operation.
:param name: The optional name for the new output node.
:return: The new node performing element-wise exponentiation operation on input nodes.
"""
return Power(left_node, right_node)
# Logical ops
@binary_op
def equal(left_node, right_node, name=None): # type: (NodeInput, NodeInput, str) -> Node
"""Return node which checks if input nodes are equal element-wise.
:param left_node: The first input node for equal operation.
:param right_node: The second input node for equal operation.
    :param name: The optional name for the new output node.
:return: The node performing element-wise equality check.
"""
return Equal(left_node, right_node)
@binary_op
def not_equal(left_node, right_node, name=None): # type: (NodeInput, NodeInput, str) -> Node
"""Return node which checks if input nodes are unequal element-wise.
:param left_node: The first input node for not-equal operation.
:param right_node: The second input node for not-equal operation.
    :param name: The optional name for the new output node.
:return: The node performing element-wise inequality check.
"""
return NotEqual(left_node, right_node)
@binary_op
def greater(left_node, right_node, name=None): # type: (NodeInput, NodeInput, str) -> Node
"""Return node which checks if left input node is greater than the right node element-wise.
:param left_node: The first input node providing data.
:param right_node: The second input node providing data.
:param name: The optional new name for output node.
:return: The node performing element-wise check whether left_node is greater than right_node.
"""
return Greater(left_node, right_node)
@binary_op
def greater_eq(left_node, right_node, name=None): # type: (NodeInput, NodeInput, str) -> Node
"""Return node which checks if left node is greater or equal to the right node element-wise.
:param left_node: The first input node providing data.
:param right_node: The second input node providing data.
:param name: The optional new name for output node.
    :return: The node performing element-wise check whether left_node is greater than or
             equal to right_node.
"""
return GreaterEq(left_node, right_node)
@binary_op
def less(left_node, right_node, name=None): # type: (NodeInput, NodeInput, str) -> Node
"""Return node which checks if left input node is less than the right node element-wise.
:param left_node: The first input node providing data.
:param right_node: The second input node providing data.
:param name: The optional new name for output node.
:return: The node performing element-wise check whether left_node is less than the right_node.
"""
return Less(left_node, right_node)
@binary_op
def less_eq(left_node, right_node, name=None): # type: (NodeInput, NodeInput, str) -> Node
"""Return node which checks if left input node is less or equal the right node element-wise.
:param left_node: The first input node providing data.
:param right_node: The second input node providing data.
:param name: The optional new name for output node.
    :return: The node performing element-wise check whether left_node is less than or
             equal to the right_node.
"""
return LessEq(left_node, right_node)
@binary_op
def logical_and(left_node, right_node, name=None): # type: (NodeInput, NodeInput, str) -> Node
"""Return node which perform logical and operation on input nodes element-wise.
:param left_node: The first input node providing data.
:param right_node: The second input node providing data.
:param name: The optional new name for output node.
:return: The node performing logical and operation on input nodes corresponding elements.
"""
return And(left_node, right_node)
@binary_op
def logical_or(left_node, right_node, name=None): # type: (NodeInput, NodeInput, str) -> Node
"""Return node which performs logical or operation on input nodes element-wise.
:param left_node: The first input node providing data.
:param right_node: The second input node providing data.
:param name: The optional new name for output node.
:return: The node performing logical or operation on input nodes corresponding elements.
"""
return Or(left_node, right_node)
@unary_op
def logical_not(node, name=None): # type: (Node, str) -> Node
"""Return node which applies logical negation to the input node elementwise."""
return Not(node)
# Extend Node class to support binary operators
Node.__add__ = add
Node.__sub__ = subtract
Node.__mul__ = multiply
Node.__div__ = divide
Node.__truediv__ = divide
Node.__radd__ = lambda left, right: add(right, left)
Node.__rsub__ = lambda left, right: subtract(right, left)
Node.__rmul__ = lambda left, right: multiply(right, left)
Node.__rdiv__ = lambda left, right: divide(right, left)
Node.__rtruediv__ = lambda left, right: divide(right, left)
Node.__eq__ = equal
Node.__ne__ = not_equal
Node.__lt__ = less
Node.__le__ = less_eq
Node.__gt__ = greater
Node.__ge__ = greater_eq
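# Illustrative sketch: thanks to the operator bindings above, ordinary Python
# expressions on Node objects build graph ops. Assuming `a` and `b` are existing
# nodes (scalars and arrays are also accepted, since the ops take NodeInput):
#
#     c = a + b            # equivalent to add(a, b)
#     d = (a - b) / 2.0    # builds Subtract and Divide nodes
#     mask = a > b         # builds a Greater node, not a Python bool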
# Custom ops
@nameable_op
def broadcast(node, new_shape, broadcast_axes, name=None):
# type: (Node, TensorShape, Iterable[int], str) -> Node
"""Create a node which broadcasts the input node's values along specified axes to a desired shape.
:param node: The node with input tensor data.
:param new_shape: The new shape we want to broadcast tensor to.
:param broadcast_axes: The axis positions (0-based) in the result that are being broadcast.
:param name: Optional new name for output node.
:return: New node with broadcast shape.
"""
return Broadcast(node, Shape(new_shape), AxisSet(broadcast_axes))
@nameable_op
def broadcast_to(node, new_shape, axis=None, name=None):
# type: (Node, TensorShape, int, str) -> Node
"""Create a node which broadcasts the input node's values to a desired shape.
`broadcast_to` will attempt to automatically determine which axes need broadcasting.
The optional `axis` parameter specifies the starting axis position (0-based) in the output
shape from which the current shape of the tensor matches the desired new shape.
e.g. current_shape: [4, 5], new_shape: [2, 3, 4, 5, 6], axis: 2
By using the `axis` parameter you can control which output axis to broadcast along.
Example:
>>> input_node = ng.constant([1, 2, 3])
>>> current_shape = [3]
>>> new_shape = [3, 3]
>>> ng.broadcast_to(input_node, new_shape, axis=1)
array([[1, 2, 3],
[1, 2, 3],
[1, 2, 3]])
>>> ng.broadcast_to(input_node, new_shape, axis=0)
array([[1, 1, 1],
[2, 2, 2],
[3, 3, 3]])
If the `axis` parameter is not specified, `broadcast_to` will attempt to match shapes,
assuming the current shape matches the rightmost positions of the desired new shape.
This behaviour is similar to NumPy's broadcasting.
i.e. default `axis = len(new_shape) - len(current_shape)`
:param node: The node with input tensor data.
:param new_shape: The new shape we want to broadcast tensor to.
:param axis: The axis along which we perform broadcasting.
:param name: Optional new name for output node.
:return: New node with broadcast shape.
"""
return Broadcast(node, Shape(new_shape), get_broadcast_axes(new_shape, node.shape, axis))
@nameable_op
def convert(node, new_type, name=None): # type: (Node, NumericType, str) -> Node
"""Return node which casts input node values to specified type."""
new_element_type = get_element_type(new_type)
return Convert(node, new_element_type)
@nameable_op
def select(selection_node, input_node1, input_node2, name=None):
# type: (Node, Node, Node, str) -> Node
"""Perform an element-wise selection operation on input tensors.
:param selection_node: The node providing selection values of `bool` type.
:param input_node1: The node providing data to be selected if respective `selection_node`
item value is `True`.
:param input_node2: The node providing data to be selected if respective `selection_node`
item value is `False`.
:param name: The optional new name for output node.
:return: The new node with values selected according to provided arguments.
"""
return Select(selection_node, input_node1, input_node2)
# Non-linear ops
@unary_op
def tanh(node, name=None): # type: (Node, str) -> Node
"""Return node which applies hyperbolic tangent to the input node element-wise.
:param node: One of: input node, array or scalar.
:param name: Optional new name for output node.
:return: New node with tanh operation applied on it.
"""
return Tanh(node)
# matmul ops
@nameable_op
def dot(left_node, right_node, reduction_axes_count=None, name=None):
# type: (Node, Node, int, str) -> Node
"""Return node which performs generalized dot product of two input nodes.
    This operation is capable of performing scalar-tensor product, matrix-vector product
    and matrix-matrix multiplication.
:param left_node: The node providing left hand side data.
:param right_node: The node providing right hand side data.
:param reduction_axes_count: The number of axes to reduce during dot-product.
:param name: The optional name for output node.
:return: The new node performing dot-product on input two nodes.
"""
if reduction_axes_count is None:
return Dot(left_node, right_node)
else:
return Dot(left_node, right_node, reduction_axes_count)
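# Illustrative sketch: for 2-D parameter nodes A of shape [2, 3] and B of shape [3, 4]
# (names assumed), dot corresponds to matrix multiplication with result shape [2, 4]:
#
#     C = dot(A, B)                          # default contraction
#     D = dot(A, B, reduction_axes_count=1)  # the same contraction, made explicit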
# convpool ops
@nameable_op
def convolution(data_batch, # type: Node
filter_weights, # type: Node
filter_strides=None, # type: List[int]
filter_dilation_strides=None, # type: List[int]
padding_below=None, # type: List[int]
padding_above=None, # type: List[int]
data_dilation_strides=None, # type: List[int]
name=None, # type: str
):
# type: (...) -> Node
"""Return node performing batched convolution operation.
:param data_batch: The node providing data batch tensor.
:param filter_weights: The node providing filters tensor.
:param filter_strides: The kernel window movement strides.
:param filter_dilation_strides: The filters dilation strides.
:param padding_below: The number of zero padding elements to add on each axis below 0
coordinate.
:param padding_above: The number of zero padding elements to add on each axis above max
coordinate.
:param data_dilation_strides: The data batch dilation strides.
:param name: The optional new name for output node.
:return: New node performing batched convolution operation.
"""
spatial_dim_count = len(data_batch.shape) - 2
if filter_strides is None:
filter_strides = [1] * spatial_dim_count
if filter_dilation_strides is None:
filter_dilation_strides = [1] * spatial_dim_count
if padding_above is None:
padding_above = [0] * spatial_dim_count
if padding_below is None:
padding_below = [0] * spatial_dim_count
if data_dilation_strides is None:
data_dilation_strides = [1] * spatial_dim_count
return Convolution(data_batch, filter_weights, Strides(filter_strides),
Strides(filter_dilation_strides), CoordinateDiff(padding_below),
CoordinateDiff(padding_above), Strides(data_dilation_strides))
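# Illustrative sketch (node names are assumptions): a 2-D convolution over an NCHW
# data batch with OIHW filters, first with the defaults, then strided and padded:
#
#     conv = convolution(data, weights)
#     conv_s2 = convolution(data, weights, filter_strides=[2, 2],
#                           padding_below=[1, 1], padding_above=[1, 1])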
@nameable_op
def convolution_backprop_data(data_batch_shape, # type: TensorShape
filters, # type: Node
output_delta, # type: Node
window_movement_strides_forward=None, # type: List[int]
window_dilation_strides_forward=None, # type: List[int]
padding_below_forward=None, # type: List[int]
padding_above_forward=None, # type: List[int]
data_dilation_strides_forward=None, # type: List[int]
name=None, # type: str
):
# type: (...) -> Node
"""Return node performing a batched-convolution data batch-backprop operation.
:param data_batch_shape: The shape of the data batch from forward-prop.
:param filters: The node producing the filters from forward-prop.
:param output_delta: The node producing output delta.
:param window_movement_strides_forward: The window movement strides from forward-prop.
:param window_dilation_strides_forward: The window dilation strides from forward-prop.
:param padding_below_forward: The padding-below sizes from forward-prop.
:param padding_above_forward: The padding-above sizes from forward-prop.
:param data_dilation_strides_forward: The data dilation strides from forward-prop.
"""
spatial_dim_count = len(data_batch_shape) - 2
if window_movement_strides_forward is None:
window_movement_strides_forward = [1] * spatial_dim_count
if window_dilation_strides_forward is None:
window_dilation_strides_forward = [1] * spatial_dim_count
if padding_below_forward is None:
padding_below_forward = [0] * spatial_dim_count
if padding_above_forward is None:
padding_above_forward = [0] * spatial_dim_count
if data_dilation_strides_forward is None:
data_dilation_strides_forward = [1] * spatial_dim_count
return ConvolutionBackpropData(Shape(data_batch_shape), filters, output_delta,
Strides(window_movement_strides_forward),
Strides(window_dilation_strides_forward),
CoordinateDiff(padding_below_forward),
CoordinateDiff(padding_above_forward),
Strides(data_dilation_strides_forward))
@nameable_op
def avg_pool(data_batch, # type: Node
window_shape, # type: TensorShape
window_strides=None, # type: List[int]
padding_below=None, # type: TensorShape
padding_above=None, # type: TensorShape
include_padding=False, # type: bool
name=None, # type: str
):
# type: (...) -> Node
"""Return average pooling node.
:param data_batch: The input node providing data.
:param window_shape: The pooling window shape.
:param window_strides: The window movement strides.
    :param padding_below: Optional zero padding added below the input data.
    :param padding_above: Optional zero padding added above the input data.
:param include_padding: Whether or not to include zero padding in average computations.
:param name: Optional name for the new output node.
:return: New node with AvgPool operation applied on its data.
"""
spatial_dim_count = len(window_shape)
if window_strides is None:
window_strides = [1] * spatial_dim_count
if padding_above is None:
padding_above = [0] * spatial_dim_count
if padding_below is None:
padding_below = [0] * spatial_dim_count
return AvgPool(data_batch, Shape(window_shape), Strides(window_strides), Shape(padding_below),
Shape(padding_above), include_padding)
@nameable_op
def max_pool(x, # type: Node
window_shape, # type: TensorShape
strides=None, # type: List[int]
padding_above=None, # type: List[int]
padding_below=None, # type: List[int]
name=None, # type: str
):
# type: (...) -> Node
"""Return max pooling node."""
if strides is None:
strides = [1] * len(window_shape) # Default to as many 1s as spatial dimensions of input.
if padding_above is None:
padding_above = [0] * len(window_shape)
if padding_below is None:
padding_below = [0] * len(window_shape)
return MaxPool(x, Shape(window_shape), Strides(strides),
Shape(padding_above), Shape(padding_below))
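# Illustrative sketch (input node `x` is an assumption): 2x2 pooling with stride 2,
# plus the average-pooling variant that counts zero padding in the averages:
#
#     pooled = max_pool(x, window_shape=[2, 2], strides=[2, 2])
#     averaged = avg_pool(x, window_shape=[2, 2], window_strides=[2, 2],
#                         include_padding=True)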
# reduction ops
@nameable_op
def sum(node, reduction_axes=None, name=None):
# type: (Node, Iterable[int], str) -> Node
"""Perform element-wise sums of the input tensor, eliminating the specified reduction axes.
:param node: The node providing data for operation.
:param reduction_axes: The axes to eliminate through summation.
    :param name: The optional new name for output node.
:return: The new node performing summation along `reduction_axes` element-wise.
"""
return Sum(node, AxisSet(get_reduction_axes(node, reduction_axes)))
@nameable_op
def max(node, reduction_axes=None, name=None):
# type: (Node, Iterable[int], str) -> Node
"""Max-reduction operation on input tensor, eliminating the specified reduction axes.
:param node: The tensor we want to max-reduce.
:param reduction_axes: The axes to eliminate through max operation.
:param name: Optional name for output node.
"""
return Max(node, AxisSet(get_reduction_axes(node, reduction_axes)))
@nameable_op
def min(node, reduction_axes=None, name=None):
# type: (Node, Iterable[int], str) -> Node
"""Min-reduction operation on input tensor, eliminating the specified reduction axes.
:param node: The tensor we want to min-reduce.
:param reduction_axes: The axes to eliminate through min operation.
:param name: Optional name for output node.
"""
return Min(node, AxisSet(get_reduction_axes(node, reduction_axes)))
@nameable_op
def prod(node, reduction_axes=None, name=None):
# type: (Node, Iterable[int], str) -> Node
"""Product-reduction operation on input tensor, eliminating the specified reduction axes.
:param node: The tensor we want to product-reduce.
:param reduction_axes: The axes to eliminate through product operation.
:param name: Optional name for output node.
:return: The new node performing product-reduction operation.
"""
return Product(node, AxisSet(get_reduction_axes(node, reduction_axes)))
# reshape ops
@nameable_op
def slice(node, lower_bounds, upper_bounds, strides=None, name=None):
# type: (Node, List[int], List[int], List[int], str) -> Node
"""Take a slice of an input tensor, (sub-tensor) that resides within a bounding box.
Optionally this function may be provided with stride along each axis.
:param node: The tensor we want to slice.
:param lower_bounds: The (inclusive) lower-bound coordinates for the tensor slice.
:param upper_bounds: The (exclusive) upper-bound coordinates for the tensor slice.
:param strides: The strides for the tensor slice.
:param name: Optional name for the output node.
:return: Return node that represents a slice of input nodes data.
"""
if strides is None:
return Slice(node, Coordinate(lower_bounds), Coordinate(upper_bounds))
else:
return Slice(node, Coordinate(lower_bounds), Coordinate(upper_bounds), Strides(strides))
@nameable_op
def concat(nodes, axis, name=None): # type: (List[Node], int, str) -> Node
"""Concatenate input nodes into single new node along specified axis.
    :param nodes: The nodes we want to concatenate into a single new node.
:param axis: The axis along which we want to concatenate input nodes.
:param name: The optional new name for output node.
:return: Return new node that is a concatenation of input nodes.
"""
return Concat(nodes, axis)
@nameable_op
def softmax(node, axes, name=None): # type: (Node, Iterable[int], str) -> Node
"""Apply softmax operation on each element of input tensor.
:param node: The tensor providing input data.
    :param axes: The list of axis indices which are used to calculate the divisor of
                 the softmax function.
:param name: The optional new name for output node.
:return: The new node with softmax operation applied on each element.
"""
if not isinstance(axes, set):
axes = set(axes)
return Softmax(node, AxisSet(axes))
@nameable_op
def pad(data_batch, # type: Node
value, # type: Node
padding_below=None, # type: TensorShape
padding_above=None, # type: TensorShape
padding_in=None, # type: TensorShape
name=None, # type: str
):
# type: (...) -> Node
"""Return padding node.
:param data_batch: The input node providing data.
:param value: The node producing the scalar value to be inserted for padding.
:param padding_below: The padding-below widths.
:param padding_above: The padding-above widths.
:param padding_in: The interior-padding widths.
:param name: The optional new name for output node.
:return: Return node that represents a padding of input nodes data.
"""
dim_count = len(data_batch.shape)
if padding_above is None:
padding_above = [0] * dim_count
if padding_below is None:
padding_below = [0] * dim_count
if padding_in is None:
padding_in = [0] * dim_count
return Pad(data_batch, value, Shape(padding_below), Shape(padding_above), Shape(padding_in))
@nameable_op
def one_hot(node, shape, one_hot_axis, name=None): # type: (Node, TensorShape, int, str) -> Node
"""Create node performing one-hot encoding on input data.
:param node: The input node providing data for operation.
:param shape: The output node shape including the new one-hot axis.
:param one_hot_axis: The index within the output shape of the new one-hot axis.
:param name: The optional name for new output node.
:return: New node performing one-hot operation.
"""
return OneHot(node, Shape(shape), one_hot_axis)
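# Illustrative sketch: class indices [0, 2, 1] one-hot encoded into shape [3, 3],
# with the new one-hot axis placed at position 1 (constant helper as used in the
# broadcast_to example above):
#
#     indices = constant([0, 2, 1])
#     encoded = one_hot(indices, [3, 3], 1)   # rows: [1,0,0], [0,0,1], [0,1,0]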
@nameable_op
def replace_slice(dest_node, # type: Node
src_node, # type: Node
lower_bounds, # type: List[int]
upper_bounds, # type: List[int]
strides=None, # type: List[int]
name=None, # type: str
):
# type: (...) -> Node
"""Return a copy of `dest_node` with the specified slice overwritten by the `src_node` data.
:param dest_node: The node providing data to be overwritten by the specified slice.
:param src_node: The node providing data for overwriting.
:param lower_bounds: The (inclusive) lower-bound coordinates for the replaced slice.
:param upper_bounds: The (exclusive) upper-bound coordinates for the replaced slice.
:param strides: The strides for the replaced slice.
    :param name: The optional name for the new output node.
    :return: The new node containing a copy of `dest_node` with the specified slice
             overwritten by the `src_node` data.
"""
if strides is None:
return ReplaceSlice(dest_node, src_node, Coordinate(lower_bounds), Coordinate(upper_bounds))
else:
return ReplaceSlice(dest_node, src_node, Coordinate(lower_bounds), Coordinate(upper_bounds),
Strides(strides))
@nameable_op
def reverse(node, reversed_axes, name=None): # type: (Node, List[int], str) -> Node
"""Perform axis-reverse operation.
:param node: The input node on which operation will be carried out.
:param reversed_axes: The list of indices of axes to be reversed.
:param name: The optional name of the output node.
:return: The new node with reversed axes.
"""
return Reverse(node, AxisSet(reversed_axes))
@nameable_op
def batch_norm(eps, # type: float
gamma, # type: Node
beta, # type: Node
data, # type: Node
mean=None, # type: Node
variance=None, # type: Node
name=None, # type: str
):
# type: (...) -> Node
"""Return batch normalization node."""
if mean is None and variance is None:
return BatchNormTraining(data, gamma, beta, eps)
else:
return BatchNormInference(data, gamma, beta, mean, variance, eps)
@nameable_op
def lrn(data, # type: Node
alpha=1, # type: float
beta=0.5, # type: float
bias=1, # type: float
size=5, # type: int
name=None, # type: str
):
# type: (...) -> Node
"""Return a node which performs element-wise Local Response Normalization (LRN) operation.
:param data: Input data.
:param alpha: A scale factor (usually positive).
:param beta: An exponent.
:param bias: An offset (usually positive) to avoid dividing by 0.
:param size: Width of the 1-D normalization window.
:param name: An optional name of the output node.
:return: The new node which performs LRN.
"""
return LRN(data, alpha, beta, bias, size)
@nameable_op
def argmax(data, # type: Node
axis=0, # type: int
):
# type: (...) -> Node
"""Return a node which performs ArgMax index reduction operation.
:param data: Input data.
:param axis: Reduction Axis.
    :return: The new node which performs ArgMax.
"""
return ArgMax(data, axis, get_element_type(np.int32))
@nameable_op
def argmin(data, # type: Node
axis=0, # type: int
):
# type: (...) -> Node
"""Return a node which performs ArgMin index reduction operation.
:param data: Input data.
:param axis: Reduction Axis.
    :return: The new node which performs ArgMin.
"""
return ArgMin(data, axis, get_element_type(np.int32))
@nameable_op
def topk(data, # type: Node
k, # type: int
kaxis=-1, # type: int
cmax=True, # type: bool
):
# type: (...) -> Node
"""Return a node which performs TopK.
    :param data: Input data.
    :param k: The number of top elements to return.
    :param kaxis: The axis along which TopK is computed (-1 means the last axis).
    :param cmax: Compute TopK largest (True) or smallest (False) values.
    :return: The new node which performs TopK (both indices and values).
"""
return TopK(data,
len(data.get_shape()) - 1 if kaxis == -1 else kaxis,
get_element_type(np.int32),
k,
cmax)
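# Illustrative sketch (node `scores` is an assumption): top-3 along the last axis,
# and the smallest-3 variant; the two outputs (indices and values) can then be
# unpacked with get_output_element below, in the order defined by the TopK op:
#
#     best = topk(scores, 3)                  # kaxis=-1 resolves to the last axis
#     worst = topk(scores, 3, cmax=False)
#     first_output = get_output_element(best, 0)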
@nameable_op
def get_output_element(data, index): # type: (Node, int) -> Node
"""Return the n-th element of the input tuple."""
return GetOutputElement(data, index)
|
py | 1a53716d9e5c3f87d0ab5d45427b198d166b33e4 | MaterialSlot.name = None
|
py | 1a5372579900a8822f8ea847a8bad3a1ef18b1da | from core.redis import rds
from core.triage import Triage
from core.parser import ScanParser
class Rule:
def __init__(self):
self.rule = 'CFG_ZEGE'
self.rule_severity = 2
self.rule_description = 'This rule checks for accessible Open API (Swagger) Documentation'
self.rule_confirm = 'Remote Server is exposing Swagger API'
self.rule_details = ''
self.rule_mitigation = '''Swagger API may have been incorrectly configured to allow access to untrusted clients. \
Check whether this can be restricted, as it may lead to attackers identifying your application endpoints.'''
self.rule_match_string = {
'/v2/api-docs':{
'app':'SWAGGER',
'match':['"swagger":"2.0"'],
'title':'REST API Documentation'
},
'/help':{
'app':'ASPNET_WEBAPI_HELP',
'match':['ASP.NET Web API Help Page'],
'title':'ASP.NET API Docs'
},
'/api-docs':{
'app':'SWAGGER',
'match':['"swagger":"2.0"'],
'title':'REST API Documentation'
},
'/swagger/index.html':{
'app':'SWAGGER_ALT1',
'match':['Swagger UI', '"swagger"'],
'title':'REST API Documentation'
},
'/swagger-ui.html':{
'app':'SWAGGER_ALT2',
'match':['Swagger UI', '"swagger"'],
'title':'REST API Documentation'
},
'/api/swagger-ui.html':{
'app':'SWAGGER_ALT3',
'match':['Swagger UI', '"swagger"'],
'title':'REST API Documentation'
},
'/api-docs/swagger.json':{
'app':'SWAGGER_ALT4',
'match':['Swagger UI', '"swagger"'],
'title':'REST API Documentation'
},
'/swagger.json':{
'app':'SWAGGER_ALT5',
'match':['Swagger UI', '"swagger"'],
'title':'REST API Documentation'
},
'/swagger/v1/swagger.json':{
'app':'SWAGGER_ALT6',
'match':['Swagger UI', '"swagger"'],
'title':'REST API Documentation'
},
}
self.intensity = 3
def check_rule(self, ip, port, values, conf):
t = Triage()
p = ScanParser(port, values)
domain = p.get_domain()
module = p.get_module()
if 'http' not in module:
return
for uri, values in self.rule_match_string.items():
app_title = values['title']
resp = t.http_request(ip, port, uri=uri)
if resp is not None:
for match in values['match']:
if match in resp.text:
self.rule_details = 'Identified an exposed {} at {}'.format(app_title, resp.url)
rds.store_vuln({
'ip':ip,
'port':port,
'domain':domain,
'rule_id':self.rule,
'rule_sev':self.rule_severity,
'rule_desc':self.rule_description,
'rule_confirm':self.rule_confirm,
'rule_details':self.rule_details,
'rule_mitigation':self.rule_mitigation
})
return
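# Illustrative sketch (hypothetical entry, not part of this rule): additional endpoints
# can be covered by extending rule_match_string with the same shape as the entries
# above -- the key is the URI probed via http_request and 'match' lists substrings
# that must appear in the response body:
#
#   '/openapi.json': {
#     'app': 'OPENAPI_V3',
#     'match': ['"openapi":"3'],
#     'title': 'REST API Documentation'
#   },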
|
py | 1a5372b0ed414d1af11a0cf789ee0a69e64cc12c | """Minimal Python 2 & 3 shim around all Qt bindings
DOCUMENTATION
Qt.py was born in the film and visual effects industry to address
the growing need for the development of software capable of running
with more than one flavour of the Qt bindings for Python - PySide,
PySide2, PyQt4 and PyQt5.
1. Build for one, run with all
2. Explicit is better than implicit
3. Support co-existence
Default resolution order:
- PySide2
- PyQt5
- PySide
- PyQt4
Usage:
>> import sys
>> from Qt import QtWidgets
>> app = QtWidgets.QApplication(sys.argv)
>> button = QtWidgets.QPushButton("Hello World")
>> button.show()
>> app.exec_()
All members of PySide2 are mapped from other bindings, should they exist.
If no equivalent member exist, it is excluded from Qt.py and inaccessible.
The idea is to highlight members that exist across all supported binding,
and guarantee that code that runs on one binding runs on all others.
For more details, visit https://github.com/mottosso/Qt.py
LICENSE
See end of file for license (MIT, BSD) information.
"""
import os
import sys
import types
import shutil
import importlib
__version__ = "1.1.0.b3"
# Enable support for `from Qt import *`
__all__ = []
# Flags from environment variables
QT_VERBOSE = bool(os.getenv("QT_VERBOSE"))
QT_PREFERRED_BINDING = os.getenv("QT_PREFERRED_BINDING", "")
QT_SIP_API_HINT = os.getenv("QT_SIP_API_HINT")
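# Illustrative usage of the flags above: QT_PREFERRED_BINDING is an os.pathsep-separated
# list evaluated before the default resolution order, and QT_VERBOSE makes the
# resolution steps visible on stdout, e.g. on Linux/macOS (where os.pathsep is ':'):
#
#     $ QT_VERBOSE=1 QT_PREFERRED_BINDING=PyQt5:PySide2 python app.py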
# Reference to Qt.py
Qt = sys.modules[__name__]
Qt.QtCompat = types.ModuleType("QtCompat")
try:
long
except NameError:
# Python 3 compatibility
long = int
"""Common members of all bindings
This is where each member of Qt.py is explicitly defined.
It is based on a "lowest common denominator" of all bindings;
including members found in each of the 4 bindings.
Find or add excluded members in build_membership.py
"""
_common_members = {
"QtGui": [
"QAbstractTextDocumentLayout",
"QActionEvent",
"QBitmap",
"QBrush",
"QClipboard",
"QCloseEvent",
"QColor",
"QConicalGradient",
"QContextMenuEvent",
"QCursor",
"QDoubleValidator",
"QDrag",
"QDragEnterEvent",
"QDragLeaveEvent",
"QDragMoveEvent",
"QDropEvent",
"QFileOpenEvent",
"QFocusEvent",
"QFont",
"QFontDatabase",
"QFontInfo",
"QFontMetrics",
"QFontMetricsF",
"QGradient",
"QHelpEvent",
"QHideEvent",
"QHoverEvent",
"QIcon",
"QIconDragEvent",
"QIconEngine",
"QImage",
"QImageIOHandler",
"QImageReader",
"QImageWriter",
"QInputEvent",
"QInputMethodEvent",
"QIntValidator",
"QKeyEvent",
"QKeySequence",
"QLinearGradient",
"QMatrix2x2",
"QMatrix2x3",
"QMatrix2x4",
"QMatrix3x2",
"QMatrix3x3",
"QMatrix3x4",
"QMatrix4x2",
"QMatrix4x3",
"QMatrix4x4",
"QMouseEvent",
"QMoveEvent",
"QMovie",
"QPaintDevice",
"QPaintEngine",
"QPaintEngineState",
"QPaintEvent",
"QPainter",
"QPainterPath",
"QPainterPathStroker",
"QPalette",
"QPen",
"QPicture",
"QPictureIO",
"QPixmap",
"QPixmapCache",
"QPolygon",
"QPolygonF",
"QQuaternion",
"QRadialGradient",
"QRegExpValidator",
"QRegion",
"QResizeEvent",
"QSessionManager",
"QShortcutEvent",
"QShowEvent",
"QStandardItem",
"QStandardItemModel",
"QStatusTipEvent",
"QSyntaxHighlighter",
"QTabletEvent",
"QTextBlock",
"QTextBlockFormat",
"QTextBlockGroup",
"QTextBlockUserData",
"QTextCharFormat",
"QTextCursor",
"QTextDocument",
"QTextDocumentFragment",
"QTextFormat",
"QTextFragment",
"QTextFrame",
"QTextFrameFormat",
"QTextImageFormat",
"QTextInlineObject",
"QTextItem",
"QTextLayout",
"QTextLength",
"QTextLine",
"QTextList",
"QTextListFormat",
"QTextObject",
"QTextObjectInterface",
"QTextOption",
"QTextTable",
"QTextTableCell",
"QTextTableCellFormat",
"QTextTableFormat",
"QTransform",
"QValidator",
"QVector2D",
"QVector3D",
"QVector4D",
"QWhatsThisClickedEvent",
"QWheelEvent",
"QWindowStateChangeEvent",
"qAlpha",
"qBlue",
"qGray",
"qGreen",
"qIsGray",
"qRed",
"qRgb",
"qRgb",
],
"QtWidgets": [
"QAbstractButton",
"QAbstractGraphicsShapeItem",
"QAbstractItemDelegate",
"QAbstractItemView",
"QAbstractScrollArea",
"QAbstractSlider",
"QAbstractSpinBox",
"QAction",
"QActionGroup",
"QApplication",
"QBoxLayout",
"QButtonGroup",
"QCalendarWidget",
"QCheckBox",
"QColorDialog",
"QColumnView",
"QComboBox",
"QCommandLinkButton",
"QCommonStyle",
"QCompleter",
"QDataWidgetMapper",
"QDateEdit",
"QDateTimeEdit",
"QDesktopWidget",
"QDial",
"QDialog",
"QDialogButtonBox",
"QDirModel",
"QDockWidget",
"QDoubleSpinBox",
"QErrorMessage",
"QFileDialog",
"QFileIconProvider",
"QFileSystemModel",
"QFocusFrame",
"QFontComboBox",
"QFontDialog",
"QFormLayout",
"QFrame",
"QGesture",
"QGestureEvent",
"QGestureRecognizer",
"QGraphicsAnchor",
"QGraphicsAnchorLayout",
"QGraphicsBlurEffect",
"QGraphicsColorizeEffect",
"QGraphicsDropShadowEffect",
"QGraphicsEffect",
"QGraphicsEllipseItem",
"QGraphicsGridLayout",
"QGraphicsItem",
"QGraphicsItemGroup",
"QGraphicsLayout",
"QGraphicsLayoutItem",
"QGraphicsLineItem",
"QGraphicsLinearLayout",
"QGraphicsObject",
"QGraphicsOpacityEffect",
"QGraphicsPathItem",
"QGraphicsPixmapItem",
"QGraphicsPolygonItem",
"QGraphicsProxyWidget",
"QGraphicsRectItem",
"QGraphicsRotation",
"QGraphicsScale",
"QGraphicsScene",
"QGraphicsSceneContextMenuEvent",
"QGraphicsSceneDragDropEvent",
"QGraphicsSceneEvent",
"QGraphicsSceneHelpEvent",
"QGraphicsSceneHoverEvent",
"QGraphicsSceneMouseEvent",
"QGraphicsSceneMoveEvent",
"QGraphicsSceneResizeEvent",
"QGraphicsSceneWheelEvent",
"QGraphicsSimpleTextItem",
"QGraphicsTextItem",
"QGraphicsTransform",
"QGraphicsView",
"QGraphicsWidget",
"QGridLayout",
"QGroupBox",
"QHBoxLayout",
"QHeaderView",
"QInputDialog",
"QItemDelegate",
"QItemEditorCreatorBase",
"QItemEditorFactory",
"QKeyEventTransition",
"QLCDNumber",
"QLabel",
"QLayout",
"QLayoutItem",
"QLineEdit",
"QListView",
"QListWidget",
"QListWidgetItem",
"QMainWindow",
"QMdiArea",
"QMdiSubWindow",
"QMenu",
"QMenuBar",
"QMessageBox",
"QMouseEventTransition",
"QPanGesture",
"QPinchGesture",
"QPlainTextDocumentLayout",
"QPlainTextEdit",
"QProgressBar",
"QProgressDialog",
"QPushButton",
"QRadioButton",
"QRubberBand",
"QScrollArea",
"QScrollBar",
"QShortcut",
"QSizeGrip",
"QSizePolicy",
"QSlider",
"QSpacerItem",
"QSpinBox",
"QSplashScreen",
"QSplitter",
"QSplitterHandle",
"QStackedLayout",
"QStackedWidget",
"QStatusBar",
"QStyle",
"QStyleFactory",
"QStyleHintReturn",
"QStyleHintReturnMask",
"QStyleHintReturnVariant",
"QStyleOption",
"QStyleOptionButton",
"QStyleOptionComboBox",
"QStyleOptionComplex",
"QStyleOptionDockWidget",
"QStyleOptionFocusRect",
"QStyleOptionFrame",
"QStyleOptionGraphicsItem",
"QStyleOptionGroupBox",
"QStyleOptionHeader",
"QStyleOptionMenuItem",
"QStyleOptionProgressBar",
"QStyleOptionRubberBand",
"QStyleOptionSizeGrip",
"QStyleOptionSlider",
"QStyleOptionSpinBox",
"QStyleOptionTab",
"QStyleOptionTabBarBase",
"QStyleOptionTabWidgetFrame",
"QStyleOptionTitleBar",
"QStyleOptionToolBar",
"QStyleOptionToolBox",
"QStyleOptionToolButton",
"QStyleOptionViewItem",
"QStylePainter",
"QStyledItemDelegate",
"QSwipeGesture",
"QSystemTrayIcon",
"QTabBar",
"QTabWidget",
"QTableView",
"QTableWidget",
"QTableWidgetItem",
"QTableWidgetSelectionRange",
"QTapAndHoldGesture",
"QTapGesture",
"QTextBrowser",
"QTextEdit",
"QTimeEdit",
"QToolBar",
"QToolBox",
"QToolButton",
"QToolTip",
"QTreeView",
"QTreeWidget",
"QTreeWidgetItem",
"QTreeWidgetItemIterator",
"QUndoCommand",
"QUndoGroup",
"QUndoStack",
"QUndoView",
"QVBoxLayout",
"QWhatsThis",
"QWidget",
"QWidgetAction",
"QWidgetItem",
"QWizard",
"QWizardPage",
],
"QtCore": [
"QAbstractAnimation",
"QAbstractEventDispatcher",
"QAbstractItemModel",
"QAbstractListModel",
"QAbstractState",
"QAbstractTableModel",
"QAbstractTransition",
"QAnimationGroup",
"QBasicTimer",
"QBitArray",
"QBuffer",
"QByteArray",
"QByteArrayMatcher",
"QChildEvent",
"QCoreApplication",
"QCryptographicHash",
"QDataStream",
"QDate",
"QDateTime",
"QDir",
"QDirIterator",
"QDynamicPropertyChangeEvent",
"QEasingCurve",
"QElapsedTimer",
"QEvent",
"QEventLoop",
"QEventTransition",
"QFile",
"QFileInfo",
"QFileSystemWatcher",
"QFinalState",
"QGenericArgument",
"QGenericReturnArgument",
"QHistoryState",
"QIODevice",
"QLibraryInfo",
"QLine",
"QLineF",
"QLocale",
"QMargins",
"QMetaClassInfo",
"QMetaEnum",
"QMetaMethod",
"QMetaObject",
"QMetaProperty",
"QMetaType",
"QMimeData",
"QModelIndex",
"QMutex",
"QMutexLocker",
"QObject",
"QParallelAnimationGroup",
"QPauseAnimation",
"QPersistentModelIndex",
"QPluginLoader",
"QPoint",
"QPointF",
"QProcess",
"QProcessEnvironment",
"QPropertyAnimation",
"QReadLocker",
"QReadWriteLock",
"QRect",
"QRectF",
"QRegExp",
"QResource",
"QRunnable",
"QSemaphore",
"QSequentialAnimationGroup",
"QSettings",
"QSignalMapper",
"QSignalTransition",
"QSize",
"QSizeF",
"QSocketNotifier",
"QState",
"QStateMachine",
"QSysInfo",
"QSystemSemaphore",
"QTemporaryFile",
"QTextBoundaryFinder",
"QTextCodec",
"QTextDecoder",
"QTextEncoder",
"QTextStream",
"QTextStreamManipulator",
"QThread",
"QThreadPool",
"QTime",
"QTimeLine",
"QTimer",
"QTimerEvent",
"QTranslator",
"QUrl",
"QVariantAnimation",
"QWaitCondition",
"QWriteLocker",
"QXmlStreamAttribute",
"QXmlStreamAttributes",
"QXmlStreamEntityDeclaration",
"QXmlStreamEntityResolver",
"QXmlStreamNamespaceDeclaration",
"QXmlStreamNotationDeclaration",
"QXmlStreamReader",
"QXmlStreamWriter",
"Qt",
"QtCriticalMsg",
"QtDebugMsg",
"QtFatalMsg",
"QtMsgType",
"QtSystemMsg",
"QtWarningMsg",
"qAbs",
"qAddPostRoutine",
"qChecksum",
"qCritical",
"qDebug",
"qFatal",
"qFuzzyCompare",
"qIsFinite",
"qIsInf",
"qIsNaN",
"qIsNull",
"qRegisterResourceData",
"qUnregisterResourceData",
"qVersion",
"qWarning",
"qrand",
"qsrand",
],
"QtXml": [
"QDomAttr",
"QDomCDATASection",
"QDomCharacterData",
"QDomComment",
"QDomDocument",
"QDomDocumentFragment",
"QDomDocumentType",
"QDomElement",
"QDomEntity",
"QDomEntityReference",
"QDomImplementation",
"QDomNamedNodeMap",
"QDomNode",
"QDomNodeList",
"QDomNotation",
"QDomProcessingInstruction",
"QDomText",
"QXmlAttributes",
"QXmlContentHandler",
"QXmlDTDHandler",
"QXmlDeclHandler",
"QXmlDefaultHandler",
"QXmlEntityResolver",
"QXmlErrorHandler",
"QXmlInputSource",
"QXmlLexicalHandler",
"QXmlLocator",
"QXmlNamespaceSupport",
"QXmlParseException",
"QXmlReader",
"QXmlSimpleReader"
],
"QtHelp": [
"QHelpContentItem",
"QHelpContentModel",
"QHelpContentWidget",
"QHelpEngine",
"QHelpEngineCore",
"QHelpIndexModel",
"QHelpIndexWidget",
"QHelpSearchEngine",
"QHelpSearchQuery",
"QHelpSearchQueryWidget",
"QHelpSearchResultWidget"
],
"QtNetwork": [
"QAbstractNetworkCache",
"QAbstractSocket",
"QAuthenticator",
"QHostAddress",
"QHostInfo",
"QLocalServer",
"QLocalSocket",
"QNetworkAccessManager",
"QNetworkAddressEntry",
"QNetworkCacheMetaData",
"QNetworkConfiguration",
"QNetworkConfigurationManager",
"QNetworkCookie",
"QNetworkCookieJar",
"QNetworkDiskCache",
"QNetworkInterface",
"QNetworkProxy",
"QNetworkProxyFactory",
"QNetworkProxyQuery",
"QNetworkReply",
"QNetworkRequest",
"QNetworkSession",
"QSsl",
"QTcpServer",
"QTcpSocket",
"QUdpSocket"
],
"QtOpenGL": [
"QGL",
"QGLContext",
"QGLFormat",
"QGLWidget"
]
}
"""Misplaced members
These members from the original submodule are misplaced relative PySide2
"""
_misplaced_members = {
"PySide2": {
"QtGui.QStringListModel": "QtCore.QStringListModel",
"QtCore.Property": "QtCore.Property",
"QtCore.Signal": "QtCore.Signal",
"QtCore.Slot": "QtCore.Slot",
"QtCore.QAbstractProxyModel": "QtCore.QAbstractProxyModel",
"QtCore.QSortFilterProxyModel": "QtCore.QSortFilterProxyModel",
"QtCore.QItemSelection": "QtCore.QItemSelection",
"QtCore.QItemSelectionModel": "QtCore.QItemSelectionModel",
},
"PyQt5": {
"QtCore.pyqtProperty": "QtCore.Property",
"QtCore.pyqtSignal": "QtCore.Signal",
"QtCore.pyqtSlot": "QtCore.Slot",
"QtCore.QAbstractProxyModel": "QtCore.QAbstractProxyModel",
"QtCore.QSortFilterProxyModel": "QtCore.QSortFilterProxyModel",
"QtCore.QStringListModel": "QtCore.QStringListModel",
"QtCore.QItemSelection": "QtCore.QItemSelection",
"QtCore.QItemSelectionModel": "QtCore.QItemSelectionModel",
},
"PySide": {
"QtGui.QAbstractProxyModel": "QtCore.QAbstractProxyModel",
"QtGui.QSortFilterProxyModel": "QtCore.QSortFilterProxyModel",
"QtGui.QStringListModel": "QtCore.QStringListModel",
"QtGui.QItemSelection": "QtCore.QItemSelection",
"QtGui.QItemSelectionModel": "QtCore.QItemSelectionModel",
"QtCore.Property": "QtCore.Property",
"QtCore.Signal": "QtCore.Signal",
"QtCore.Slot": "QtCore.Slot",
},
"PyQt4": {
"QtGui.QAbstractProxyModel": "QtCore.QAbstractProxyModel",
"QtGui.QSortFilterProxyModel": "QtCore.QSortFilterProxyModel",
"QtGui.QItemSelection": "QtCore.QItemSelection",
"QtGui.QStringListModel": "QtCore.QStringListModel",
"QtGui.QItemSelectionModel": "QtCore.QItemSelectionModel",
"QtCore.pyqtProperty": "QtCore.Property",
"QtCore.pyqtSignal": "QtCore.Signal",
"QtCore.pyqtSlot": "QtCore.Slot",
}
}
""" Compatibility Members
This dictionary is used to build Qt.QtCompat objects that provide a consistent
interface for obsolete members, and differences in binding return values.
{
"binding": {
"classname": {
"targetname": "binding_namespace",
}
}
}
"""
_compatibility_members = {
"PySide2": {
"QHeaderView": {
"sectionsClickable": "QtWidgets.QHeaderView.sectionsClickable",
"setSectionsClickable":
"QtWidgets.QHeaderView.setSectionsClickable",
"sectionResizeMode": "QtWidgets.QHeaderView.sectionResizeMode",
"setSectionResizeMode":
"QtWidgets.QHeaderView.setSectionResizeMode",
"sectionsMovable": "QtWidgets.QHeaderView.sectionsMovable",
"setSectionsMovable": "QtWidgets.QHeaderView.setSectionsMovable",
},
"QFileDialog": {
"getOpenFileName": "QtWidgets.QFileDialog.getOpenFileName",
"getOpenFileNames": "QtWidgets.QFileDialog.getOpenFileNames",
"getSaveFileName": "QtWidgets.QFileDialog.getSaveFileName",
},
},
"PyQt5": {
"QHeaderView": {
"sectionsClickable": "QtWidgets.QHeaderView.sectionsClickable",
"setSectionsClickable":
"QtWidgets.QHeaderView.setSectionsClickable",
"sectionResizeMode": "QtWidgets.QHeaderView.sectionResizeMode",
"setSectionResizeMode":
"QtWidgets.QHeaderView.setSectionResizeMode",
"sectionsMovable": "QtWidgets.QHeaderView.sectionsMovable",
"setSectionsMovable": "QtWidgets.QHeaderView.setSectionsMovable",
},
"QFileDialog": {
"getOpenFileName": "QtWidgets.QFileDialog.getOpenFileName",
"getOpenFileNames": "QtWidgets.QFileDialog.getOpenFileNames",
"getSaveFileName": "QtWidgets.QFileDialog.getSaveFileName",
},
},
"PySide": {
"QHeaderView": {
"sectionsClickable": "QtWidgets.QHeaderView.isClickable",
"setSectionsClickable": "QtWidgets.QHeaderView.setClickable",
"sectionResizeMode": "QtWidgets.QHeaderView.resizeMode",
"setSectionResizeMode": "QtWidgets.QHeaderView.setResizeMode",
"sectionsMovable": "QtWidgets.QHeaderView.isMovable",
"setSectionsMovable": "QtWidgets.QHeaderView.setMovable",
},
"QFileDialog": {
"getOpenFileName": "QtWidgets.QFileDialog.getOpenFileName",
"getOpenFileNames": "QtWidgets.QFileDialog.getOpenFileNames",
"getSaveFileName": "QtWidgets.QFileDialog.getSaveFileName",
},
},
"PyQt4": {
"QHeaderView": {
"sectionsClickable": "QtWidgets.QHeaderView.isClickable",
"setSectionsClickable": "QtWidgets.QHeaderView.setClickable",
"sectionResizeMode": "QtWidgets.QHeaderView.resizeMode",
"setSectionResizeMode": "QtWidgets.QHeaderView.setResizeMode",
"sectionsMovable": "QtWidgets.QHeaderView.isMovable",
"setSectionsMovable": "QtWidgets.QHeaderView.setMovable",
},
"QFileDialog": {
"getOpenFileName": "QtWidgets.QFileDialog.getOpenFileName",
"getOpenFileNames": "QtWidgets.QFileDialog.getOpenFileNames",
"getSaveFileName": "QtWidgets.QFileDialog.getSaveFileName",
},
},
}
def _apply_site_config():
try:
import QtSiteConfig
except ImportError:
# If no QtSiteConfig module found, no modifications
# to _common_members are needed.
pass
else:
# Provide the ability to modify the dicts used to build Qt.py
if hasattr(QtSiteConfig, 'update_members'):
QtSiteConfig.update_members(_common_members)
if hasattr(QtSiteConfig, 'update_misplaced_members'):
QtSiteConfig.update_misplaced_members(members=_misplaced_members)
if hasattr(QtSiteConfig, 'update_compatibility_members'):
QtSiteConfig.update_compatibility_members(
members=_compatibility_members)
def _new_module(name):
return types.ModuleType(__name__ + "." + name)
def _setup(module, extras):
"""Install common submodules"""
Qt.__binding__ = module.__name__
for name in list(_common_members) + extras:
try:
submodule = importlib.import_module(
module.__name__ + "." + name)
except ImportError:
continue
setattr(Qt, "_" + name, submodule)
if name not in extras:
# Store reference to original binding,
# but don't store speciality modules
# such as uic or QtUiTools
setattr(Qt, name, _new_module(name))
def _wrapinstance(func, ptr, base=None):
"""Enable implicit cast of pointer to most suitable class
This behaviour is available in sip per default.
Based on http://nathanhorne.com/pyqtpyside-wrap-instance
Usage:
This mechanism kicks in under these circumstances.
1. Qt.py is using PySide 1 or 2.
2. A `base` argument is not provided.
See :func:`QtCompat.wrapInstance()`
Arguments:
func (function): Original function
ptr (long): Pointer to QObject in memory
base (QObject, optional): Base class to wrap with. Defaults to QObject,
which should handle anything.
"""
assert isinstance(ptr, long), "Argument 'ptr' must be of type <long>"
assert (base is None) or issubclass(base, Qt.QtCore.QObject), (
"Argument 'base' must be of type <QObject>")
if base is None:
q_object = func(long(ptr), Qt.QtCore.QObject)
meta_object = q_object.metaObject()
class_name = meta_object.className()
super_class_name = meta_object.superClass().className()
if hasattr(Qt.QtWidgets, class_name):
base = getattr(Qt.QtWidgets, class_name)
elif hasattr(Qt.QtWidgets, super_class_name):
base = getattr(Qt.QtWidgets, super_class_name)
else:
base = Qt.QtCore.QObject
return func(long(ptr), base)
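# Illustrative sketch: wrapping a C++ widget pointer obtained from a host application
# (the pointer source below is a hypothetical helper, not part of Qt.py):
#
#     ptr = get_main_window_pointer()   # returns the widget's address as an int/long
#     win = Qt.QtCompat.wrapInstance(long(ptr), Qt.QtWidgets.QMainWindow)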
def _reassign_misplaced_members(binding):
"""Apply misplaced members from `binding` to Qt.py
Arguments:
binding (dict): Misplaced members
"""
for src, dst in _misplaced_members[binding].items():
src_module, src_member = src.split(".")
dst_module, dst_member = dst.split(".")
try:
src_object = getattr(Qt, dst_module)
except AttributeError:
# Skip reassignment of non-existing members.
# This can happen if a request was made to
# rename a member that didn't exist, for example
# if QtWidgets isn't available on the target platform.
continue
dst_value = getattr(getattr(Qt, "_" + src_module), src_member)
setattr(
src_object,
dst_member,
dst_value
)
def _build_compatibility_members(binding, decorators=None):
"""Apply `binding` to QtCompat
Arguments:
binding (str): Top level binding in _compatibility_members.
decorators (dict, optional): Provides the ability to decorate the
original Qt methods when needed by a binding. This can be used
to change the returned value to a standard value. The key should
be the classname, the value is a dict where the keys are the
target method names, and the values are the decorator functions.
"""
decorators = decorators or dict()
# Allow optional site-level customization of the compatibility members.
# This method does not need to be implemented in QtSiteConfig.
try:
import QtSiteConfig
except ImportError:
pass
else:
if hasattr(QtSiteConfig, 'update_compatibility_decorators'):
QtSiteConfig.update_compatibility_decorators(binding, decorators)
_QtCompat = type("QtCompat", (object,), {})
for classname, bindings in _compatibility_members[binding].items():
attrs = {}
for target, binding in bindings.items():
namespaces = binding.split('.')
try:
src_object = getattr(Qt, "_" + namespaces[0])
except AttributeError as e:
_log("QtCompat: AttributeError: %s" % e)
# Skip reassignment of non-existing members.
# This can happen if a request was made to
# rename a member that didn't exist, for example
# if QtWidgets isn't available on the target platform.
continue
# Walk down any remaining namespace getting the object assuming
# that if the first namespace exists the rest will exist.
for namespace in namespaces[1:]:
src_object = getattr(src_object, namespace)
# decorate the Qt method if a decorator was provided.
if target in decorators.get(classname, []):
# staticmethod must be called on the decorated method to
# prevent a TypeError being raised when the decorated method
# is called.
src_object = staticmethod(
decorators[classname][target](src_object))
attrs[target] = src_object
# Create the QtCompat class and install it into the namespace
compat_class = type(classname, (_QtCompat,), attrs)
setattr(Qt.QtCompat, classname, compat_class)
def _pyside2():
"""Initialise PySide2
These functions serve to test the existence of a binding
along with set it up in such a way that it aligns with
the final step; adding members from the original binding
to Qt.py
"""
import PySide2 as module
_setup(module, ["QtUiTools"])
Qt.__binding_version__ = module.__version__
try:
try:
# Before merge of PySide and shiboken
import shiboken2
except ImportError:
# After merge of PySide and shiboken, May 2017
from PySide2 import shiboken2
Qt.QtCompat.wrapInstance = (
lambda ptr, base=None: _wrapinstance(
shiboken2.wrapInstance, ptr, base)
)
Qt.QtCompat.getCppPointer = lambda object: \
shiboken2.getCppPointer(object)[0]
except ImportError:
pass # Optional
if hasattr(Qt, "_QtUiTools"):
Qt.QtCompat.loadUi = _loadUi
if hasattr(Qt, "_QtCore"):
Qt.__qt_version__ = Qt._QtCore.qVersion()
Qt.QtCompat.translate = Qt._QtCore.QCoreApplication.translate
if hasattr(Qt, "_QtWidgets"):
Qt.QtCompat.setSectionResizeMode = \
Qt._QtWidgets.QHeaderView.setSectionResizeMode
_reassign_misplaced_members("PySide2")
_build_compatibility_members("PySide2")
def _pyside():
"""Initialise PySide"""
import PySide as module
_setup(module, ["QtUiTools"])
Qt.__binding_version__ = module.__version__
try:
try:
# Before merge of PySide and shiboken
import shiboken
except ImportError:
# After merge of PySide and shiboken, May 2017
from PySide import shiboken
Qt.QtCompat.wrapInstance = (
lambda ptr, base=None: _wrapinstance(
shiboken.wrapInstance, ptr, base)
)
Qt.QtCompat.getCppPointer = lambda object: \
shiboken.getCppPointer(object)[0]
except ImportError:
pass # Optional
if hasattr(Qt, "_QtUiTools"):
Qt.QtCompat.loadUi = _loadUi
if hasattr(Qt, "_QtGui"):
setattr(Qt, "QtWidgets", _new_module("QtWidgets"))
setattr(Qt, "_QtWidgets", Qt._QtGui)
Qt.QtCompat.setSectionResizeMode = Qt._QtGui.QHeaderView.setResizeMode
if hasattr(Qt, "_QtCore"):
Qt.__qt_version__ = Qt._QtCore.qVersion()
QCoreApplication = Qt._QtCore.QCoreApplication
Qt.QtCompat.translate = (
lambda context, sourceText, disambiguation, n:
QCoreApplication.translate(
context,
sourceText,
disambiguation,
QCoreApplication.CodecForTr,
n
)
)
_reassign_misplaced_members("PySide")
_build_compatibility_members("PySide")
def _pyqt5():
"""Initialise PyQt5"""
import PyQt5 as module
_setup(module, ["uic"])
try:
import sip
Qt.QtCompat.wrapInstance = (
lambda ptr, base=None: _wrapinstance(
sip.wrapinstance, ptr, base)
)
Qt.QtCompat.getCppPointer = lambda object: \
sip.unwrapinstance(object)
except ImportError:
pass # Optional
if hasattr(Qt, "_uic"):
Qt.QtCompat.loadUi = _loadUi
if hasattr(Qt, "_QtCore"):
Qt.__binding_version__ = Qt._QtCore.PYQT_VERSION_STR
Qt.__qt_version__ = Qt._QtCore.QT_VERSION_STR
Qt.QtCompat.translate = Qt._QtCore.QCoreApplication.translate
if hasattr(Qt, "_QtWidgets"):
Qt.QtCompat.setSectionResizeMode = \
Qt._QtWidgets.QHeaderView.setSectionResizeMode
_reassign_misplaced_members("PyQt5")
_build_compatibility_members('PyQt5')
def _pyqt4():
"""Initialise PyQt4"""
print "Initializing PyQt4"
import sip
    # Validation of environment variable. Prevents an error if
# the variable is invalid since it's just a hint.
try:
hint = int(QT_SIP_API_HINT)
except TypeError:
hint = None # Variable was None, i.e. not set.
except ValueError:
raise ImportError("QT_SIP_API_HINT=%s must be a 1 or 2")
for api in ("QString",
"QVariant",
"QDate",
"QDateTime",
"QTextStream",
"QTime",
"QUrl"):
try:
sip.setapi(api, hint or 2)
except AttributeError:
raise ImportError("PyQt4 < 4.6 isn't supported by Qt.py")
except ValueError:
actual = sip.getapi(api)
if not hint:
raise ImportError("API version already set to %d" % actual)
else:
# Having provided a hint indicates a soft constraint, one
# that doesn't throw an exception.
sys.stderr.write(
"Warning: API '%s' has already been set to %d.\n"
% (api, actual)
)
import PyQt4 as module
_setup(module, ["uic"])
try:
import sip
Qt.QtCompat.wrapInstance = (
lambda ptr, base=None: _wrapinstance(
sip.wrapinstance, ptr, base)
)
Qt.QtCompat.getCppPointer = lambda object: \
sip.unwrapinstance(object)
except ImportError:
pass # Optional
if hasattr(Qt, "_uic"):
Qt.QtCompat.loadUi = _loadUi
if hasattr(Qt, "_QtGui"):
setattr(Qt, "QtWidgets", _new_module("QtWidgets"))
setattr(Qt, "_QtWidgets", Qt._QtGui)
Qt.QtCompat.setSectionResizeMode = \
Qt._QtGui.QHeaderView.setResizeMode
if hasattr(Qt, "_QtCore"):
Qt.__binding_version__ = Qt._QtCore.PYQT_VERSION_STR
Qt.__qt_version__ = Qt._QtCore.QT_VERSION_STR
QCoreApplication = Qt._QtCore.QCoreApplication
Qt.QtCompat.translate = (
lambda context, sourceText, disambiguation, n:
QCoreApplication.translate(
context,
sourceText,
disambiguation,
QCoreApplication.CodecForTr,
n)
)
_reassign_misplaced_members("PyQt4")
# QFileDialog QtCompat decorator
def _standardizeQFileDialog(some_function):
"""Decorator that makes PyQt4 return conform to other bindings"""
def wrapper(*args, **kwargs):
ret = (some_function(*args, **kwargs))
# PyQt4 only returns the selected filename, force it to a
# standard return of the selected filename, and a empty string
# for the selected filter
return ret, ''
wrapper.__doc__ = some_function.__doc__
wrapper.__name__ = some_function.__name__
return wrapper
decorators = {
"QFileDialog": {
"getOpenFileName": _standardizeQFileDialog,
"getOpenFileNames": _standardizeQFileDialog,
"getSaveFileName": _standardizeQFileDialog,
}
}
_build_compatibility_members('PyQt4', decorators)
def _none():
"""Internal option (used in installer)"""
Mock = type("Mock", (), {"__getattr__": lambda Qt, attr: None})
Qt.__binding__ = "None"
Qt.__qt_version__ = "0.0.0"
Qt.__binding_version__ = "0.0.0"
Qt.QtCompat.loadUi = lambda uifile, baseinstance=None: None
Qt.QtCompat.setSectionResizeMode = lambda *args, **kwargs: None
for submodule in _common_members.keys():
setattr(Qt, submodule, Mock())
setattr(Qt, "_" + submodule, Mock())
def _log(text):
if QT_VERBOSE:
sys.stdout.write(text + "\n")
def _loadUi(uifile, baseinstance=None):
"""Dynamically load a user interface from the given `uifile`
This function calls `uic.loadUi` if using PyQt bindings,
else it implements a comparable binding for PySide.
Documentation:
http://pyqt.sourceforge.net/Docs/PyQt5/designer.html#PyQt5.uic.loadUi
Arguments:
uifile (str): Absolute path to Qt Designer file.
baseinstance (QWidget): Instantiated QWidget or subclass thereof
Return:
baseinstance if `baseinstance` is not `None`. Otherwise
return the newly created instance of the user interface.
"""
if hasattr(baseinstance, "layout") and baseinstance.layout():
message = ("QLayout: Attempting to add Layout to %s which "
"already has a layout")
raise RuntimeError(message % (baseinstance))
if hasattr(Qt, "_uic"):
return Qt._uic.loadUi(uifile, baseinstance)
elif hasattr(Qt, "_QtUiTools"):
# Implement `PyQt5.uic.loadUi` for PySide(2)
class _UiLoader(Qt._QtUiTools.QUiLoader):
"""Create the user interface in a base instance.
Unlike `Qt._QtUiTools.QUiLoader` itself this class does not
create a new instance of the top-level widget, but creates the user
interface in an existing instance of the top-level class if needed.
This mimics the behaviour of `PyQt5.uic.loadUi`.
"""
def __init__(self, baseinstance):
super(_UiLoader, self).__init__(baseinstance)
self.baseinstance = baseinstance
def load(self, uifile, *args, **kwargs):
from xml.etree.ElementTree import ElementTree
# For whatever reason, if this doesn't happen then
# reading an invalid or non-existing .ui file throws
# a RuntimeError.
etree = ElementTree()
etree.parse(uifile)
widget = Qt._QtUiTools.QUiLoader.load(
self, uifile, *args, **kwargs)
# Workaround for PySide 1.0.9, see issue #208
widget.parentWidget()
return widget
def createWidget(self, class_name, parent=None, name=""):
"""Called for each widget defined in ui file
Overridden here to populate `baseinstance` instead.
"""
if parent is None and self.baseinstance:
# Supposed to create the top-level widget,
# return the base instance instead
return self.baseinstance
# For some reason, Line is not in the list of available
# widgets, but works fine, so we have to special case it here.
if class_name in self.availableWidgets() + ["Line"]:
# Create a new widget for child widgets
widget = Qt._QtUiTools.QUiLoader.createWidget(self,
class_name,
parent,
name)
else:
raise Exception("Custom widget '%s' not supported"
% class_name)
if self.baseinstance:
# Set an attribute for the new child widget on the base
# instance, just like PyQt5.uic.loadUi does.
setattr(self.baseinstance, name, widget)
return widget
widget = _UiLoader(baseinstance).load(uifile)
Qt.QtCore.QMetaObject.connectSlotsByName(widget)
return widget
else:
raise NotImplementedError("No implementation available for loadUi")
def _convert(lines):
"""Convert compiled .ui file from PySide2 to Qt.py
Arguments:
lines (list): Each line of of .ui file
Usage:
>> with open("myui.py") as f:
.. lines = _convert(f.readlines())
"""
def parse(line):
line = line.replace("from PySide2 import", "from Qt import QtCompat,")
line = line.replace("QtWidgets.QApplication.translate",
"QtCompat.translate")
return line
parsed = list()
for line in lines:
line = parse(line)
parsed.append(line)
return parsed
def _cli(args):
"""Qt.py command-line interface"""
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--convert",
help="Path to compiled Python module, e.g. my_ui.py")
parser.add_argument("--compile",
help="Accept raw .ui file and compile with native "
"PySide2 compiler.")
parser.add_argument("--stdout",
help="Write to stdout instead of file",
action="store_true")
parser.add_argument("--stdin",
help="Read from stdin instead of file",
action="store_true")
args = parser.parse_args(args)
if args.stdout:
raise NotImplementedError("--stdout")
if args.stdin:
raise NotImplementedError("--stdin")
if args.compile:
raise NotImplementedError("--compile")
if args.convert:
sys.stdout.write("#\n"
"# WARNING: --convert is an ALPHA feature.\n#\n"
"# See https://github.com/mottosso/Qt.py/pull/132\n"
"# for details.\n"
"#\n")
#
# ------> Read
#
with open(args.convert) as f:
lines = _convert(f.readlines())
backup = "%s_backup%s" % os.path.splitext(args.convert)
sys.stdout.write("Creating \"%s\"..\n" % backup)
shutil.copy(args.convert, backup)
#
# <------ Write
#
with open(args.convert, "w") as f:
f.write("".join(lines))
sys.stdout.write("Successfully converted \"%s\"\n" % args.convert)
def _install():
# Default order (customise order and content via QT_PREFERRED_BINDING)
default_order = ("PySide2", "PyQt5", "PySide", "PyQt4")
preferred_order = list(
b for b in QT_PREFERRED_BINDING.split(os.pathsep) if b
)
order = preferred_order or default_order
available = {
"PySide2": _pyside2,
"PyQt5": _pyqt5,
"PySide": _pyside,
"PyQt4": _pyqt4,
"None": _none
}
_log("Order: '%s'" % "', '".join(order))
# Allow site-level customization of the available modules.
_apply_site_config()
found_binding = False
for name in order:
_log("Trying %s" % name)
try:
available[name]()
found_binding = True
break
except ImportError as e:
_log("ImportError: %s" % e)
except KeyError:
_log("ImportError: Preferred binding '%s' not found." % name)
if not found_binding:
        # If no binding was found, raise this error
        raise ImportError("No Qt binding was found.")
# Install individual members
for name, members in _common_members.items():
try:
their_submodule = getattr(Qt, "_%s" % name)
except AttributeError:
continue
our_submodule = getattr(Qt, name)
# Enable import *
__all__.append(name)
# Enable direct import of submodule,
# e.g. import Qt.QtCore
sys.modules[__name__ + "." + name] = our_submodule
for member in members:
# Accept that a submodule may miss certain members.
try:
their_member = getattr(their_submodule, member)
except AttributeError:
_log("'%s.%s' was missing." % (name, member))
continue
setattr(our_submodule, member, their_member)
# Backwards compatibility
if hasattr(Qt.QtCompat, 'loadUi'):
Qt.QtCompat.load_ui = Qt.QtCompat.loadUi
_install()
# Setup Binding Enum states
Qt.IsPySide2 = Qt.__binding__ == 'PySide2'
Qt.IsPyQt5 = Qt.__binding__ == 'PyQt5'
Qt.IsPySide = Qt.__binding__ == 'PySide'
Qt.IsPyQt4 = Qt.__binding__ == 'PyQt4'
"""Augment QtCompat
QtCompat contains wrappers and added functionality
to the original bindings, such as the CLI interface
and otherwise incompatible members between bindings,
such as `QHeaderView.setSectionResizeMode`.
"""
Qt.QtCompat._cli = _cli
Qt.QtCompat._convert = _convert
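# For instance (a hedged sketch; the wrapper name mirrors the member mentioned in
# the docstring above and is assumed to be exposed on QtCompat):
#
#     header = view.horizontalHeader()
#     Qt.QtCompat.setSectionResizeMode(header, Qt.QtWidgets.QHeaderView.Stretch)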
# Enable command-line interface
if __name__ == "__main__":
_cli(sys.argv[1:])
# The MIT License (MIT)
#
# Copyright (c) 2016-2017 Marcus Ottosson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# In PySide(2), loadUi does not exist, so we implement it
#
# `_UiLoader` is adapted from the qtpy project, which was further influenced
# by qt-helpers which was released under a 3-clause BSD license which in turn
# is based on a solution at:
#
# - https://gist.github.com/cpbotha/1b42a20c8f3eb9bb7cb8
#
# The License for this code is as follows:
#
# qt-helpers - a common front-end to various Qt modules
#
# Copyright (c) 2015, Chris Beaumont and Thomas Robitaille
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
# * Neither the name of the Glue project nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Which itself was based on the solution at
#
# https://gist.github.com/cpbotha/1b42a20c8f3eb9bb7cb8
#
# which was released under the MIT license:
#
# Copyright (c) 2011 Sebastian Wiesner <[email protected]>
# Modifications by Charl Botha <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files
# (the "Software"),to deal in the Software without restriction,
# including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
py | 1a5373411769041c40785b6aef8d7ac9c6336516 | """
Copyright (c) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from argparse import ArgumentParser
from collections import namedtuple
from ..topology_types import GenericTopology
from ..config import ConfigValidator, StringField, PathField, ConfigError
from ..dependency import ClassProvider
from ..utils import format_key, get_parameter_value_from_config
ConverterReturn = namedtuple('ConverterReturn', ['annotations', 'meta', 'content_check_errors'])
class BaseFormatConverter(ClassProvider):
__provider_type__ = 'converter'
topology_types = (GenericTopology, )
@classmethod
def parameters(cls):
return {
'converter': StringField(description="Converter name.")
}
@property
def config_validator(self):
return ConfigValidator(
'{}_converter_config'.format(self.get_name()), fields=self.parameters(),
on_extra_argument=ConfigValidator.ERROR_ON_EXTRA_ARGUMENT
)
def __init__(self, config=None):
self.config = config
if config:
self.validate_config()
self.configure()
def get_value_from_config(self, key):
return get_parameter_value_from_config(self.config, self.parameters(), key)
def convert(self, check_content=False, progress_callback=None, progress_interval=100, **kwargs):
"""
        Converts a specific annotation format to the ResultRepresentation specific to the current dataset/task.
        Arguments:
            check_content: bool flag which enables checking the existence of dataset files (e.g. images, ground-truth segmentation masks)
            progress_callback: callback function for handling conversion progress status
        Returns:
            instance of ConverterReturn, where:
                annotations is a list of AnnotationRepresentations for the current dataset
                meta contains dataset-specific attributes, e.g. label_map (can be None if the dataset has no specific info)
                content_check_errors is a list of error messages from the content check (can be None if check_content=False)
"""
raise NotImplementedError
@classmethod
def get_name(cls):
return cls.__provider__
def get_argparser(self):
parser = ArgumentParser(add_help=False)
config_validator = self.config_validator
fields = config_validator.fields
for field_name, field in fields.items():
if field_name == 'converter':
                # This is a base argument. The main argparser already uses it to pick
                # the argparser of the specific converter, so the converter argparser
                # should contain only converter-specific arguments.
continue
kwargs = {'required': not field.optional}
data_type = field.type
if data_type is not None:
kwargs['type'] = data_type
parser.add_argument(format_key(field_name), **kwargs)
return parser
def validate_config(self):
self.config_validator.validate(self.config)
def configure(self):
pass
class FileBasedAnnotationConverter(BaseFormatConverter):
@classmethod
def parameters(cls):
parameters = super().parameters()
parameters.update({
'annotation_file': PathField(description="Path to annotation file.")
})
return parameters
def configure(self):
self.annotation_file = self.get_value_from_config('annotation_file')
def convert(self, check_content=False, **kwargs):
pass
class DirectoryBasedAnnotationConverter(BaseFormatConverter):
@classmethod
def parameters(cls):
parameters = super().parameters()
parameters.update({
'data_dir': PathField(is_directory=True, description="Path to data directory.")
})
return parameters
def configure(self):
self.data_dir = self.get_value_from_config('data_dir')
def convert(self, check_content=False, **kwargs):
pass
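# A minimal sketch (hypothetical converter, not part of this module) of how a
# concrete converter is typically built on top of FileBasedAnnotationConverter:
# the subclass declares __provider__ (used by ClassProvider/get_name) and
# returns a ConverterReturn from convert().
#
#     class MyDatasetConverter(FileBasedAnnotationConverter):
#         __provider__ = 'my_dataset'
#
#         def convert(self, check_content=False, **kwargs):
#             annotations = []
#             content_errors = [] if check_content else None
#             # ... parse self.annotation_file and fill `annotations` here ...
#             return ConverterReturn(annotations, None, content_errors)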
def verify_label_map(label_map):
valid_label_map = {}
for class_id, class_name in label_map.items():
try:
int_class_id = int(class_id)
valid_label_map[int_class_id] = class_name
except ValueError:
raise ConfigError(
'class_id {} is invalid. `label_map` should have integer keys.'.format(class_id)
)
return valid_label_map
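# Example (illustrative): verify_label_map({'0': 'background', '1': 'person'})
# returns {0: 'background', 1: 'person'}, while a non-integer key such as 'bg'
# raises ConfigError.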
|
py | 1a5373a6f501dc71c96932f97a5829db233c1778 | # MIT License
#
# Copyright (c) 2020 Yu Zhang
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch
import torch.nn as nn
from torch.nn.modules.rnn import apply_permutation
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence
from hanlp.common.structure import ConfigTracker
from hanlp.layers.dropout import SharedDropout
class VariationalLSTM(nn.Module):
r"""
    VariationalLSTM is a variant of the vanilla bidirectional LSTM adopted by the Biaffine Parser,
    with the only difference being the dropout strategy.
    It drops nodes in the LSTM layers (input and recurrent connections)
    and applies the same dropout mask at every recurrent timestep.
    APIs are roughly the same as :class:`~torch.nn.LSTM` except that we only allow
    :class:`~torch.nn.utils.rnn.PackedSequence` as input.
References:
- Timothy Dozat and Christopher D. Manning. 2017.
`Deep Biaffine Attention for Neural Dependency Parsing`_.
Args:
input_size (int):
The number of expected features in the input.
hidden_size (int):
The number of features in the hidden state `h`.
num_layers (int):
The number of recurrent layers. Default: 1.
bidirectional (bool):
If ``True``, becomes a bidirectional LSTM. Default: ``False``
dropout (float):
If non-zero, introduces a :class:`SharedDropout` layer on the outputs of each LSTM layer except the last layer.
Default: 0.
.. _Deep Biaffine Attention for Neural Dependency Parsing:
https://openreview.net/forum?id=Hk95PK9le
"""
def __init__(self, input_size, hidden_size, num_layers=1, bidirectional=False, dropout=0):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bidirectional = bidirectional
self.dropout = dropout
self.num_directions = 1 + self.bidirectional
self.f_cells = nn.ModuleList()
if bidirectional:
self.b_cells = nn.ModuleList()
for _ in range(self.num_layers):
self.f_cells.append(nn.LSTMCell(input_size=input_size, hidden_size=hidden_size))
if bidirectional:
self.b_cells.append(nn.LSTMCell(input_size=input_size, hidden_size=hidden_size))
input_size = hidden_size * self.num_directions
self.reset_parameters()
def __repr__(self):
s = f"{self.input_size}, {self.hidden_size}"
if self.num_layers > 1:
s += f", num_layers={self.num_layers}"
if self.bidirectional:
s += f", bidirectional={self.bidirectional}"
if self.dropout > 0:
s += f", dropout={self.dropout}"
return f"{self.__class__.__name__}({s})"
def reset_parameters(self):
for param in self.parameters():
# apply orthogonal_ to weight
if len(param.shape) > 1:
nn.init.orthogonal_(param)
# apply zeros_ to bias
else:
nn.init.zeros_(param)
def permute_hidden(self, hx, permutation):
if permutation is None:
return hx
h = apply_permutation(hx[0], permutation)
c = apply_permutation(hx[1], permutation)
return h, c
def layer_forward(self, x, hx, cell, batch_sizes, reverse=False):
hx_0 = hx_i = hx
hx_n, output = [], []
steps = reversed(range(len(x))) if reverse else range(len(x))
if self.training:
hid_mask = SharedDropout.get_mask(hx_0[0], self.dropout)
for t in steps:
last_batch_size, batch_size = len(hx_i[0]), batch_sizes[t]
if last_batch_size < batch_size:
hx_i = [torch.cat((h, ih[last_batch_size:batch_size])) for h, ih in zip(hx_i, hx_0)]
else:
hx_n.append([h[batch_size:] for h in hx_i])
hx_i = [h[:batch_size] for h in hx_i]
hx_i = [h for h in cell(x[t], hx_i)]
output.append(hx_i[0])
if self.training:
hx_i[0] = hx_i[0] * hid_mask[:batch_size]
if reverse:
hx_n = hx_i
output.reverse()
else:
hx_n.append(hx_i)
hx_n = [torch.cat(h) for h in zip(*reversed(hx_n))]
output = torch.cat(output)
return output, hx_n
def forward(self, sequence, hx=None):
r"""
Args:
sequence (~torch.nn.utils.rnn.PackedSequence):
A packed variable length sequence.
hx (~torch.Tensor, ~torch.Tensor):
A tuple composed of two tensors `h` and `c`.
`h` of shape ``[num_layers*num_directions, batch_size, hidden_size]`` holds the initial hidden state
for each element in the batch.
`c` of shape ``[num_layers*num_directions, batch_size, hidden_size]`` holds the initial cell state
for each element in the batch.
If `hx` is not provided, both `h` and `c` default to zero.
Default: ``None``.
Returns:
~torch.nn.utils.rnn.PackedSequence, (~torch.Tensor, ~torch.Tensor):
The first is a packed variable length sequence.
The second is a tuple of tensors `h` and `c`.
`h` of shape ``[num_layers*num_directions, batch_size, hidden_size]`` holds the hidden state for `t=seq_len`.
Like output, the layers can be separated using ``h.view(num_layers, num_directions, batch_size, hidden_size)``
and similarly for c.
`c` of shape ``[num_layers*num_directions, batch_size, hidden_size]`` holds the cell state for `t=seq_len`.
"""
x, batch_sizes = sequence.data, sequence.batch_sizes.tolist()
batch_size = batch_sizes[0]
h_n, c_n = [], []
if hx is None:
ih = x.new_zeros(self.num_layers * self.num_directions, batch_size, self.hidden_size)
h, c = ih, ih
else:
h, c = self.permute_hidden(hx, sequence.sorted_indices)
h = h.view(self.num_layers, self.num_directions, batch_size, self.hidden_size)
c = c.view(self.num_layers, self.num_directions, batch_size, self.hidden_size)
for i in range(self.num_layers):
x = torch.split(x, batch_sizes)
if self.training:
mask = SharedDropout.get_mask(x[0], self.dropout)
x = [i * mask[:len(i)] for i in x]
x_i, (h_i, c_i) = self.layer_forward(x=x,
hx=(h[i, 0], c[i, 0]),
cell=self.f_cells[i],
batch_sizes=batch_sizes)
if self.bidirectional:
x_b, (h_b, c_b) = self.layer_forward(x=x,
hx=(h[i, 1], c[i, 1]),
cell=self.b_cells[i],
batch_sizes=batch_sizes,
reverse=True)
x_i = torch.cat((x_i, x_b), -1)
h_i = torch.stack((h_i, h_b))
c_i = torch.stack((c_i, c_b))
x = x_i
h_n.append(h_i)
            c_n.append(c_i)  # collect the final cell state of this layer (c_i, matching h_n above)
x = PackedSequence(x,
sequence.batch_sizes,
sequence.sorted_indices,
sequence.unsorted_indices)
hx = torch.cat(h_n, 0), torch.cat(c_n, 0)
hx = self.permute_hidden(hx, sequence.unsorted_indices)
return x, hx
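# A minimal usage sketch (illustrative only; `embeddings` and `lengths` are assumed
# inputs, and the sizes are arbitrary). It relies on the pack/pad helpers already
# imported at the top of this file:
#
#     lstm = VariationalLSTM(input_size=100, hidden_size=200, bidirectional=True, dropout=0.33)
#     packed = pack_padded_sequence(embeddings, lengths, batch_first=True, enforce_sorted=False)
#     output, (h_n, c_n) = lstm(packed)   # only PackedSequence input is supported
#     output, _ = pad_packed_sequence(output, batch_first=True)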
class VariationalLSTMEncoder(VariationalLSTM, ConfigTracker):
def __init__(self,
input_size,
hidden_size,
num_layers=1,
bidirectional=False,
variational_dropout=0,
word_dropout=0,
):
super().__init__(input_size, hidden_size, num_layers, bidirectional, variational_dropout)
ConfigTracker.__init__(self, locals())
self.lstm_dropout = SharedDropout(p=word_dropout)
# noinspection PyMethodOverriding
def forward(self, embed, mask):
batch_size, seq_len = mask.shape
x = pack_padded_sequence(embed, mask.sum(1), True, False)
x, _ = super().forward(x)
x, _ = pad_packed_sequence(x, True, total_length=seq_len)
x = self.lstm_dropout(x)
return x
def get_output_dim(self):
return self.hidden_size * self.num_directions
|
py | 1a53741efe8e21a0f8bb90df425500b306b8bfc8 | from typing import (
Tuple,
)
import pytest
from helpers import (
DummyRequestPair,
DummyResponse,
Tracker,
)
from lahja import (
BroadcastConfig,
Endpoint,
)
@pytest.mark.asyncio
async def test_broadcasts_to_all_endpoints(
triplet_of_endpoints: Tuple[Endpoint, Endpoint, Endpoint]) -> None:
endpoint1, endpoint2, endpoint3 = triplet_of_endpoints
tracker = Tracker()
endpoint1.subscribe(
DummyRequestPair,
tracker.track_and_broadcast_dummy(1, endpoint1)
)
endpoint2.subscribe(
DummyRequestPair,
tracker.track_and_broadcast_dummy(2, endpoint2)
)
item = DummyRequestPair()
response = await endpoint3.request(item)
print(response.property_of_dummy_response)
assert isinstance(response, DummyResponse)
assert tracker.exists(1)
assert tracker.exists(2)
# Ensure the registration was cleaned up
assert item._id not in endpoint3._futures
@pytest.mark.asyncio
async def test_broadcasts_to_specific_endpoint(
triplet_of_endpoints: Tuple[Endpoint, Endpoint, Endpoint]) -> None:
endpoint1, endpoint2, endpoint3 = triplet_of_endpoints
tracker = Tracker()
endpoint1.subscribe(
DummyRequestPair,
tracker.track_and_broadcast_dummy(1, endpoint1)
)
endpoint2.subscribe(
DummyRequestPair,
tracker.track_and_broadcast_dummy(2, endpoint1)
)
item = DummyRequestPair()
response = await endpoint3.request(item, BroadcastConfig(filter_endpoint=endpoint1.name))
print(response.property_of_dummy_response)
assert isinstance(response, DummyResponse)
assert tracker.exists(1)
assert not tracker.exists(2)
# Ensure the registration was cleaned up
assert item._id not in endpoint3._futures
|
py | 1a53765b268393f77260b184521de20183b64c7a | """Builds the Unlimited Hand - sensor values network. (made from MNIST)
Implements the inference/loss/training pattern for model building.
1. inference() - Builds the model as far as is required for running the network
forward to make predictions.
2. loss() - Adds to the inference model the layers required to generate loss.
3. training() - Adds to the loss model the Ops required to generate and
apply gradients.
This file is used by the various "fully_connected_*.py" files and not meant to
be run.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
import math
import sys
import tensorflow as tf
import numpy as np
def inference(sensor_values, layer_units_array):
"""Build the Unlimited Hand - sensor values model up to where it may be used for inference.
Args:
sensor_values: sensor values placeholder, from inputs().
layer_units_array: layer units count array like hidden1, hidden2 and output layer units count array
Returns:
softmax_linear: Output tensor with the computed logits.
"""
out_layer_units_count = layer_units_array[len(layer_units_array) - 1]
values = sensor_values
logits = None
for layer_index in xrange(len(layer_units_array) - 1):
name = None
if layer_index != (len(layer_units_array) - 2):
name = 'hidden' + str(layer_index + 1)
else:
name = 'softmax_linear'
with tf.name_scope(name):
# weights = tf.Variable(
# tf.truncated_normal([layer_units_array[layer_index], layer_units_array[layer_index + 1]],
# stddev=1.0 / math.sqrt(float(layer_units_array[layer_index]))),
# name='weights')
weights = tf.Variable(
tf.truncated_normal([layer_units_array[layer_index], layer_units_array[layer_index + 1]],
stddev=np.sqrt(2 / np.prod(values.get_shape().as_list()[1:]))),
name='weights')
biases = tf.Variable(tf.zeros([layer_units_array[layer_index + 1]]), name='biases')
if layer_index != (len(layer_units_array) - 2):
values = tf.nn.relu(tf.matmul(values, weights) + biases)
else:
logits = tf.matmul(values, weights) + biases
return logits
def loss(logits, labels):
"""Calculates the loss from the logits and the labels.
Args:
logits: Logits tensor, float - [batch_size, class_count].
labels: Labels tensor, int32 - [batch_size].
Returns:
loss: Loss tensor of type float.
"""
labels = tf.to_int64(labels)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name='xentropy')
return tf.reduce_mean(cross_entropy, name='xentropy_mean')
def training(optimizer_full_class, loss, learning_rate):
"""Sets up the training Ops.
Creates a summarizer to track the loss over time in TensorBoard.
Creates an optimizer and applies the gradients to all trainable variables.
The Op returned by this function is what must be passed to the
`sess.run()` call to cause the model to train.
  Args:
    optimizer_full_class: Fully qualified optimizer class name as a string,
      e.g. 'tf.train.GradientDescentOptimizer', instantiated via eval().
    loss: Loss tensor, from loss().
    learning_rate: The learning rate to use for gradient descent.
Returns:
train_op: The Op for training.
"""
# Add a scalar summary for the snapshot loss.
tf.summary.scalar('loss', loss)
# Create the optimizer with the given learning rate.
optimizer = eval(optimizer_full_class + '(learning_rate)')
# Create a variable to track the global step.
global_step = tf.Variable(0, name='global_step', trainable=False)
# Use the optimizer to apply the gradients that minimize the loss
# (and also increment the global step counter) as a single training step.
train_op = optimizer.minimize(loss, global_step=global_step)
return train_op
def evaluation(logits, labels):
"""Evaluate the quality of the logits at predicting the label.
Args:
logits: Logits tensor, float - [batch_size, class_count].
labels: Labels tensor, int32 - [batch_size], with values in the range [0, class_count).
Returns:
A scalar int32 tensor with the number of examples (out of batch_size)
that were predicted correctly.
"""
# For a classifier model, we can use the in_top_k Op.
# It returns a bool tensor with shape [batch_size] that is true for
# the examples where the label is in the top k (here k=1)
# of all logits for that example.
correct = tf.nn.in_top_k(logits, labels, 1)
# Return the number of true entries.
return tf.reduce_sum(tf.cast(correct, tf.int32), name="eval_correct")
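# A rough end-to-end sketch (illustrative only; the placeholder shapes, layer sizes
# and optimizer class are assumptions, mirroring how the fully_connected_*.py
# drivers mentioned above would wire this module together):
#
#     sensor_values = tf.placeholder(tf.float32, shape=(None, 8))
#     labels = tf.placeholder(tf.int32, shape=(None,))
#     logits = inference(sensor_values, [8, 128, 64, 5])
#     loss_op = loss(logits, labels)
#     train_op = training('tf.train.GradientDescentOptimizer', loss_op, 0.01)
#     eval_correct = evaluation(logits, labels)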
|
py | 1a5376bdb4659e24dd52a8a01083205602a672c9 | """
Given a string, calculate its length
"""
# Iterative approach
def iterative_length(string):
length = 0
for i in string:
length += 1
return length
print(iterative_length('hello worLd'))
# Recursive approach
def recursive_length(string):
if string == "":
return 0
return 1 + recursive_length(string[1:])
print(recursive_length('hello Worlds'))
|
py | 1a5376d8e9df2320a17297a9ca06b76502d15ce0 | import os
from utils.face_proc import FaceProc
import argparse
import pickle
from forensic_test import exam_img, exam_video
def main(args):
all_paths = os.listdir(args.input_dir)
proba_list = []
# initiate face process class, used to detect face and extract landmarks
face_inst = FaceProc()
# initialize SVM classifier for face forensics
with open(args.classifier_path, 'rb') as f:
model = pickle.load(f)
classifier = model[0]
scaler = model[1]
for f_name in all_paths:
f_path = os.path.join(args.input_dir, f_name)
print('_'*20)
print('Testing: ' + f_name)
suffix = f_path.split('.')[-1]
if suffix.lower() in ['jpg', 'png', 'jpeg', 'bmp']:
proba, optout = exam_img(args, f_path, face_inst, classifier, scaler)
elif suffix.lower() in ['mp4', 'avi', 'mov', 'mts']:
proba, optout = exam_video(args, f_path, face_inst, classifier, scaler)
print('fake_proba: {}, optout: {}'.format(str(proba), optout))
tmp_dict = dict()
tmp_dict['file_name'] = f_name
tmp_dict['probability'] = proba
tmp_dict['optout'] = optout
proba_list.append(tmp_dict)
pickle.dump(proba_list, open(args.save_file, 'wb'))
print(proba_list)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="headpose forensics")
parser.add_argument('--input_dir', type=str, default='debug_data')
parser.add_argument('--markID_c', type=str, default='18-36,49,55', help='landmark ids to estimate CENTRAL face region')
parser.add_argument('--markID_a', type=str, default='1-36,49,55', help='landmark ids to estimate WHOLE face region')
parser.add_argument('--classifier_path', type=str, default='svm_model.p')
parser.add_argument('--save_file', type=str, default='proba_list.p')
args = parser.parse_args()
main(args) |
py | 1a5376f3582b5bf041f840437bb24de7a9d58f8d | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_nsister_rancor_grovo.iff"
result.attribute_template_id = 9
result.stfName("monster_name","grovo")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result |
py | 1a5376fe6ff44b4b0780e9b05e0b7d04b5e38153 | # Cave factory produces a cave-like structure with no disconnected
# rooms. Caves typically have a smooth, twisty appearance with lots of
# alcoves. This is based largely on the cellular automata examples at:
#
# http://roguebasin.roguelikedevelopment.org
#
# It also borrows code for joining disconnected cells from Dana Larose's
# example:
# http://pixelenvy.ca/wa/ca_cave.html
#
# I've tweaked the CA generations a bit to smooth out the cell joins, and added
# support for building connecting edges. I use this to build connected tiles of
# caves and hallways joining to other parts of the dungeon.
import sys
from random import randrange, random, choice
from disjoint_set import DisjointSet
FLOOR = 1
WALL = 2
TUNNEL = 3
class new:
def __init__(self, length, width, walls=0.40):
self.__length = length
self.__width = width
self.__exits = []
self.__map = []
self.__buf_map = []
self.__gen_initial_map(walls)
self.__ds = DisjointSet()
self.__cpt = (int(self.__length/2), int(self.__width/2))
def resize_map(self, new_length, new_width, center=True):
new_map = [[WALL for i in xrange(new_width)]
for j in xrange(new_length)]
ox = int(new_width/2.0-self.__width/2.0+0.5)
oy = int(new_length/2.0-self.__length/2.0+0.5)
for i in xrange(self.__width):
for j in xrange(self.__length):
x2 = ox + i
y2 = oy + j
if (
x2 >= 0 and
y2 >= 0 and
x2 < new_width and
y2 < new_width
):
new_map[x2][y2] = self.__map[i][j]
self.__map = new_map
self.__length = new_length
self.__width = new_width
self.__exits = []
self.__cpt = (int(self.__length/2), int(self.__width/2))
def print_map(self):
for c in xrange(0, self.__width):
for r in xrange(0, self.__length):
if self.__map[r][c] == WALL:
sys.stdout.write('#')
elif self.__map[r][c] == TUNNEL:
sys.stdout.write('+')
else:
sys.stdout.write(' ')
print
print
def iterate_walls(self):
for c in xrange(0, self.__width):
for r in xrange(0, self.__length):
if self.__map[r][c] == WALL:
if (self.__adj_flr_count(r, c) > 0):
yield (c, r)
def iterate_map(self, cell_type):
for c in xrange(0, self.__width):
for r in xrange(0, self.__length):
if self.__map[r][c] == cell_type:
yield (c, r)
def add_exit(self, pt1, pt2):
while (pt1 != pt2):
if (
pt1[0] < 0 or
pt1[0] >= self.__width or
pt1[1] < 0 or
pt1[1] >= self.__length
):
                sys.exit('WARN: Exit out of range: %s' % str(pt1))
else:
self.__exits.append(pt1)
pt1 = (pt1[0] + cmp(pt2[0], pt1[0]),
pt1[1] + cmp(pt2[1], pt1[1]))
def purge_exits(self):
self.__exits = []
for c in xrange(0, self.__width):
for r in xrange(0, self.__length):
if (
c == 0 or c == self.__width-1 or
r == 0 or r == self.__length-1
):
                    self.__map[r][c] = WALL
def grow_map(self):
self.__generation(1, 2, -1)
def reduce_map(self):
self.__generation(1, 7, -1)
def gen_map(self, mode='default'):
if mode == 'room':
# One large cavern room
self.__generation(4, 5, -1)
self.__join_rooms()
self.__generation(1, 5, -1)
else:
            # Windy passages.
            # Repeat 4: W'(p) = R1(p) >= 5 || R2(p) <= 2
            # Repeat 3: W'(p) = R1(p) >= 5
# We do the above, with a cave join pass right before the final
# iteration. This helps smooth out any sharp edges after the join
# pass.
self.__generation(4, 5, 2)
self.__generation(2, 5, -1)
self.__join_rooms()
self.__generation(1, 5, -1)
def __generation(self, count, r1_cutoff, r2_cutoff):
while (count > 0):
self.__buf_map = [[WALL for i in xrange(self.__width)]
for j in xrange(self.__length)]
self.__gen_walls(self.__buf_map)
self.__gen_walls(self.__map)
for r in xrange(1, self.__length-1):
for c in xrange(1, self.__width-1):
adjcount_r1 = self.__adj_wall_count(r, c, 1)
adjcount_r2 = self.__adj_wall_count(r, c, 2)
if(adjcount_r1 >= r1_cutoff or
adjcount_r2 <= r2_cutoff):
self.__buf_map[r][c] = WALL
else:
self.__buf_map[r][c] = FLOOR
self.__map = list(self.__buf_map)
count -= 1
def __gen_initial_map(self, fillprob):
def rwall(fillprob):
if (random() < fillprob):
return WALL
return FLOOR
self.__map = [[rwall(fillprob) for i in xrange(self.__width)]
for j in xrange(self.__length)]
self.__gen_walls(self.__map)
def __gen_walls(self, a_map):
for j in range(0, self.__length):
a_map[j][0] = WALL
a_map[j][self.__width-1] = WALL
for j in range(0, self.__width):
a_map[0][j] = WALL
a_map[self.__length-1][j] = WALL
# Force the exits to be floor. We grow them out from the edge a bit to
# make sure they don't get sealed off.
for pos in self.__exits:
a_map[pos[0]][pos[1]] = FLOOR
for pos2 in ((-1, 0), (1, 0), (0, -1), (0, 1),
(-2, 0), (2, 0), (0, -2), (0, 2)):
p = (pos[0]+pos2[0], pos[1]+pos2[1])
if (p[0] < 1 or p[1] < 1):
continue
if (
p[0] >= self.__width-1 or
p[1] >= self.__length-1
):
continue
a_map[p[0]][p[1]] = FLOOR
def __adj_flr_count(self, sr, sc):
count = 0
for pos in ((-1, 0), (1, 0), (0, -1), (0, 1)):
p = (sr+pos[0], sc+pos[1])
if (p[0] < 0 or p[1] < 0):
continue
if (
p[0] > self.__width-1 or
p[1] > self.__length-1
):
continue
if (self.__map[p[0]][p[1]] == FLOOR):
count += 1
return count
def __adj_wall_count(self, sr, sc, rng=1):
count = 0
for r in xrange(-rng, rng+1):
for c in xrange(-rng, rng+1):
#if (r == 0 and c == 0):
# continue
if (abs(r) == 2 and abs(c) == 2):
continue
if (sr + r < 0 or sc + c < 0):
continue
if (sr + r >= self.__length or sc + c >= self.__width):
continue
if self.__map[sr + r][sc + c] == WALL:
count += 1
return count
def __join_rooms(self):
# Divide all cells into joined sets
for r in xrange(0, self.__length):
for c in xrange(0, self.__width):
if self.__map[r][c] != WALL:
self.__union_adj_sqr(r, c)
all_caves = self.__ds.split_sets()
while len(all_caves) > 1:
self.__join_points(all_caves[choice(all_caves.keys())][0])
all_caves = self.__ds.split_sets()
def __union_adj_sqr(self, sr, sc):
loc = (sr, sc)
root1 = self.__ds.find(loc)
# A cell is connected to other cells only in cardinal directions.
# (diagonals don't count for movement).
for pos in ((-1, 0), (1, 0), (0, -1), (0, 1)):
if (sr+pos[0] < 0 or sc+pos[1] < 0):
continue
if (
sr+pos[0] >= self.__length or
sc+pos[1] >= self.__width
):
continue
nloc = (sr+pos[0], sc+pos[1])
if self.__map[nloc[0]][nloc[1]] == FLOOR:
root2 = self.__ds.find(nloc)
if root1 != root2:
self.__ds.union(root1, root2)
def __join_points(self, pt1):
next_pt = pt1
while 1:
dir = self.__get_tunnel_dir(pt1, self.__cpt)
move = randrange(0, 3)
if move == 0:
next_pt = (pt1[0] + dir[0], pt1[1])
elif move == 1:
next_pt = (pt1[0], pt1[1] + dir[1])
else:
next_pt = (pt1[0] + dir[0], pt1[1] + dir[1])
root1 = self.__ds.find(next_pt)
root2 = self.__ds.find(pt1)
if root1 != root2:
self.__ds.union(root1, root2)
for pos in ((0, 0), (-1, 0), (1, 0), (0, -1), (0, 1)):
if (
next_pt[0]+pos[0] < 0 or next_pt[1]+pos[1] < 0 or
next_pt[0]+pos[0] >= self.__length or
next_pt[1]+pos[1] >= self.__width
):
continue
if (self.__map[next_pt[0]+pos[0]][next_pt[1]+pos[1]] == WALL):
self.__map[next_pt[0]+pos[0]][next_pt[1]+pos[1]] = TUNNEL
if self.__stop_drawing(pt1, next_pt, self.__cpt):
return
pt1 = next_pt
def __stop_drawing(self, pt, npt, cpt):
if self.__ds.find(npt) == self.__ds.find(cpt):
return 1
if (
self.__ds.find(pt) != self.__ds.find(npt) and
self.__map[npt[0]][npt[1]] != WALL
):
return 1
return 0
def __get_tunnel_dir(self, pt1, pt2):
if pt1[0] < pt2[0]:
h_dir = +1
elif pt1[0] > pt2[0]:
h_dir = -1
else:
h_dir = 0
if pt1[1] < pt2[1]:
v_dir = +1
elif pt1[1] > pt2[1]:
v_dir = -1
else:
v_dir = 0
return (h_dir, v_dir)
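# A small usage sketch (illustrative; the size, exit points and mode are arbitrary):
#
#     cave = new(40, 40, walls=0.40)
#     cave.add_exit((0, 5), (0, 10))   # carve an exit along one edge
#     cave.gen_map()                   # or cave.gen_map('room') for one large cavern
#     cave.print_map()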
|
py | 1a537836400e3eecc8d314b4f29638ecdbaf9af9 | #!/usr/bin/env python
# coding=utf-8
# Copyright 2020 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for question answering.
"""
# You can also adapt this script on your own question answering task. Pointers for this are left as comments.
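# A typical invocation looks roughly like the following (illustrative values; adjust
# the model, dataset and output paths for a real run):
#
#   python run_qa.py \
#     --model_name_or_path bert-base-uncased \
#     --dataset_name squad \
#     --do_train \
#     --do_eval \
#     --per_device_train_batch_size 12 \
#     --learning_rate 3e-5 \
#     --num_train_epochs 2 \
#     --max_seq_length 384 \
#     --doc_stride 128 \
#     --output_dir /tmp/debug_squad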
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import load_dataset, load_metric
import transformers
from trainer_qa import QuestionAnsweringTrainer
from transformers import (
AutoConfig,
AutoModelForQuestionAnswering,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizerFast,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
from transformers.utils import check_min_version
from utils_qa import postprocess_qa_predictions
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.6.0")
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Path to directory to store the pretrained models downloaded from huggingface.co"},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
test_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input test data file to evaluate the perplexity on (a text file)."},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
max_seq_length: int = field(
default=384,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
pad_to_max_length: bool = field(
default=True,
metadata={
"help": "Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch (which can "
"be faster on GPU but will be slower on TPU)."
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
},
)
max_predict_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
},
)
version_2_with_negative: bool = field(
default=False, metadata={"help": "If true, some of the examples do not have an answer."}
)
null_score_diff_threshold: float = field(
default=0.0,
metadata={
"help": "The threshold used to select the null answer: if the best answer has a score that is less than "
"the score of the null answer minus this threshold, the null answer is selected for this example. "
"Only useful when `version_2_with_negative=True`."
},
)
doc_stride: int = field(
default=128,
metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
)
n_best_size: int = field(
default=20,
metadata={"help": "The total number of n-best predictions to generate when looking for an answer."},
)
max_answer_length: int = field(
default=30,
metadata={
"help": "The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
},
)
def __post_init__(self):
if (
self.dataset_name is None
and self.train_file is None
and self.validation_file is None
and self.test_file is None
):
raise ValueError("Need either a dataset name or a training/validation file/test_file.")
else:
if self.train_file is not None:
extension = self.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
if self.test_file is not None:
extension = self.test_file.split(".")[-1]
assert extension in ["csv", "json"], "`test_file` should be a csv or a json file."
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
else:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
extension = data_args.train_file.split(".")[-1]
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.validation_file.split(".")[-1]
if data_args.test_file is not None:
data_files["test"] = data_args.test_file
extension = data_args.test_file.split(".")[-1]
datasets = load_dataset(extension, data_files=data_files, field="data", cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=True,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model = AutoModelForQuestionAnswering.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# Tokenizer check: this script requires a fast tokenizer.
if not isinstance(tokenizer, PreTrainedTokenizerFast):
raise ValueError(
"This example script only works for models that have a fast tokenizer. Checkout the big table of models "
"at https://huggingface.co/transformers/index.html#bigtable to find the model types that meet this "
"requirement"
)
# Preprocessing the datasets.
    # Preprocessing is slightly different for training and evaluation.
if training_args.do_train:
column_names = datasets["train"].column_names
elif training_args.do_eval:
column_names = datasets["validation"].column_names
else:
column_names = datasets["test"].column_names
question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
# Training preprocessing
def prepare_train_features(examples):
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
        # in one example possibly giving several features when a context is long, each of those features having a
        # context that overlaps a bit with the context of the previous feature.
tokenized_examples = tokenizer(
examples[question_column_name if pad_on_right else context_column_name],
examples[context_column_name if pad_on_right else question_column_name],
truncation="only_second" if pad_on_right else "only_first",
max_length=max_seq_length,
stride=data_args.doc_stride,
return_overflowing_tokens=True,
return_offsets_mapping=True,
padding="max_length" if data_args.pad_to_max_length else False,
)
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
# The offset mappings will give us a map from token to character position in the original context. This will
# help us compute the start_positions and end_positions.
offset_mapping = tokenized_examples.pop("offset_mapping")
# Let's label those examples!
tokenized_examples["start_positions"] = []
tokenized_examples["end_positions"] = []
for i, offsets in enumerate(offset_mapping):
# We will label impossible answers with the index of the CLS token.
input_ids = tokenized_examples["input_ids"][i]
cls_index = input_ids.index(tokenizer.cls_token_id)
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
sequence_ids = tokenized_examples.sequence_ids(i)
# One example can give several spans, this is the index of the example containing this span of text.
sample_index = sample_mapping[i]
answers = examples[answer_column_name][sample_index]
# If no answers are given, set the cls_index as answer.
if len(answers["answer_start"]) == 0:
tokenized_examples["start_positions"].append(cls_index)
tokenized_examples["end_positions"].append(cls_index)
else:
# Start/end character index of the answer in the text.
start_char = answers["answer_start"][0]
end_char = start_char + len(answers["text"][0])
# Start token index of the current span in the text.
token_start_index = 0
while sequence_ids[token_start_index] != (1 if pad_on_right else 0):
token_start_index += 1
# End token index of the current span in the text.
token_end_index = len(input_ids) - 1
while sequence_ids[token_end_index] != (1 if pad_on_right else 0):
token_end_index -= 1
# Detect if the answer is out of the span (in which case this feature is labeled with the CLS index).
if not (offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char):
tokenized_examples["start_positions"].append(cls_index)
tokenized_examples["end_positions"].append(cls_index)
else:
# Otherwise move the token_start_index and token_end_index to the two ends of the answer.
# Note: we could go after the last offset if the answer is the last word (edge case).
while token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char:
token_start_index += 1
tokenized_examples["start_positions"].append(token_start_index - 1)
while offsets[token_end_index][1] >= end_char:
token_end_index -= 1
tokenized_examples["end_positions"].append(token_end_index + 1)
return tokenized_examples
if training_args.do_train:
if "train" not in datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = datasets["train"]
if data_args.max_train_samples is not None:
            # We will select a sample from the whole data if the argument is specified
train_dataset = train_dataset.select(range(data_args.max_train_samples))
# Create train feature from dataset
train_dataset = train_dataset.map(
prepare_train_features,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
)
if data_args.max_train_samples is not None:
            # The number of samples might increase during feature creation, so we select only the specified max samples
train_dataset = train_dataset.select(range(data_args.max_train_samples))
# Validation preprocessing
def prepare_validation_features(examples):
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
        # in one example possibly giving several features when a context is long, each of those features having a
        # context that overlaps a bit with the context of the previous feature.
tokenized_examples = tokenizer(
examples[question_column_name if pad_on_right else context_column_name],
examples[context_column_name if pad_on_right else question_column_name],
truncation="only_second" if pad_on_right else "only_first",
max_length=max_seq_length,
stride=data_args.doc_stride,
return_overflowing_tokens=True,
return_offsets_mapping=True,
padding="max_length" if data_args.pad_to_max_length else False,
)
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
tokenized_examples["example_id"] = []
for i in range(len(tokenized_examples["input_ids"])):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
sequence_ids = tokenized_examples.sequence_ids(i)
context_index = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
sample_index = sample_mapping[i]
tokenized_examples["example_id"].append(examples["id"][sample_index])
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
tokenized_examples["offset_mapping"][i] = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["offset_mapping"][i])
]
return tokenized_examples
if training_args.do_eval:
if "validation" not in datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_examples = datasets["validation"]
if data_args.max_eval_samples is not None:
            # We will select a sample from the whole data
eval_examples = eval_examples.select(range(data_args.max_eval_samples))
# Validation Feature Creation
eval_dataset = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
)
if data_args.max_eval_samples is not None:
            # During feature creation the number of samples might increase, so we select the required samples again
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
if training_args.do_predict:
if "test" not in datasets:
raise ValueError("--do_predict requires a test dataset")
predict_examples = datasets["test"]
if data_args.max_predict_samples is not None:
            # We will select a sample from the whole data
predict_examples = predict_examples.select(range(data_args.max_predict_samples))
# Predict Feature Creation
predict_dataset = predict_examples.map(
prepare_validation_features,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
)
if data_args.max_predict_samples is not None:
            # During feature creation the number of samples might increase, so we select the required samples again
predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
# Data collator
# We have already padded to max length if the corresponding flag is True, otherwise we need to pad in the data
# collator.
data_collator = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
)
# Post-processing:
def post_processing_function(examples, features, predictions, stage="eval"):
# Post-processing: we match the start logits and end logits to answers in the original context.
predictions = postprocess_qa_predictions(
examples=examples,
features=features,
predictions=predictions,
version_2_with_negative=data_args.version_2_with_negative,
n_best_size=data_args.n_best_size,
max_answer_length=data_args.max_answer_length,
null_score_diff_threshold=data_args.null_score_diff_threshold,
output_dir=training_args.output_dir,
is_world_process_zero=trainer.is_world_process_zero(),
prefix=stage,
)
# Format the result to the format the metric expects.
if data_args.version_2_with_negative:
formatted_predictions = [
{"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
]
else:
formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric("squad_v2" if data_args.version_2_with_negative else "squad")
def compute_metrics(p: EvalPrediction):
return metric.compute(predictions=p.predictions, references=p.label_ids)
# Initialize our Trainer
trainer = QuestionAnsweringTrainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
eval_examples=eval_examples if training_args.do_eval else None,
tokenizer=tokenizer,
data_collator=data_collator,
post_process_function=post_processing_function,
compute_metrics=compute_metrics,
)
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model() # Saves the tokenizer too for easy upload
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate()
max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
# Prediction
if training_args.do_predict:
logger.info("*** Predict ***")
results = trainer.predict(predict_dataset, predict_examples)
metrics = results.metrics
max_predict_samples = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
)
metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))
trainer.log_metrics("predict", metrics)
trainer.save_metrics("predict", metrics)
if training_args.push_to_hub:
kwargs = {"finetuned_from": model_args.model_name_or_path, "tags": "question-answering"}
if data_args.dataset_name is not None:
kwargs["dataset_tags"] = data_args.dataset_name
if data_args.dataset_config_name is not None:
kwargs["dataset_args"] = data_args.dataset_config_name
kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
else:
kwargs["dataset"] = data_args.dataset_name
trainer.push_to_hub(**kwargs)
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
|
py | 1a53786d20069d144aefafbbdb3fb58f2d7788ef | # -*- coding: utf-8 -*-
"""
This file contains the classes for the AST types and the methods to traverse it.
Note that it was adapted from the interpreter by Jay Conrod at http://jayconrod.com/
----------------------------------------------------------------------------
The most important methods defined for the interaction with the solver are:
def replace(self,x,y):
'''
Method that handles assignment x:=y.
Returns a new instance with the value of x replaced by y.
'''
pass
def exp(self):
'''
Transforms an expression into a form consistent with Z3 solver.
The return value can be:
-> a string (Z3 recognizes strings as expressions with eval command);
-> Z3 BoolRef function, such as And, Or, Implies or Not.
'''
pass
----------------------------------------------------------------------------
"""
# Copyright (c) 2011, Jay Conrod.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Jay Conrod nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL JAY CONROD BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
from equality import *
sys.path.append(".")
from z3 import *
#####################################
class Statement(Equality):
pass
class Aexp(Equality):
pass
class Bexp(Equality):
pass
class ArrayExp(Equality):
pass
######################################
class Skip(Statement):
"""
Class that handles skips in the language
"""
def __init__(self):
pass
def __repr__(self):
return "skip"
class AssignStatement(Statement):
"""
Class that handles assignments
"""
def __init__(self, name, aexp):
self.name = VarAexp(name)
self.aexp = aexp
def __repr__(self):
return '%s:= %s' % (self.name, self.aexp)
class ArrayVar(Aexp):
"""
Class that handles arrays
"""
def __init__(self, name):
self.name=name
self.name = "Array('%s', IntSort(), IntSort())" % name
self.var= name
def __repr__(self):
return self.var
def exp(self):
"""
** Transforms an expression into a form consistent with Z3 solver.
** The return value is a string (Z3 recognizes strings as expressions with eval command);
"""
return self.name # Sends variable to Z3 context
'''
def replace(self, x, y):
"""
** Method that handles assignment x:=y.
** Returns a new instance named y if the current var is x. Otherwise just returns itself.
"""
if x==self:
if isinstance(y, BinopAexp):
return y
#if type(self.var)!=str:
# this verification handles composed types such as (x+10) of type BinopAexp
# return BinopAexp
# elif x.var==self.var:
# return
return self
'''
class Procedure:
"""
Class that handles procedures
"""
def __init__(self,name,body, pre=True, post=True):
self.name =name
self.body =body
self.pre =pre
self.post =post
def __repr__(self):
return "proc %s" % self.name
class ArrayAssignment(Statement):
def __init__(self,name,index,value):
self.name=ArrayVar(name)
self.var=name
self.index=index
self.value=value
self.selec=ArraySelect(name,index)
def __repr__(self):
return '%s[%s]:= %s'%(self.name,self.index,self.value)
def exp(self):
return Store(eval(self.name.exp()), self.index.exp(),self.value.exp())
class ArraySelect(Statement):
def __init__(self,name,index):
self.name=ArrayVar(name)
self.index=index
self.var=name
def __repr__(self):
return '%s[%s]'%(self.name,self.index)
def replace(self,x,y):
"""
** Method that handles assignment x:=y.
** Returns a new instance named y if the current array is x. Otherwise just returns itself.
"""
if isinstance(x,ArrayAssignment):
if x.var==self.var and x.index==self.index:
return y
if self.index==x:
return ArraySelect(self.var,y)
return self
def exp(self):
"""
** Transforms an expression into a form consistent with Z3 solver.
** The return value is a string (Z3 recognizes strings as expressions with eval command);
"""
return '%s[%s]' % (self.name.exp(),self.index.exp())
class CompoundStatement(Statement):
"""
Class that handles separators (with a comma)
"""
def __init__(self, first, second):
self.first = first
self.second = second
def __repr__(self):
return 'CompoundStatement(%s, %s)' % (self.first, self.second)
class IfStatement(Statement):
"""
Class that handles conditional statements
"""
def __init__(self, condition, true_stmt, false_stmt):
self.condition = condition
self.true_stmt = true_stmt
self.false_stmt = false_stmt
def __repr__(self):
return '(if %s then %s else %s)' % (self.condition, self.true_stmt, self.false_stmt)
class WhileStatement(Statement):
def __init__(self, condition, body,loop_inv):
self.condition = condition
self.body = body
self.inv=LoopInvariant(loop_inv)
def __repr__(self):
return 'while %s do %s %s end' % (self.condition, self.inv, self.body)
class IntAexp(Aexp):
def __init__(self, i):
self.i = i
def replace(self,x,y):
"""
** Method that handles assignment x:=y.
** Returns the same instance because int values cannot be replaced.
"""
return self
def exp(self):
"""
** Transforms an expression into a form consistent with Z3 solver.
** The return value is a string (Z3 recognizes strings as expressions with eval command);
"""
return str(self.i)
def __repr__(self):
return '%s' % self.i
class VarAexp(Aexp):
def __init__(self, name):
self.name=name
self.name = "Int('%s')" %name
self.var= name
def exp(self):
"""
** Transforms an expression into a form consistent with Z3 solver.
** The return value is a string (Z3 recognizes strings as expressions with eval command);
"""
return self.name # Sends variable to Z3 context
def replace(self, x, y):
# return y
"""
** Method that handles assignment x:=y.
** Returns a new instance named y if the current var is x. Otherwise just returns itself.
"""
if x==self:
return y
return self
def __repr__(self):
return '%s' % self.var
class BinopAexp(Aexp):
"""
Handles arithmetic expressions such as
x/1, 1-8, y+2 ...
"""
def __init__(self, op, left, right):
self.op = op
self.left = left
self.right = right
def replace(self,x,y):
"""
** Method that handles assignment x:=y.
** Returns a new instance with the value of x replaced by y.
"""
return BinopAexp(self.op,self.left.replace(x,y),self.right.replace(x,y))
def exp(self):
"""
** Transforms an expression into a form consistent with Z3 solver.
** The return value is a string (Z3 recognizes strings as expressions with eval command);
"""
return '(%s %s %s)' % ( self.left.exp(), self.op, self.right.exp())
def __repr__(self):
return '(%s %s %s)' % ( self.left, self.op, self.right)
class RelopBexp(Bexp):
"""
Handles boolean expressions such as
x>1, 1<=8, (y+2)<x-1 ...
"""
def __init__(self, op, left, right):
self.op = op
self.left = left
self.right = right
def replace(self,x,y):
"""
** Method that handles assignment x:=y.
** Returns a new instance with the value of x replaced by y.
"""
return RelopBexp(self.op,self.left.replace(x,y),self.right.replace(x,y))
def exp(self):
"""
** Transforms an expression into a form consistent with Z3 solver.
** The return value is a string (Z3 recognizes strings as expressions with eval command);
"""
return '(%s %s %s)' % (self.left.exp(), self.op, self.right.exp())
def __repr__(self):
return '{%s %s %s}' % (self.left, self.op, self.right)
class AndBexp(Bexp):
def __init__(self, left, right):
self.left = left
self.right = right
def replace(self,x,y):
"""
** Method that handles assignment x:=y.
** Returns a new instance with the value of x replaced by y.
"""
return AndBexp(self.left.replace(x,y), self.right.replace(x,y))
def exp(self):
"""
Transforms an expression into a form consistent with Z3 solver.
The return value is the And(x,y) BoolRef function, where x and y are of
type Bool or type String and then evaluated to Bool form.
"""
right=self.right.exp()
if type(right)==str:
right=eval(right)
if type(right)==bool:
if right==True:
right=true(str(self.right)).exp()
else:
right=false(str(self.right)).exp()
left=self.left.exp()
if type(left)==str:
left=eval(left)
if type(left)==bool:
if left==True:
left=(true(str(self.left))).exp()
else:
left=false(str(self.left)).exp()
return And(left,right)
def __repr__(self):
return '[%s /\\ %s]' % (self.left, self.right)
class OrBexp(Bexp):
def __init__(self, left, right):
self.left = left
self.right = right
def replace(self,x,y):
"""
** Method that handles assignment x:=y.
** Returns a new instance with the value of x replaced by y.
"""
return OrBexp(self.left.replace(x,y), self.right.replace(x,y))
def exp(self):
"""
** Transforms an expression into a form consistent with Z3 solver.
** The return value is the Or(x,y) BoolRef function, where x and y are of
type Bool or type String and then evaluated to Bool form.
"""
right=self.right.exp()
if type(right)==str:
right=eval(right)
left=self.left.exp()
if type(left)==str:
left=eval(left)
return Or(left,right)
def __repr__(self):
return '[%s \\/ %s]' % (self.left, self.right)
class ImpBexp(Bexp):
def __init__(self, left, right):
self.left = left
self.right = right
def replace(self,x,y):
"""
** Method that handles assignment x:=y.
** Returns a new instance with the value of x replaced by y.
"""
return ImpBexp(self.left.replace(x,y), self.right.replace(x,y))
def exp(self):
"""
** Transforms an expression into a form consistent with Z3 solver.
** The return value is the Implies(x,y) BoolRef function, where x and y are of
type Bool or type String and then evaluated to Bool form.
"""
right=self.right.exp()
if type(right)==str:
right=eval(right)
left=self.left.exp()
if type(left)==str:
left=eval(left)
return Implies(left, right)
def __repr__(self):
return '{%s --> %s}' % (self.left,self.right)
class BimpBexp(Bexp):
def __init__(self, left,right):
self.left = left
self.right= right
def replace(self,x,y):
"""
** Method that handles assignment x:=y.
** Returns a new instance with the value of x replaced by y.
"""
return BimpBexp(self.left.replace(x,y), self.right.replace(x,y))
def exp(self):
"""
** Transforms an expression into a form consistent with Z3 solver.
** The return value is the x=y BoolRef function, where x and y are of
type Bool or type String and then evaluated to Bool form.
"""
right=self.right.exp()
if type(right)==str:
right=eval(right)
left=self.left.exp()
if type(left)==str:
left=eval(left)
return '%s = %s' %(left,right)
def __repr__(self):
return '(%s <--> %s)' % (self.left,self.right)
class ForallBexp(Bexp):
def __init__(self,var,bexp):
self.var=VarAexp(var)
self.expr=bexp
def replace(self,x,y):
"""
** Method that handles assignment x:=y.
** Returns a new instance with the value of x replaced by y.
"""
return ForallBexp(self.var.replace(x,y),self.expr.replace(x,y))
def __repr__(self):
return "$forall %s : %s$" % (self.var,self.expr)
def exp(self):
"""
** Transforms an expression into a form consistent with Z3 solver.
** The return value is the ForAll(x,bexp) BoolRef function, where x is of
type Int and bexp a BoolRef value.
"""
expr=self.expr.exp()
if type(expr)==str:
expr=eval(expr)
return ForAll(eval(self.var.exp()), expr)
class ExistsBexp(Bexp):
def __init__(self,var,bexp):
self.var=VarAexp(var)
self.expr=bexp
def replace(self,x,y):
"""
** Method that handles assignment x:=y.
** Returns a new instance with the value of x replaced by y.
"""
return ExistsBexp(self.var.replace(x,y),self.expr.replace(x,y))
def __repr__(self):
return "$exists %s : %s$" % (self.var,self.expr)
def exp(self):
"""
** Transforms an expression into a form consistent with Z3 solver.
** The return value is the Exists(x,bexp) BoolRef function, where x is of
type Int and bexp a BoolRef value.
"""
expr=self.expr.exp()
if type(expr)==str:
expr=eval(expr)
return Exists(eval(self.var.exp()), expr)
class NotBexp(Bexp):
def __init__(self,expr):
self.expr=expr
def replace(self,x,y):
"""
** Method that handles assignment x:=y.
** Returns a new instance with the value of x replaced by y.
"""
return NotBexp(self.expr.replace(x,y))
def exp(self):
"""
** Transforms an expression into a form consistent with Z3 solver.
** The return value is the Not(x) BoolRef function, where x is of
type Bool or type String and then evaluated to Bool form.
"""
expr=self.expr.exp()
if type(expr)==str:
expr=eval(expr)
return Not(expr)
def __repr__(self):
return '~(%s)' % self.expr
class true(Bexp):
def __init__(self,expr):
self.expr=expr
def exp(self):
return BoolSort().cast(True)
def __repr__(self):
return 'true(%s)'%self.expr
def replace(self,x,y):
return self
class false(Bexp):
def __init__(self,expr):
self.expr=expr
def exp(self):
return BoolSort().cast(False)
def __repr__(self):
return 'false(%s)' % self.expr
def replace(self,x,y):
return self
class PreCondition(Statement):
def __init__(self,condition):
self.pre=condition
def replace(self,x,y):
"""
** Method that handles assignment x:=y.
** Returns a new instance with the value of x replaced by y.
"""
return PreCondition(self.pre.replace(x,y))
def exp(self):
"""
** Transforms an expression into a form consistent with Z3 solver.
** Computes the exp value of the precondition.
"""
return self.pre.exp()
def __repr__(self):
return "(%s)" %self.pre
class PostCondition(Statement):
def __init__(self,condition):
self.post=condition
def exp(self):
"""
** Transforms an expression into a form consistent with Z3 solver.
** Computes the exp value of the postcondition.
"""
return self.post.exp()
def replace(self,x,y):
"""
** Method that handles assignment x:=y.
** Returns a new instance with the value of x replaced by y.
"""
return PostCondition(self.post.replace(x,y))
def __repr__(self):
return "(%s)" % self.post
# This is a Statement so that it is easier to select in Result
class LoopInvariant(Statement):
def __init__(self, condition):
self.inv=condition
def replace(self,x,y):
"""
** Method that handles assignment x:=y.
** Returns a new instance with the value of x replaced by y.
"""
return LoopInvariant(self.inv.replace(x,y))
def exp(self):
"""
** Transforms an expression into a form consistent with Z3 solver.
** Computes the exp value of the loop invariant.
"""
return self.inv.exp()
def __repr__(self):
return "(%s)"% self.inv
|
py | 1a537871fcab78b8adbad7957d8f37a29cb6a2fb | import torch
import argparse
from pathlib import Path
from pprint import pprint
parser = argparse.ArgumentParser()
parser.add_argument('pthfile',help='path to pth file')
args = parser.parse_args()
pthdict = torch.load(args.pthfile)
# pprint(pthdict)
print(pthdict.keys())
print(pthdict['scheduler'])
# print(len(pthdict['scheduler']['base_lrs']))
# print(len(pthdict['scheduler']['_last_lr']))
# pthdict['scheduler']['milestones'] = ('60100','120000')
# print(pthdict['scheduler'])
# og_path = Path(args.pthfile)
# new_path = og_path.parent / '{}{}{}'.format(og_path.stem,'_lr-modified',og_path.suffix)
# print('Writing new state dict to {}'.format(new_path))
# torch.save( pthdict, str(new_path) ) |
py | 1a5378f765ee1eea9e73d59bf2812b46d47ab45c | import torch
import torch.nn as nn
class Generator(nn.Module):
def __init__(self, channels_noise, features_g=64):
super(Generator, self).__init__()
self.gen = nn.Sequential(
# Input: N x channels_noise x 1 x 1
self._block(channels_noise, features_g * 16, 4, 1, 0), # img: 4x4
self._block(features_g * 16, features_g * 8, 4, 2, 1), # img: 8x8
self._block(features_g * 8, features_g * 4, 4, 2, 1), # img: 16x16
# self._block(features_g * 4, features_g * 2, 4, 2, 1), # img: 32x32
nn.ConvTranspose2d(features_g * 4, 4, kernel_size=4, stride=2, padding=1),
# Output: N x 4 x 32 x 32 (64x64 would need the commented-out block above)
)
self.activation = nn.Tanh()
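# The spatial sizes noted above follow the ConvTranspose2d output formula
# out = (in - 1) * stride - 2 * padding + kernel_size (with default dilation
# and output_padding): 1x1 -> (1-1)*1 - 0 + 4 = 4x4, and every
# kernel=4/stride=2/padding=1 block doubles the size (4 -> 8 -> 16), so the
# final layer yields 32x32 feature maps with 4 channels.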
def _block(self, in_channels, out_channels, kernel_size, stride, padding):
return nn.Sequential(
nn.ConvTranspose2d(
in_channels,
out_channels,
kernel_size,
stride,
padding,
bias=False,
),
nn.BatchNorm2d(out_channels),
nn.ReLU(),
)
def forward(self, x):
x = self.gen(x)
return self.activation(x) * 2 |
py | 1a5379096869199a3dc3bea1984e17836550b861 | from com.android.monkeyrunner import MonkeyRunner, MonkeyDevice, MonkeyImage
device = MonkeyRunner.waitForConnection()
from javax.swing import JButton, JFrame, JPanel, ImageIcon
frame = JFrame('Android Display!',
defaultCloseOperation = JFrame.EXIT_ON_CLOSE,
size = (960, 540)
)
def change_text(event):
print 'Clicked!'
def mouseUsageClicked(event):
x = event.getX() * 2
y = event.getY() * 2
print(str(x)+"-"+str(y))
device.touch(x,y,MonkeyDevice.DOWN_AND_UP)
paintScreenshot(pan)
def paintScreenshot(p):
result = device.takeSnapshot()
im = ImageIcon(result.convertToBytes())
p.graphics.drawImage(im.getImage(), 0, 0, 960, 540, p)
pan = JPanel(mouseClicked=mouseUsageClicked) #, super__paintComponent= paintScreenshot)
frame.add(pan)
frame.visible = True
|
py | 1a537968250eab1cfd34b2de46a883aaff31821b | from typing import Any
from eth_utils import (
encode_hex,
is_bytes,
is_integer,
)
from eth_utils.toolz import curry
from eth_keys.constants import (
SECPK1_N,
)
from eth_keys.exceptions import (
ValidationError,
)
def validate_integer(value: Any) -> None:
if not is_integer(value) or isinstance(value, bool):
raise ValidationError("Value must be a an integer. Got: {0}".format(type(value)))
def validate_bytes(value: Any) -> None:
if not is_bytes(value):
raise ValidationError("Value must be a byte string. Got: {0}".format(type(value)))
@curry
def validate_gte(value: Any, minimum: int) -> None:
validate_integer(value)
if value < minimum:
raise ValidationError(
"Value {0} is not greater than or equal to {1}".format(
value, minimum,
)
)
@curry
def validate_lte(value: Any, maximum: int) -> None:
validate_integer(value)
if value > maximum:
raise ValidationError(
"Value {0} is not less than or equal to {1}".format(
value, maximum,
)
)
validate_lt_secpk1n = validate_lte(maximum=SECPK1_N - 1)
def validate_bytes_length(value: bytes, expected_length: int, name: str) -> None:
actual_length = len(value)
if actual_length != expected_length:
raise ValidationError(
"Unexpected {name} length: Expected {expected_length}, but got {actual_length} "
"bytes".format(
name=name,
expected_length=expected_length,
actual_length=actual_length,
)
)
def validate_message_hash(value: Any) -> None:
validate_bytes(value)
validate_bytes_length(value, 32, "message hash")
def validate_uncompressed_public_key_bytes(value: Any) -> None:
validate_bytes(value)
validate_bytes_length(value, 64, "uncompressed public key")
def validate_compressed_public_key_bytes(value: Any) -> None:
validate_bytes(value)
validate_bytes_length(value, 33, "compressed public key")
first_byte = value[0:1]
if first_byte not in (b"\x02", b"\x03"):
raise ValidationError(
"Unexpected compressed public key format: Must start with 0x02 or 0x03, but starts "
"with {first_byte}".format(
first_byte=encode_hex(first_byte),
)
)
def validate_private_key_bytes(value: Any) -> None:
validate_bytes(value)
validate_bytes_length(value, 32, "private key")
def validate_signature_bytes(value: Any) -> None:
validate_bytes(value)
validate_bytes_length(value, 65, "signature")
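# Hedged usage sketch (illustrative, not part of the upstream module): the
# curried validators above can be specialised once and reused, mirroring how
# validate_lt_secpk1n is built; `validate_positive` is a made-up name.
#
#     validate_positive = validate_gte(minimum=1)
#     validate_positive(5)                                  # passes silently
#     validate_lt_secpk1n(SECPK1_N)                         # raises ValidationError
#     validate_compressed_public_key_bytes(b"\x02" + b"\x00" * 32)  # passes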
|
py | 1a537a6e71d8c023aeb308d881e60c5c72798c76 | """
Plugin to extract tables from an invoice.
"""
import re
import logging
logger = logging.getLogger(__name__)
DEFAULT_OPTIONS = {'field_separator': r'\s+', 'line_separator': r'\n'}
def extract(self, content, output):
"""Try to extract tables from an invoice"""
for table in self['tables']:
# First apply default options.
plugin_settings = DEFAULT_OPTIONS.copy()
plugin_settings.update(table)
table = plugin_settings
# Validate settings
assert 'start' in table, 'Table start regex missing'
assert 'end' in table, 'Table end regex missing'
assert 'body' in table, 'Table body regex missing'
start = re.search(table['start'], content)
end = re.search(table['end'], content)
if not start or not end:
logger.warning('no table body found - start %s, end %s', start, end)
continue
table_body = content[start.end(): end.start()]
for line in re.split(table['line_separator'], table_body):
# skip lines that are empty or contain only whitespace
if not line.strip('').strip('\n') or not line:
continue
match = re.search(table['body'], line)
if match:
for field, value in match.groupdict().items():
# If a field name already exists, do not overwrite it
if field in output:
continue
if field.startswith('date') or field.endswith('date'):
output[field] = self.parse_date(value)
if not output[field]:
logger.error("Date parsing failed on date '%s'", value)
return None
elif field.startswith('amount'):
output[field] = self.parse_number(value)
else:
output[field] = value
logger.debug('ignoring *%s* because it doesn\'t match anything', line)
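# Hedged example (illustrative only) of a 'tables' entry this plugin expects
# in a template; the field names in the named groups are made up, and groups
# whose names start/end with 'date' or start with 'amount' are parsed by the
# loop above:
#
#     tables:
#       - start: 'Item\s+Qty\s+Amount'
#         end: 'Subtotal'
#         body: '(?P<item>.+?)\s+(?P<qty>\d+)\s+(?P<amount>\d+[.,]\d{2})'
#         line_separator: '\n'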
|
py | 1a537b7b2e54d5856c3e14dd74d0960ed9e7c962 | #!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: James D. McClain
# Mario Motta
# Yang Gao
# Qiming Sun <[email protected]>
# Jason Yu
# Alec White
#
import time
from functools import reduce
import numpy as np
import h5py
from pyscf import lib
from pyscf.lib import logger
from pyscf.pbc import scf
from pyscf.cc import uccsd
from pyscf.pbc.lib import kpts_helper
from pyscf.pbc.lib.kpts_helper import gamma_point
from pyscf.lib.parameters import LOOSE_ZERO_TOL, LARGE_DENOM # noqa
from pyscf.pbc.mp.kump2 import (get_frozen_mask, get_nocc, get_nmo,
padded_mo_coeff, padding_k_idx) # noqa
from pyscf.pbc.cc import kintermediates_uhf
from pyscf import __config__
einsum = lib.einsum
# --- list2array
def mo_c_list_to_array(mo_coeff):
mo_coeff_tmp=[]
for js in range(2):
tmp_nk = len(mo_coeff[js])
tmp_nb = mo_coeff[js][0].shape[0]
tmp_array = np.zeros((tmp_nk,tmp_nb,tmp_nb),dtype=complex)
for ik in range(tmp_nk):
tmp_array[ik,:,:]=mo_coeff[js][ik][:,:]
mo_coeff_tmp.append(tmp_array)
return mo_coeff_tmp
def convert_mo_coeff(mo_coeff):
if isinstance(mo_coeff[0], list):
mo_coeff=mo_c_list_to_array(mo_coeff)
return mo_coeff
def update_amps(cc, t1, t2, eris):
time0 = time.clock(), time.time()
log = logger.Logger(cc.stdout, cc.verbose)
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
Ht1a = np.zeros_like(t1a)
Ht1b = np.zeros_like(t1b)
Ht2aa = np.zeros_like(t2aa)
Ht2ab = np.zeros_like(t2ab)
Ht2bb = np.zeros_like(t2bb)
nkpts, nocca, nvira = t1a.shape
noccb, nvirb = t1b.shape[1:]
#fvv_ = eris.fock[0][:,nocca:,nocca:]
#fVV_ = eris.fock[1][:,noccb:,noccb:]
#foo_ = eris.fock[0][:,:nocca,:nocca]
#fOO_ = eris.fock[1][:,:noccb,:noccb]
fov_ = eris.fock[0][:,:nocca,nocca:]
fOV_ = eris.fock[1][:,:noccb,noccb:]
# Get location of padded elements in occupied and virtual space
nonzero_padding_alpha, nonzero_padding_beta = padding_k_idx(cc, kind="split")
nonzero_opadding_alpha, nonzero_vpadding_alpha = nonzero_padding_alpha
nonzero_opadding_beta, nonzero_vpadding_beta = nonzero_padding_beta
mo_ea_o = [e[:nocca] for e in eris.mo_energy[0]]
mo_eb_o = [e[:noccb] for e in eris.mo_energy[1]]
mo_ea_v = [e[nocca:] + cc.level_shift for e in eris.mo_energy[0]]
mo_eb_v = [e[noccb:] + cc.level_shift for e in eris.mo_energy[1]]
Fvv_, FVV_ = kintermediates_uhf.cc_Fvv(cc, t1, t2, eris)
Foo_, FOO_ = kintermediates_uhf.cc_Foo(cc, t1, t2, eris)
Fov_, FOV_ = kintermediates_uhf.cc_Fov(cc, t1, t2, eris)
# Move energy terms to the other side
for k in range(nkpts):
Fvv_[k][np.diag_indices(nvira)] -= mo_ea_v[k]
FVV_[k][np.diag_indices(nvirb)] -= mo_eb_v[k]
Foo_[k][np.diag_indices(nocca)] -= mo_ea_o[k]
FOO_[k][np.diag_indices(noccb)] -= mo_eb_o[k]
# Get the momentum conservation array
kconserv = cc.khelper.kconserv
# T1 equation
P = kintermediates_uhf.kconserv_mat(cc.nkpts, cc.khelper.kconserv)
Ht1a += fov_.conj()
Ht1b += fOV_.conj()
Ht1a += einsum('xyximae,yme->xia', t2aa, Fov_)
Ht1a += einsum('xyximae,yme->xia', t2ab, FOV_)
Ht1b += einsum('xyximae,yme->xia', t2bb, FOV_)
Ht1b += einsum('yxymiea,yme->xia', t2ab, Fov_)
Ht1a -= einsum('xyzmnae, xzymine->zia', t2aa, eris.ooov)
Ht1a -= einsum('xyzmNaE, xzymiNE->zia', t2ab, eris.ooOV)
#Ht1a -= einsum('xyzmnae,xzymine,xyzw->zia', t2aa, eris.ooov, P)
#Ht1a -= einsum('xyzmNaE,xzymiNE,xyzw->zia', t2ab, eris.ooOV, P)
Ht1b -= einsum('xyzmnae, xzymine->zia', t2bb, eris.OOOV)
#Ht1b -= einsum('xyzmnae,xzymine,xyzw->zia', t2bb, eris.OOOV, P)
Ht1b -= einsum('yxwnmea,xzymine,xyzw->zia', t2ab, eris.OOov, P)
for ka in range(nkpts):
Ht1a[ka] += einsum('ie,ae->ia', t1a[ka], Fvv_[ka])
Ht1b[ka] += einsum('ie,ae->ia', t1b[ka], FVV_[ka])
Ht1a[ka] -= einsum('ma,mi->ia', t1a[ka], Foo_[ka])
Ht1b[ka] -= einsum('ma,mi->ia', t1b[ka], FOO_[ka])
for km in range(nkpts):
# ka == ki; km == kf == km
# <ma||if> = [mi|af] - [mf|ai]
# => [mi|af] - [fm|ia]
Ht1a[ka] += einsum('mf,aimf->ia', t1a[km], eris.voov[ka, ka, km])
Ht1a[ka] -= einsum('mf,miaf->ia', t1a[km], eris.oovv[km, ka, ka])
Ht1a[ka] += einsum('MF,aiMF->ia', t1b[km], eris.voOV[ka, ka, km])
# miaf - mfai => miaf - fmia
Ht1b[ka] += einsum('MF,AIMF->IA', t1b[km], eris.VOOV[ka, ka, km])
Ht1b[ka] -= einsum('MF,MIAF->IA', t1b[km], eris.OOVV[km, ka, ka])
Ht1b[ka] += einsum('mf,fmIA->IA', t1a[km], eris.voOV[km, km, ka].conj())
for kf in range(nkpts):
ki = ka
ke = kconserv[ki, kf, km]
Ht1a[ka] += einsum('imef,fmea->ia', t2aa[ki,km,ke], eris.vovv[kf,km,ke].conj())
Ht1a[ka] += einsum('iMeF,FMea->ia', t2ab[ki,km,ke], eris.VOvv[kf,km,ke].conj())
Ht1b[ka] += einsum('IMEF,FMEA->IA', t2bb[ki,km,ke], eris.VOVV[kf,km,ke].conj())
Ht1b[ka] += einsum('mIfE,fmEA->IA', t2ab[km,ki,kf], eris.voVV[kf,km,ke].conj())
for ki, kj, ka in kpts_helper.loop_kkk(nkpts):
kb = kconserv[ki, ka, kj]
# Fvv equation
Ftmpa_kb = Fvv_[kb] - 0.5 * einsum('mb,me->be', t1a[kb], Fov_[kb])
Ftmpb_kb = FVV_[kb] - 0.5 * einsum('MB,ME->BE', t1b[kb], FOV_[kb])
Ftmpa_ka = Fvv_[ka] - 0.5 * einsum('mb,me->be', t1a[ka], Fov_[ka])
Ftmpb_ka = FVV_[ka] - 0.5 * einsum('MB,ME->BE', t1b[ka], FOV_[ka])
tmp = einsum('ijae,be->ijab', t2aa[ki, kj, ka], Ftmpa_kb)
Ht2aa[ki, kj, ka] += tmp
tmp = einsum('IJAE,BE->IJAB', t2bb[ki, kj, ka], Ftmpb_kb)
Ht2bb[ki, kj, ka] += tmp
tmp = einsum('iJaE,BE->iJaB', t2ab[ki, kj, ka], Ftmpb_kb)
Ht2ab[ki, kj, ka] += tmp
tmp = einsum('iJeB,ae->iJaB', t2ab[ki, kj, ka], Ftmpa_ka)
Ht2ab[ki, kj, ka] += tmp
#P(ab)
tmp = einsum('ijbe,ae->ijab', t2aa[ki, kj, kb], Ftmpa_ka)
Ht2aa[ki, kj, ka] -= tmp
tmp = einsum('IJBE,AE->IJAB', t2bb[ki, kj, kb], Ftmpb_ka)
Ht2bb[ki, kj, ka] -= tmp
# Foo equation
Ftmpa_kj = Foo_[kj] + 0.5 * einsum('je,me->mj', t1a[kj], Fov_[kj])
Ftmpb_kj = FOO_[kj] + 0.5 * einsum('JE,ME->MJ', t1b[kj], FOV_[kj])
Ftmpa_ki = Foo_[ki] + 0.5 * einsum('je,me->mj', t1a[ki], Fov_[ki])
Ftmpb_ki = FOO_[ki] + 0.5 * einsum('JE,ME->MJ', t1b[ki], FOV_[ki])
tmp = einsum('imab,mj->ijab', t2aa[ki, kj, ka], Ftmpa_kj)
Ht2aa[ki, kj, ka] -= tmp
tmp = einsum('IMAB,MJ->IJAB', t2bb[ki, kj, ka], Ftmpb_kj)
Ht2bb[ki, kj, ka] -= tmp
tmp = einsum('iMaB,MJ->iJaB', t2ab[ki, kj, ka], Ftmpb_kj)
Ht2ab[ki, kj, ka] -= tmp
tmp = einsum('mJaB,mi->iJaB', t2ab[ki, kj, ka], Ftmpa_ki)
Ht2ab[ki, kj, ka] -= tmp
#P(ij)
tmp = einsum('jmab,mi->ijab', t2aa[kj, ki, ka], Ftmpa_ki)
Ht2aa[ki, kj, ka] += tmp
tmp = einsum('JMAB,MI->IJAB', t2bb[kj, ki, ka], Ftmpb_ki)
Ht2bb[ki, kj, ka] += tmp
# T2 equation
eris_ovov = np.asarray(eris.ovov)
eris_OVOV = np.asarray(eris.OVOV)
eris_ovOV = np.asarray(eris.ovOV)
Ht2aa += (eris_ovov.transpose(0,2,1,3,5,4,6) - eris_ovov.transpose(2,0,1,5,3,4,6)).conj()
Ht2bb += (eris_OVOV.transpose(0,2,1,3,5,4,6) - eris_OVOV.transpose(2,0,1,5,3,4,6)).conj()
Ht2ab += eris_ovOV.transpose(0,2,1,3,5,4,6).conj()
tauaa, tauab, taubb = kintermediates_uhf.make_tau(cc, t2, t1, t1)
Woooo, WooOO, WOOOO = kintermediates_uhf.cc_Woooo(cc, t1, t2, eris)
# Add the contributions from Wvvvv
for km, ki, kn in kpts_helper.loop_kkk(nkpts):
kj = kconserv[km,ki,kn]
Woooo[km,ki,kn] += .5 * einsum('xmenf, xijef->minj', eris_ovov[km,:,kn], tauaa[ki,kj])
WOOOO[km,ki,kn] += .5 * einsum('xMENF, xIJEF->MINJ', eris_OVOV[km,:,kn], taubb[ki,kj])
WooOO[km,ki,kn] += .5 * einsum('xmeNF, xiJeF->miNJ', eris_ovOV[km,:,kn], tauab[ki,kj])
for km, ki, kn in kpts_helper.loop_kkk(nkpts):
kj = kconserv[km,ki,kn]
Ht2aa[ki,kj,:] += einsum('minj,wmnab->wijab', Woooo[km,ki,kn], tauaa[km,kn]) * .5
Ht2bb[ki,kj,:] += einsum('MINJ,wMNAB->wIJAB', WOOOO[km,ki,kn], taubb[km,kn]) * .5
Ht2ab[ki,kj,:] += einsum('miNJ,wmNaB->wiJaB', WooOO[km,ki,kn], tauab[km,kn])
add_vvvv_(cc, (Ht2aa, Ht2ab, Ht2bb), t1, t2, eris)
Wovvo, WovVO, WOVvo, WOVVO, WoVVo, WOvvO = \
kintermediates_uhf.cc_Wovvo(cc, t1, t2, eris)
#:Ht2ab += einsum('xwzimae,wvumeBJ,xwzv,wuvy->xyziJaB', t2aa, WovVO, P, P)
#:Ht2ab += einsum('xwziMaE,wvuMEBJ,xwzv,wuvy->xyziJaB', t2ab, WOVVO, P, P)
#:Ht2ab -= einsum('xie,zma,uwzBJme,zuwx,xyzu->xyziJaB', t1a, t1a, eris.VOov, P, P)
for kx, kw, kz in kpts_helper.loop_kkk(nkpts):
kv = kconserv[kx, kz, kw]
for ku in range(nkpts):
ky = kconserv[kw, kv, ku]
Ht2ab[kx, ky, kz] += lib.einsum('imae,mebj->ijab', t2aa[kx,kw,kz], WovVO[kw,kv,ku])
Ht2ab[kx, ky, kz] += lib.einsum('imae,mebj->ijab', t2ab[kx,kw,kz], WOVVO[kw,kv,ku])
#for kz, ku, kw in kpts_helper.loop_kkk(nkpts):
# kx = kconserv[kz,kw,ku]
# ky = kconserv[kz,kx,ku]
# continue
# Ht2ab[kx, ky, kz] -= lib.einsum('ie, ma, emjb->ijab', t1a[kx], t1a[kz], eris.voOV[kx,kz,kw].conj())
Ht2ab -= einsum('xie, yma, xyzemjb->xzyijab', t1a, t1a, eris.voOV[:].conj())
#:Ht2ab += einsum('wxvmIeA,wvumebj,xwzv,wuvy->yxujIbA', t2ab, Wovvo, P, P)
#:Ht2ab += einsum('wxvMIEA,wvuMEbj,xwzv,wuvy->yxujIbA', t2bb, WOVvo, P, P)
#:Ht2ab -= einsum('xIE,zMA,uwzbjME,zuwx,xyzu->yxujIbA', t1b, t1b, eris.voOV, P, P)
#for kx, kw, kz in kpts_helper.loop_kkk(nkpts):
# kv = kconserv[kx, kz, kw]
# for ku in range(nkpts):
# ky = kconserv[kw, kv, ku]
#Ht2ab[ky,kx,ku] += lib.einsum('miea, mebj-> jiba', t2ab[kw,kx,kv], Wovvo[kw,kv,ku])
#Ht2ab[ky,kx,ku] += lib.einsum('miea, mebj-> jiba', t2bb[kw,kx,kv], WOVvo[kw,kv,ku])
for km, ke, kb in kpts_helper.loop_kkk(nkpts):
kj = kconserv[km, ke, kb]
Ht2ab[kj,:,kb] += einsum('xmiea, mebj->xjiba', t2ab[km,:,ke], Wovvo[km,ke,kb])
Ht2ab[kj,:,kb] += einsum('xmiea, mebj->xjiba', t2bb[km,:,ke], WOVvo[km,ke,kb])
for kz, ku, kw in kpts_helper.loop_kkk(nkpts):
kx = kconserv[kz, kw, ku]
ky = kconserv[kz, kx, ku]
Ht2ab[ky,kx,ku] -= lib.einsum('ie, ma, bjme->jiba', t1b[kx], t1b[kz], eris.voOV[ku,kw,kz])
#:Ht2ab += einsum('xwviMeA,wvuMebJ,xwzv,wuvy->xyuiJbA', t2ab, WOvvO, P, P)
#:Ht2ab -= einsum('xie,zMA,zwuMJbe,zuwx,xyzu->xyuiJbA', t1a, t1b, eris.OOvv, P, P)
#for kx, kw, kz in kpts_helper.loop_kkk(nkpts):
# kv = kconserv[kx, kz, kw]
# for ku in range(nkpts):
# ky = kconserv[kw, kv, ku]
# Ht2ab[kx,ky,ku] += lib.einsum('imea,mebj->ijba', t2ab[kx,kw,kv],WOvvO[kw,kv,ku])
for km, ke, kb in kpts_helper.loop_kkk(nkpts):
kj = kconserv[km, ke, kb]
Ht2ab[:,kj,kb] += einsum('ximea, mebj->xijba', t2ab[:,km,ke], WOvvO[km,ke,kb])
for kz,ku,kw in kpts_helper.loop_kkk(nkpts):
kx = kconserv[kz, kw, ku]
ky = kconserv[kz, kx, ku]
Ht2ab[kx,ky,ku] -= lib.einsum('ie, ma, mjbe->ijba', t1a[kx], t1b[kz], eris.OOvv[kz, kw, ku])
#:Ht2ab += einsum('wxzmIaE,wvumEBj,xwzv,wuvy->yxzjIaB', t2ab, WoVVo, P, P)
#:Ht2ab -= einsum('xIE,zma,zwumjBE,zuwx,xyzu->yxzjIaB', t1b, t1a, eris.ooVV, P, P)
for kx, kw, kz in kpts_helper.loop_kkk(nkpts):
kv = kconserv[kx, kz, kw]
for ku in range(nkpts):
ky = kconserv[kw, kv, ku]
Ht2ab[ky, kx, kz] += lib.einsum('miae,mebj->jiab', t2ab[kw,kx,kz], WoVVo[kw,kv,ku])
for kz, ku, kw in kpts_helper.loop_kkk(nkpts):
kx = kconserv[kz,kw,ku]
ky = kconserv[kz,kx,ku]
Ht2ab[ky,kx,kz] -= lib.einsum('ie, ma, mjbe->jiab', t1b[kx], t1a[kz], eris.ooVV[kz,kw,ku])
#:u2aa = einsum('xwzimae,wvumebj,xwzv,wuvy->xyzijab', t2aa, Wovvo, P, P)
#:u2aa += einsum('xwziMaE,wvuMEbj,xwzv,wuvy->xyzijab', t2ab, WOVvo, P, P)
#Left this in to keep proper shape, need to replace later
u2aa = np.zeros_like(t2aa)
for kx, kw, kz in kpts_helper.loop_kkk(nkpts):
kv = kconserv[kx, kz, kw]
for ku in range(nkpts):
ky = kconserv[kw, kv, ku]
u2aa[kx,ky,kz] += lib.einsum('imae, mebj->ijab', t2aa[kx,kw,kz], Wovvo[kw,kv,ku])
u2aa[kx,ky,kz] += lib.einsum('imae, mebj->ijab', t2ab[kx,kw,kz], WOVvo[kw,kv,ku])
#:u2aa += einsum('xie,zma,zwumjbe,zuwx,xyzu->xyzijab', t1a, t1a, eris.oovv, P, P)
#:u2aa -= einsum('xie,zma,uwzbjme,zuwx,xyzu->xyzijab', t1a, t1a, eris.voov, P, P)
for kz, ku, kw in kpts_helper.loop_kkk(nkpts):
kx = kconserv[kz,kw,ku]
ky = kconserv[kz,kx,ku]
u2aa[kx,ky,kz] += lib.einsum('ie,ma,mjbe->ijab',t1a[kx],t1a[kz],eris.oovv[kz,kw,ku])
u2aa[kx,ky,kz] -= lib.einsum('ie,ma,bjme->ijab',t1a[kx],t1a[kz],eris.voov[ku,kw,kz])
#:u2aa += np.einsum('xie,uyzbjae,uzyx->xyzijab', t1a, eris.vovv, P)
#:u2aa -= np.einsum('zma,xzyimjb->xyzijab', t1a, eris.ooov.conj())
for ky, kx, ku in kpts_helper.loop_kkk(nkpts):
kz = kconserv[ky, ku, kx]
u2aa[kx, ky, kz] += lib.einsum('ie, bjae->ijab', t1a[kx], eris.vovv[ku,ky,kz])
u2aa[kx, ky, kz] -= lib.einsum('ma, imjb->ijab', t1a[kz], eris.ooov[kx,kz,ky].conj())
u2aa = u2aa - u2aa.transpose(1,0,2,4,3,5,6)
u2aa = u2aa - einsum('xyzijab,xyzu->xyuijba', u2aa, P)
Ht2aa += u2aa
#:u2bb = einsum('xwzimae,wvumebj,xwzv,wuvy->xyzijab', t2bb, WOVVO, P, P)
#:u2bb += einsum('wxvMiEa,wvuMEbj,xwzv,wuvy->xyzijab', t2ab, WovVO, P, P)
#:u2bb += einsum('xie,zma,zwumjbe,zuwx,xyzu->xyzijab', t1b, t1b, eris.OOVV, P, P)
#:u2bb -= einsum('xie,zma,uwzbjme,zuwx,xyzu->xyzijab', t1b, t1b, eris.VOOV, P, P)
u2bb = np.zeros_like(t2bb)
for kx, kw, kz in kpts_helper.loop_kkk(nkpts):
kv = kconserv[kx, kz, kw]
for ku in range(nkpts):
ky = kconserv[kw,kv, ku]
u2bb[kx, ky, kz] += lib.einsum('imae,mebj->ijab', t2bb[kx,kw,kz], WOVVO[kw,kv,ku])
u2bb[kx, ky, kz] += lib.einsum('miea, mebj-> ijab', t2ab[kw,kx,kv],WovVO[kw,kv,ku])
for kz, ku, kw in kpts_helper.loop_kkk(nkpts):
kx = kconserv[kz, kw, ku]
ky = kconserv[kz, kx, ku]
u2bb[kx, ky, kz] += lib.einsum('ie, ma, mjbe->ijab',t1b[kx],t1b[kz],eris.OOVV[kz,kw,ku])
u2bb[kx, ky, kz] -= lib.einsum('ie, ma, bjme->ijab', t1b[kx], t1b[kz],eris.VOOV[ku,kw,kz])
#:u2bb += np.einsum('xie,uzybjae,uzyx->xyzijab', t1b, eris.VOVV, P)
#:u2bb -= np.einsum('zma,xzyimjb->xyzijab', t1b, eris.OOOV.conj())
for ky, kx, ku in kpts_helper.loop_kkk(nkpts):
kz = kconserv[ky, ku, kx]
u2bb[kx,ky,kz] += lib.einsum('ie,bjae->ijab', t1b[kx], eris.VOVV[ku,ky,kz])
#for kx, kz, ky in kpts_helper.loop_kkk(nkpts):
# u2bb[kx,ky,kz] -= lib.einsum('ma, imjb-> ijab', t1b[kz], eris.OOOV[kx,kz,ky].conj())
u2bb -= einsum('zma, xzyimjb->xyzijab', t1b, eris.OOOV[:].conj())
u2bb = u2bb - u2bb.transpose(1,0,2,4,3,5,6)
u2bb = u2bb - einsum('xyzijab,xyzu->xyuijba', u2bb, P)
Ht2bb += u2bb
#:Ht2ab += np.einsum('xie,uyzBJae,uzyx->xyziJaB', t1a, eris.VOvv, P)
#:Ht2ab += np.einsum('yJE,zxuaiBE,zuxy->xyziJaB', t1b, eris.voVV, P)
#:Ht2ab -= np.einsum('zma,xzyimjb->xyzijab', t1a, eris.ooOV.conj())
#:Ht2ab -= np.einsum('umb,yuxjmia,xyuz->xyzijab', t1b, eris.OOov.conj(), P)
for ky, kx, ku in kpts_helper.loop_kkk(nkpts):
kz = kconserv[ky,ku,kx]
Ht2ab[kx,ky,kz] += lib.einsum('ie, bjae-> ijab', t1a[kx], eris.VOvv[ku,ky,kz])
Ht2ab[kx,ky,kz] += lib.einsum('je, aibe-> ijab', t1b[ky], eris.voVV[kz,kx,ku])
#for kx, kz, ky in kpts_helper.loop_kkk(nkpts):
# Ht2ab[kx,ky,kz] -= lib.einsum('ma, imjb->ijab', t1a[kz], eris.ooOV[kx,kz,ky].conj())
Ht2ab -= einsum('zma, xzyimjb->xyzijab', t1a, eris.ooOV[:].conj())
for kx, ky, ku in kpts_helper.loop_kkk(nkpts):
kz = kconserv[kx, ku, ky]
Ht2ab[kx,ky,kz] -= lib.einsum('mb,jmia->ijab',t1b[ku],eris.OOov[ky,ku,kx].conj())
eia = []
eIA = []
for ki in range(nkpts):
tmp_alpha = []
tmp_beta = []
for ka in range(nkpts):
tmp_eia = LARGE_DENOM * np.ones((nocca, nvira), dtype=eris.mo_energy[0][0].dtype)
tmp_eIA = LARGE_DENOM * np.ones((noccb, nvirb), dtype=eris.mo_energy[0][0].dtype)
n0_ovp_ia = np.ix_(nonzero_opadding_alpha[ki], nonzero_vpadding_alpha[ka])
n0_ovp_IA = np.ix_(nonzero_opadding_beta[ki], nonzero_vpadding_beta[ka])
tmp_eia[n0_ovp_ia] = (mo_ea_o[ki][:,None] - mo_ea_v[ka])[n0_ovp_ia]
tmp_eIA[n0_ovp_IA] = (mo_eb_o[ki][:,None] - mo_eb_v[ka])[n0_ovp_IA]
tmp_alpha.append(tmp_eia)
tmp_beta.append(tmp_eIA)
eia.append(tmp_alpha)
eIA.append(tmp_beta)
for ki in range(nkpts):
ka = ki
# Remove zero/padded elements from denominator
Ht1a[ki] /= eia[ki][ka]
Ht1b[ki] /= eIA[ki][ka]
for ki, kj, ka in kpts_helper.loop_kkk(nkpts):
kb = kconserv[ki, ka, kj]
eijab = eia[ki][ka][:,None,:,None] + eia[kj][kb][:,None,:]
Ht2aa[ki,kj,ka] /= eijab
eijab = eia[ki][ka][:,None,:,None] + eIA[kj][kb][:,None,:]
Ht2ab[ki,kj,ka] /= eijab
eijab = eIA[ki][ka][:,None,:,None] + eIA[kj][kb][:,None,:]
Ht2bb[ki,kj,ka] /= eijab
time0 = log.timer_debug1('update t1 t2', *time0)
return (Ht1a, Ht1b), (Ht2aa, Ht2ab, Ht2bb)
def get_normt_diff(cc, t1, t2, t1new, t2new):
'''Calculates norm(t1 - t1new) + norm(t2 - t2new).'''
return (np.linalg.norm(t1new[0] - t1[0])**2 +
np.linalg.norm(t1new[1] - t1[1])**2 +
np.linalg.norm(t2new[0] - t2[0])**2 +
np.linalg.norm(t2new[1] - t2[1])**2 +
np.linalg.norm(t2new[2] - t2[2])**2) ** .5
def energy(cc, t1, t2, eris):
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
kka, noa, nva = t1a.shape
kkb, nob, nvb = t1b.shape
assert(kka == kkb)
nkpts = kka
s = 0.0 + 0j
fa, fb = eris.fock
for ki in range(nkpts):
s += einsum('ia,ia', fa[ki, :noa, noa:], t1a[ki, :, :])
s += einsum('ia,ia', fb[ki, :nob, nob:], t1b[ki, :, :])
t1t1aa = np.zeros(shape=t2aa.shape, dtype=t2aa.dtype)
t1t1ab = np.zeros(shape=t2ab.shape, dtype=t2ab.dtype)
t1t1bb = np.zeros(shape=t2bb.shape, dtype=t2bb.dtype)
for ki in range(nkpts):
ka = ki
for kj in range(nkpts):
t1t1aa[ki, kj, ka, :, :, :, :] = einsum('ia,jb->ijab', t1a[ki, :, :], t1a[kj, :, :])
t1t1ab[ki, kj, ka, :, :, :, :] = einsum('ia,jb->ijab', t1a[ki, :, :], t1b[kj, :, :])
t1t1bb[ki, kj, ka, :, :, :, :] = einsum('ia,jb->ijab', t1b[ki, :, :], t1b[kj, :, :])
tauaa = t2aa + 2*t1t1aa
tauab = t2ab + t1t1ab
taubb = t2bb + 2*t1t1bb
d = 0.0 + 0.j
d += 0.25*(einsum('xzyiajb,xyzijab->',eris.ovov,tauaa)
- einsum('yzxjaib,xyzijab->',eris.ovov,tauaa))
d += einsum('xzyiajb,xyzijab->',eris.ovOV,tauab)
d += 0.25*(einsum('xzyiajb,xyzijab->',eris.OVOV,taubb)
- einsum('yzxjaib,xyzijab->',eris.OVOV,taubb))
e = s + d
e /= nkpts
if abs(e.imag) > 1e-4:
logger.warn(cc, 'Non-zero imaginary part found in KCCSD energy %s', e)
return e.real
#def get_nocc(cc, per_kpoint=False):
# '''See also function get_nocc in pyscf/pbc/mp2/kmp2.py'''
# if cc._nocc is not None:
# return cc._nocc
#
# assert(cc.frozen == 0)
#
# if isinstance(cc.frozen, (int, np.integer)):
# nocca = [(np.count_nonzero(cc.mo_occ[0][k] > 0) - cc.frozen) for k in range(cc.nkpts)]
# noccb = [(np.count_nonzero(cc.mo_occ[1][k] > 0) - cc.frozen) for k in range(cc.nkpts)]
#
# else:
# raise NotImplementedError
#
# if not per_kpoint:
# nocca = np.amax(nocca)
# noccb = np.amax(noccb)
# return nocca, noccb
#
#def get_nmo(cc, per_kpoint=False):
# '''See also function get_nmo in pyscf/pbc/mp2/kmp2.py'''
# if cc._nmo is not None:
# return cc._nmo
#
# assert(cc.frozen == 0)
#
# if isinstance(cc.frozen, (int, np.integer)):
# nmoa = [(cc.mo_occ[0][k].size - cc.frozen) for k in range(cc.nkpts)]
# nmob = [(cc.mo_occ[1][k].size - cc.frozen) for k in range(cc.nkpts)]
#
# else:
# raise NotImplementedError
#
# if not per_kpoint:
# nmoa = np.amax(nmoa)
# nmob = np.amax(nmob)
# return nmoa, nmob
#
#def get_frozen_mask(cc):
# '''See also get_frozen_mask function in pyscf/pbc/mp2/kmp2.py'''
#
# moidxa = [np.ones(x.size, dtype=np.bool) for x in cc.mo_occ[0]]
# moidxb = [np.ones(x.size, dtype=np.bool) for x in cc.mo_occ[1]]
# assert(cc.frozen == 0)
#
# if isinstance(cc.frozen, (int, np.integer)):
# for idx in moidxa:
# idx[:cc.frozen] = False
# for idx in moidxb:
# idx[:cc.frozen] = False
# else:
# raise NotImplementedError
#
# return moidxa, moidxb
def amplitudes_to_vector(t1, t2):
return np.hstack((t1[0].ravel(), t1[1].ravel(),
t2[0].ravel(), t2[1].ravel(), t2[2].ravel()))
def vector_to_amplitudes(vec, nmo, nocc, nkpts=1):
nocca, noccb = nocc
nmoa, nmob = nmo
nvira, nvirb = nmoa - nocca, nmob - noccb
sizes = (nkpts*nocca*nvira, nkpts*noccb*nvirb,
nkpts**3*nocca**2*nvira**2, nkpts**3*nocca*noccb*nvira*nvirb,
nkpts**3*noccb**2*nvirb**2)
sections = np.cumsum(sizes[:-1])
t1a, t1b, t2aa, t2ab, t2bb = np.split(vec, sections)
t1a = t1a.reshape(nkpts,nocca,nvira)
t1b = t1b.reshape(nkpts,noccb,nvirb)
t2aa = t2aa.reshape(nkpts,nkpts,nkpts,nocca,nocca,nvira,nvira)
t2ab = t2ab.reshape(nkpts,nkpts,nkpts,nocca,noccb,nvira,nvirb)
t2bb = t2bb.reshape(nkpts,nkpts,nkpts,noccb,noccb,nvirb,nvirb)
return (t1a,t1b), (t2aa,t2ab,t2bb)
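# Hedged round-trip sketch (illustrative, with made-up sizes): the flat vector
# packs (t1a, t1b, t2aa, t2ab, t2bb) in that order, so for e.g. nkpts=2,
# nocc=(2, 2), nmo=(6, 6)
#
#     vec = amplitudes_to_vector(t1, t2)
#     t1_new, t2_new = vector_to_amplitudes(vec, (6, 6), (2, 2), nkpts=2)
#
# recovers arrays with the original shapes, e.g. t1_new[0].shape == (2, 2, 4).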
def add_vvvv_(cc, Ht2, t1, t2, eris):
nocca, noccb = cc.nocc
nmoa, nmob = cc.nmo
nkpts = cc.nkpts
kconserv = cc.khelper.kconserv
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
Ht2aa, Ht2ab, Ht2bb = Ht2
if cc.direct and getattr(eris, 'Lpv', None) is not None:
def get_Wvvvv(ka, kc, kb):
kd = kconserv[ka,kc,kb]
Lpv = eris.Lpv
LPV = eris.LPV
Lbd = (Lpv[kb,kd][:,nocca:] -
lib.einsum('Lkd,kb->Lbd', Lpv[kb,kd][:,:nocca], t1a[kb]))
Wvvvv = lib.einsum('Lac,Lbd->acbd', Lpv[ka,kc][:,nocca:], Lbd)
kcbd = lib.einsum('Lkc,Lbd->kcbd', Lpv[ka,kc][:,:nocca],
Lpv[kb,kd][:,nocca:])
Wvvvv -= lib.einsum('kcbd,ka->acbd', kcbd, t1a[ka])
LBD = (LPV[kb,kd][:,noccb:] -
lib.einsum('Lkd,kb->Lbd', LPV[kb,kd][:,:noccb], t1b[kb]))
WvvVV = lib.einsum('Lac,Lbd->acbd', Lpv[ka,kc][:,nocca:], LBD)
kcbd = lib.einsum('Lkc,Lbd->kcbd', Lpv[ka,kc][:,:nocca],
LPV[kb,kd][:,noccb:])
WvvVV -= lib.einsum('kcbd,ka->acbd', kcbd, t1a[ka])
WVVVV = lib.einsum('Lac,Lbd->acbd', LPV[ka,kc][:,noccb:], LBD)
kcbd = lib.einsum('Lkc,Lbd->kcbd', LPV[ka,kc][:,:noccb],
LPV[kb,kd][:,noccb:])
WVVVV -= lib.einsum('kcbd,ka->acbd', kcbd, t1b[ka])
Wvvvv *= (1./nkpts)
WvvVV *= (1./nkpts)
WVVVV *= (1./nkpts)
return Wvvvv, WvvVV, WVVVV
else:
_Wvvvv, _WvvVV, _WVVVV = kintermediates_uhf.cc_Wvvvv_half(cc, t1, t2, eris)
def get_Wvvvv(ka, kc, kb):
return _Wvvvv[ka,kc,kb], _WvvVV[ka,kc,kb], _WVVVV[ka,kc,kb]
#:Ht2aa += np.einsum('xyuijef,zuwaebf,xyuv,zwuv->xyzijab', tauaa, _Wvvvv-_Wvvvv.transpose(2,1,0,5,4,3,6), P, P) * .5
#:Ht2bb += np.einsum('xyuijef,zuwaebf,xyuv,zwuv->xyzijab', taubb, _WVVVV-_WVVVV.transpose(2,1,0,5,4,3,6), P, P) * .5
#:Ht2ab += np.einsum('xyuiJeF,zuwaeBF,xyuv,zwuv->xyziJaB', tauab, _WvvVV, P, P)
for ka, kb, kc in kpts_helper.loop_kkk(nkpts):
kd = kconserv[ka,kc,kb]
Wvvvv, WvvVV, WVVVV = get_Wvvvv(ka, kc, kb)
for ki in range(nkpts):
kj = kconserv[ka,ki,kb]
tauaa = t2aa[ki,kj,kc].copy()
tauab = t2ab[ki,kj,kc].copy()
taubb = t2bb[ki,kj,kc].copy()
if ki == kc and kj == kd:
tauaa += einsum('ic,jd->ijcd', t1a[ki], t1a[kj])
tauab += einsum('ic,jd->ijcd', t1a[ki], t1b[kj])
taubb += einsum('ic,jd->ijcd', t1b[ki], t1b[kj])
if ki == kd and kj == kc:
tauaa -= einsum('id,jc->ijcd', t1a[ki], t1a[kj])
taubb -= einsum('id,jc->ijcd', t1b[ki], t1b[kj])
tmp = lib.einsum('acbd,ijcd->ijab', Wvvvv, tauaa) * .5
Ht2aa[ki,kj,ka] += tmp
Ht2aa[ki,kj,kb] -= tmp.transpose(0,1,3,2)
tmp = lib.einsum('acbd,ijcd->ijab', WVVVV, taubb) * .5
Ht2bb[ki,kj,ka] += tmp
Ht2bb[ki,kj,kb] -= tmp.transpose(0,1,3,2)
Ht2ab[ki,kj,ka] += lib.einsum('acbd,ijcd->ijab', WvvVV, tauab)
Wvvvv = WvvVV = WVVVV = None
_Wvvvv = _WvvVV = _WVVVV = None
# Contractions below are merged to Woooo intermediates
# tauaa, tauab, taubb = kintermediates_uhf.make_tau(cc, t2, t1, t1)
# P = kintermediates_uhf.kconserv_mat(cc.nkpts, cc.khelper.kconserv)
# minj = np.einsum('xwymenf,uvwijef,xywz,uvwz->xuyminj', eris.ovov, tauaa, P, P)
# MINJ = np.einsum('xwymenf,uvwijef,xywz,uvwz->xuyminj', eris.OVOV, taubb, P, P)
# miNJ = np.einsum('xwymeNF,uvwiJeF,xywz,uvwz->xuymiNJ', eris.ovOV, tauab, P, P)
# Ht2aa += np.einsum('xuyminj,xywmnab,xyuv->uvwijab', minj, tauaa, P) * .25
# Ht2bb += np.einsum('xuyminj,xywmnab,xyuv->uvwijab', MINJ, taubb, P) * .25
# Ht2ab += np.einsum('xuymiNJ,xywmNaB,xyuv->uvwiJaB', miNJ, tauab, P) * .5
return (Ht2aa, Ht2ab, Ht2bb)
class KUCCSD(uccsd.UCCSD):
max_space = getattr(__config__, 'pbc_cc_kccsd_uhf_KUCCSD_max_space', 20)
def __init__(self, mf, frozen=None, mo_coeff=None, mo_occ=None):
assert(isinstance(mf, scf.khf.KSCF))
uccsd.UCCSD.__init__(self, mf, frozen, mo_coeff, mo_occ)
self.kpts = mf.kpts
self.mo_energy = mf.mo_energy
self.khelper = kpts_helper.KptsHelper(mf.cell, self.kpts)
self.direct = True # If possible, use GDF to compute Wvvvv on-the-fly
keys = set(['kpts', 'mo_energy', 'khelper', 'max_space', 'direct'])
self._keys = self._keys.union(keys)
@property
def nkpts(self):
return len(self.kpts)
get_normt_diff = get_normt_diff
get_nocc = get_nocc
get_nmo = get_nmo
get_frozen_mask = get_frozen_mask
update_amps = update_amps
energy = energy
def dump_flags(self, verbose=None):
return uccsd.UCCSD.dump_flags(self, verbose)
def ao2mo(self, mo_coeff=None):
from pyscf.pbc.df.df import GDF
cell = self._scf.cell
nkpts = self.nkpts
nmoa, nmob = self.nmo
mem_incore = nkpts**3 * (nmoa**4 + nmob**4) * 8 / 1e6
mem_now = lib.current_memory()[0]
if (mem_incore + mem_now < self.max_memory) or self.mol.incore_anyway:
return _make_eris_incore(self, mo_coeff)
elif (self.direct and type(self._scf.with_df) is GDF
and cell.dimension != 2):
# DFKCCSD does not support MDF
return _make_df_eris(self, mo_coeff)
else:
return _make_eris_outcore(self, mo_coeff)
def init_amps(self, eris):
time0 = time.clock(), time.time()
nocca, noccb = self.nocc
nmoa, nmob = self.nmo
nvira, nvirb = nmoa - nocca, nmob - noccb
nkpts = self.nkpts
t1a = np.zeros((nkpts, nocca, nvira), dtype=np.complex128)
t1b = np.zeros((nkpts, noccb, nvirb), dtype=np.complex128)
t1 = (t1a, t1b)
t2aa = np.zeros((nkpts, nkpts, nkpts, nocca, nocca, nvira, nvira), dtype=np.complex128)
t2ab = np.zeros((nkpts, nkpts, nkpts, nocca, noccb, nvira, nvirb), dtype=np.complex128)
t2bb = np.zeros((nkpts, nkpts, nkpts, noccb, noccb, nvirb, nvirb), dtype=np.complex128)
mo_ea_o = [e[:nocca] for e in eris.mo_energy[0]]
mo_eb_o = [e[:noccb] for e in eris.mo_energy[1]]
mo_ea_v = [e[nocca:] for e in eris.mo_energy[0]]
mo_eb_v = [e[noccb:] for e in eris.mo_energy[1]]
# Get location of padded elements in occupied and virtual space
nonzero_padding_alpha, nonzero_padding_beta = padding_k_idx(self, kind="split")
nonzero_opadding_alpha, nonzero_vpadding_alpha = nonzero_padding_alpha
nonzero_opadding_beta, nonzero_vpadding_beta = nonzero_padding_beta
eia = []
eIA = []
# Create denominators, ignoring padded elements
for ki in range(nkpts):
tmp_alpha = []
tmp_beta = []
for ka in range(nkpts):
tmp_eia = LARGE_DENOM * np.ones((nocca, nvira), dtype=eris.mo_energy[0][0].dtype)
tmp_eIA = LARGE_DENOM * np.ones((noccb, nvirb), dtype=eris.mo_energy[0][0].dtype)
n0_ovp_ia = np.ix_(nonzero_opadding_alpha[ki], nonzero_vpadding_alpha[ka])
n0_ovp_IA = np.ix_(nonzero_opadding_beta[ki], nonzero_vpadding_beta[ka])
tmp_eia[n0_ovp_ia] = (mo_ea_o[ki][:,None] - mo_ea_v[ka])[n0_ovp_ia]
tmp_eIA[n0_ovp_IA] = (mo_eb_o[ki][:,None] - mo_eb_v[ka])[n0_ovp_IA]
tmp_alpha.append(tmp_eia)
tmp_beta.append(tmp_eIA)
eia.append(tmp_alpha)
eIA.append(tmp_beta)
kconserv = kpts_helper.get_kconserv(self._scf.cell, self.kpts)
for ki, kj, ka in kpts_helper.loop_kkk(nkpts):
kb = kconserv[ki, ka, kj]
Daa = eia[ki][ka][:,None,:,None] + eia[kj][kb][:,None,:]
Dab = eia[ki][ka][:,None,:,None] + eIA[kj][kb][:,None,:]
Dbb = eIA[ki][ka][:,None,:,None] + eIA[kj][kb][:,None,:]
t2aa[ki,kj,ka] = eris.ovov[ki,ka,kj].conj().transpose((0,2,1,3)) / Daa
t2aa[ki,kj,ka]-= eris.ovov[kj,ka,ki].conj().transpose((2,0,1,3)) / Daa
t2ab[ki,kj,ka] = eris.ovOV[ki,ka,kj].conj().transpose((0,2,1,3)) / Dab
t2bb[ki,kj,ka] = eris.OVOV[ki,ka,kj].conj().transpose((0,2,1,3)) / Dbb
t2bb[ki,kj,ka]-= eris.OVOV[kj,ka,ki].conj().transpose((2,0,1,3)) / Dbb
t2 = (t2aa,t2ab,t2bb)
d = 0.0 + 0.j
d += 0.25*(einsum('xzyiajb,xyzijab->',eris.ovov,t2aa)
- einsum('yzxjaib,xyzijab->',eris.ovov,t2aa))
d += einsum('xzyiajb,xyzijab->',eris.ovOV,t2ab)
d += 0.25*(einsum('xzyiajb,xyzijab->',eris.OVOV,t2bb)
- einsum('yzxjaib,xyzijab->',eris.OVOV,t2bb))
self.emp2 = d/nkpts
logger.info(self, 'Init t2, MP2 energy = %.15g', self.emp2.real)
logger.timer(self, 'init mp2', *time0)
return self.emp2, t1, t2
def amplitudes_to_vector(self, t1, t2):
return amplitudes_to_vector(t1, t2)
def vector_to_amplitudes(self, vec, nmo=None, nocc=None, nkpts=None):
if nocc is None: nocc = self.nocc
if nmo is None: nmo = self.nmo
if nkpts is None: nkpts = self.nkpts
return vector_to_amplitudes(vec, nmo, nocc, nkpts)
UCCSD = KUCCSD
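# Hedged usage sketch (illustrative only; follows the usual pyscf driver
# pattern rather than anything defined in this file):
#
#     from pyscf.pbc import gto, scf, cc
#     cell = gto.Cell()            # ... set atom/basis/a and cell.build() ...
#     kpts = cell.make_kpts([2, 2, 2])
#     kmf = scf.KUHF(cell, kpts=kpts).run()
#     mycc = cc.KUCCSD(kmf)
#     ecc, t1, t2 = mycc.kernel()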
#######################################
#
# _ERIS.
#
# Note the two electron integrals are stored in different orders from
# kccsd_rhf._ERIS. Integrals (ab|cd) are stored as [ka,kb,kc,a,b,c,d] here
# while the order is [ka,kc,kb,a,c,b,d] in kccsd_rhf._ERIS
#
# TODO: use the same convention as kccsd_rhf
#
def _make_eris_incore(cc, mo_coeff=None):
eris = uccsd._ChemistsERIs()
if mo_coeff is None:
mo_coeff = cc.mo_coeff
mo_coeff = convert_mo_coeff(mo_coeff) # FIXME: Remove me!
mo_coeff = padded_mo_coeff(cc, mo_coeff)
eris.mo_coeff = mo_coeff
eris.nocc = cc.nocc
nkpts = cc.nkpts
nocca, noccb = cc.nocc
nmoa, nmob = cc.nmo
nvira, nvirb = nmoa - nocca, nmob - noccb
if gamma_point(cc.kpts):
dtype = np.double
else:
dtype = np.complex128
dtype = np.result_type(dtype, *mo_coeff[0])
eris.oooo = np.empty((nkpts,nkpts,nkpts,nocca,nocca,nocca,nocca), dtype=dtype)
eris.ooov = np.empty((nkpts,nkpts,nkpts,nocca,nocca,nocca,nvira), dtype=dtype)
eris.oovv = np.empty((nkpts,nkpts,nkpts,nocca,nocca,nvira,nvira), dtype=dtype)
eris.ovov = np.empty((nkpts,nkpts,nkpts,nocca,nvira,nocca,nvira), dtype=dtype)
eris.voov = np.empty((nkpts,nkpts,nkpts,nvira,nocca,nocca,nvira), dtype=dtype)
eris.vovv = np.empty((nkpts,nkpts,nkpts,nvira,nocca,nvira,nvira), dtype=dtype)
eris.OOOO = np.empty((nkpts,nkpts,nkpts,noccb,noccb,noccb,noccb), dtype=dtype)
eris.OOOV = np.empty((nkpts,nkpts,nkpts,noccb,noccb,noccb,nvirb), dtype=dtype)
eris.OOVV = np.empty((nkpts,nkpts,nkpts,noccb,noccb,nvirb,nvirb), dtype=dtype)
eris.OVOV = np.empty((nkpts,nkpts,nkpts,noccb,nvirb,noccb,nvirb), dtype=dtype)
eris.VOOV = np.empty((nkpts,nkpts,nkpts,nvirb,noccb,noccb,nvirb), dtype=dtype)
eris.VOVV = np.empty((nkpts,nkpts,nkpts,nvirb,noccb,nvirb,nvirb), dtype=dtype)
eris.ooOO = np.empty((nkpts,nkpts,nkpts,nocca,nocca,noccb,noccb), dtype=dtype)
eris.ooOV = np.empty((nkpts,nkpts,nkpts,nocca,nocca,noccb,nvirb), dtype=dtype)
eris.ooVV = np.empty((nkpts,nkpts,nkpts,nocca,nocca,nvirb,nvirb), dtype=dtype)
eris.ovOV = np.empty((nkpts,nkpts,nkpts,nocca,nvira,noccb,nvirb), dtype=dtype)
eris.voOV = np.empty((nkpts,nkpts,nkpts,nvira,nocca,noccb,nvirb), dtype=dtype)
eris.voVV = np.empty((nkpts,nkpts,nkpts,nvira,nocca,nvirb,nvirb), dtype=dtype)
eris.OOoo = None
eris.OOov = np.empty((nkpts,nkpts,nkpts,noccb,noccb,nocca,nvira), dtype=dtype)
eris.OOvv = np.empty((nkpts,nkpts,nkpts,noccb,noccb,nvira,nvira), dtype=dtype)
eris.OVov = np.empty((nkpts,nkpts,nkpts,noccb,nvirb,nocca,nvira), dtype=dtype)
eris.VOov = np.empty((nkpts,nkpts,nkpts,nvirb,noccb,nocca,nvira), dtype=dtype)
eris.VOvv = np.empty((nkpts,nkpts,nkpts,nvirb,noccb,nvira,nvira), dtype=dtype)
_kuccsd_eris_common_(cc, eris)
thisdf = cc._scf.with_df
orbva = np.asarray(mo_coeff[0][:,:,nocca:], order='C')
orbvb = np.asarray(mo_coeff[1][:,:,noccb:], order='C')
eris.vvvv = thisdf.ao2mo_7d(orbva, factor=1./nkpts)
eris.VVVV = thisdf.ao2mo_7d(orbvb, factor=1./nkpts)
eris.vvVV = thisdf.ao2mo_7d([orbva,orbva,orbvb,orbvb], factor=1./nkpts)
return eris
def _kuccsd_eris_common_(cc, eris, buf=None):
from pyscf.pbc import tools
from pyscf.pbc.cc.ccsd import _adjust_occ
#if not (cc.frozen is None or cc.frozen == 0):
# raise NotImplementedError('cc.frozen = %s' % str(cc.frozen))
cput0 = (time.clock(), time.time())
log = logger.new_logger(cc)
cell = cc._scf.cell
thisdf = cc._scf.with_df
kpts = cc.kpts
nkpts = cc.nkpts
mo_coeff = eris.mo_coeff
nocca, noccb = eris.nocc
nmoa, nmob = cc.nmo
mo_a, mo_b = mo_coeff
# Re-make our fock MO matrix elements from density and fock AO
dm = cc._scf.make_rdm1(cc.mo_coeff, cc.mo_occ)
hcore = cc._scf.get_hcore()
with lib.temporary_env(cc._scf, exxdiv=None):
vhf = cc._scf.get_veff(cell, dm)
focka = [reduce(np.dot, (mo.conj().T, hcore[k]+vhf[0][k], mo))
for k, mo in enumerate(mo_a)]
fockb = [reduce(np.dot, (mo.conj().T, hcore[k]+vhf[1][k], mo))
for k, mo in enumerate(mo_b)]
eris.fock = (np.asarray(focka), np.asarray(fockb))
eris.e_hf = cc._scf.energy_tot(dm=dm, vhf=vhf)
madelung = tools.madelung(cell, kpts)
mo_ea = [focka[k].diagonal().real for k in range(nkpts)]
mo_eb = [fockb[k].diagonal().real for k in range(nkpts)]
mo_ea = [_adjust_occ(e, nocca, -madelung) for e in mo_ea]
mo_eb = [_adjust_occ(e, noccb, -madelung) for e in mo_eb]
eris.mo_energy = (mo_ea, mo_eb)
orboa = np.asarray(mo_coeff[0][:,:,:nocca], order='C')
orbob = np.asarray(mo_coeff[1][:,:,:noccb], order='C')
#orbva = np.asarray(mo_coeff[0][:,:,nocca:], order='C')
#orbvb = np.asarray(mo_coeff[1][:,:,noccb:], order='C')
dtype = np.result_type(*focka).char
# The momentum conservation array
kconserv = cc.khelper.kconserv
out = None
if isinstance(buf, h5py.Group):
out = buf.create_dataset('tmp', (nkpts,nkpts,nkpts,nocca,nmoa,nmoa,nmoa), dtype)
oppp = thisdf.ao2mo_7d([orboa,mo_coeff[0],mo_coeff[0],mo_coeff[0]], kpts,
factor=1./nkpts, out=out)
for kp, kq, kr in kpts_helper.loop_kkk(nkpts):
ks = kconserv[kp,kq,kr]
tmp = np.asarray(oppp[kp,kq,kr])
eris.oooo[kp,kq,kr] = tmp[:nocca,:nocca,:nocca,:nocca]
eris.ooov[kp,kq,kr] = tmp[:nocca,:nocca,:nocca,nocca:]
eris.oovv[kp,kq,kr] = tmp[:nocca,:nocca,nocca:,nocca:]
eris.ovov[kp,kq,kr] = tmp[:nocca,nocca:,:nocca,nocca:]
eris.voov[kq,kp,ks] = tmp[:nocca,nocca:,nocca:,:nocca].conj().transpose(1,0,3,2)
eris.vovv[kq,kp,ks] = tmp[:nocca,nocca:,nocca:,nocca:].conj().transpose(1,0,3,2)
oppp = None
if isinstance(buf, h5py.Group):
del(buf['tmp'])
out = buf.create_dataset('tmp', (nkpts,nkpts,nkpts,noccb,nmob,nmob,nmob), dtype)
oppp = thisdf.ao2mo_7d([orbob,mo_coeff[1],mo_coeff[1],mo_coeff[1]], kpts,
factor=1./nkpts, out=out)
for kp, kq, kr in kpts_helper.loop_kkk(nkpts):
ks = kconserv[kp,kq,kr]
tmp = np.asarray(oppp[kp,kq,kr])
eris.OOOO[kp,kq,kr] = tmp[:noccb,:noccb,:noccb,:noccb]
eris.OOOV[kp,kq,kr] = tmp[:noccb,:noccb,:noccb,noccb:]
eris.OOVV[kp,kq,kr] = tmp[:noccb,:noccb,noccb:,noccb:]
eris.OVOV[kp,kq,kr] = tmp[:noccb,noccb:,:noccb,noccb:]
eris.VOOV[kq,kp,ks] = tmp[:noccb,noccb:,noccb:,:noccb].conj().transpose(1,0,3,2)
eris.VOVV[kq,kp,ks] = tmp[:noccb,noccb:,noccb:,noccb:].conj().transpose(1,0,3,2)
oppp = None
if isinstance(buf, h5py.Group):
del(buf['tmp'])
out = buf.create_dataset('tmp', (nkpts,nkpts,nkpts,nocca,nmoa,nmob,nmob), dtype)
oppp = thisdf.ao2mo_7d([orboa,mo_coeff[0],mo_coeff[1],mo_coeff[1]], kpts,
factor=1./nkpts, out=out)
for kp, kq, kr in kpts_helper.loop_kkk(nkpts):
ks = kconserv[kp,kq,kr]
tmp = np.asarray(oppp[kp,kq,kr])
eris.ooOO[kp,kq,kr] = tmp[:nocca,:nocca,:noccb,:noccb]
eris.ooOV[kp,kq,kr] = tmp[:nocca,:nocca,:noccb,noccb:]
eris.ooVV[kp,kq,kr] = tmp[:nocca,:nocca,noccb:,noccb:]
eris.ovOV[kp,kq,kr] = tmp[:nocca,nocca:,:noccb,noccb:]
eris.voOV[kq,kp,ks] = tmp[:nocca,nocca:,noccb:,:noccb].conj().transpose(1,0,3,2)
eris.voVV[kq,kp,ks] = tmp[:nocca,nocca:,noccb:,noccb:].conj().transpose(1,0,3,2)
oppp = None
if isinstance(buf, h5py.Group):
del(buf['tmp'])
out = buf.create_dataset('tmp', (nkpts,nkpts,nkpts,noccb,nmob,nmoa,nmoa), dtype)
oppp = thisdf.ao2mo_7d([orbob,mo_coeff[1],mo_coeff[0],mo_coeff[0]], kpts,
factor=1./nkpts, out=out)
for kp, kq, kr in kpts_helper.loop_kkk(nkpts):
ks = kconserv[kp,kq,kr]
tmp = np.asarray(oppp[kp,kq,kr])
#eris.OOoo[kp,kq,kr] = tmp[:noccb,:noccb,:nocca,:nocca]
eris.OOov[kp,kq,kr] = tmp[:noccb,:noccb,:nocca,nocca:]
eris.OOvv[kp,kq,kr] = tmp[:noccb,:noccb,nocca:,nocca:]
eris.OVov[kp,kq,kr] = tmp[:noccb,noccb:,:nocca,nocca:]
eris.VOov[kq,kp,ks] = tmp[:noccb,noccb:,nocca:,:nocca].conj().transpose(1,0,3,2)
eris.VOvv[kq,kp,ks] = tmp[:noccb,noccb:,nocca:,nocca:].conj().transpose(1,0,3,2)
oppp = None
log.timer('CCSD integral transformation', *cput0)
return eris
def _make_eris_outcore(cc, mo_coeff=None):
eris = uccsd._ChemistsERIs()
if mo_coeff is None:
mo_coeff = cc.mo_coeff
mo_coeff = convert_mo_coeff(mo_coeff) # FIXME: Remove me!
mo_coeff = padded_mo_coeff(cc, mo_coeff)
eris.mo_coeff = mo_coeff
eris.nocc = cc.nocc
nkpts = cc.nkpts
nocca, noccb = cc.nocc
nmoa, nmob = cc.nmo
nvira, nvirb = nmoa - nocca, nmob - noccb
if gamma_point(cc.kpts):
dtype = np.double
else:
dtype = np.complex128
dtype = np.result_type(dtype, *mo_coeff[0]).char
eris.feri = feri = lib.H5TmpFile()
eris.oooo = feri.create_dataset('oooo', (nkpts,nkpts,nkpts,nocca,nocca,nocca,nocca), dtype)
eris.ooov = feri.create_dataset('ooov', (nkpts,nkpts,nkpts,nocca,nocca,nocca,nvira), dtype)
eris.oovv = feri.create_dataset('oovv', (nkpts,nkpts,nkpts,nocca,nocca,nvira,nvira), dtype)
eris.ovov = feri.create_dataset('ovov', (nkpts,nkpts,nkpts,nocca,nvira,nocca,nvira), dtype)
eris.voov = feri.create_dataset('voov', (nkpts,nkpts,nkpts,nvira,nocca,nocca,nvira), dtype)
eris.vovv = feri.create_dataset('vovv', (nkpts,nkpts,nkpts,nvira,nocca,nvira,nvira), dtype)
eris.vvvv = feri.create_dataset('vvvv', (nkpts,nkpts,nkpts,nvira,nvira,nvira,nvira), dtype)
eris.OOOO = feri.create_dataset('OOOO', (nkpts,nkpts,nkpts,noccb,noccb,noccb,noccb), dtype)
eris.OOOV = feri.create_dataset('OOOV', (nkpts,nkpts,nkpts,noccb,noccb,noccb,nvirb), dtype)
eris.OOVV = feri.create_dataset('OOVV', (nkpts,nkpts,nkpts,noccb,noccb,nvirb,nvirb), dtype)
eris.OVOV = feri.create_dataset('OVOV', (nkpts,nkpts,nkpts,noccb,nvirb,noccb,nvirb), dtype)
eris.VOOV = feri.create_dataset('VOOV', (nkpts,nkpts,nkpts,nvirb,noccb,noccb,nvirb), dtype)
eris.VOVV = feri.create_dataset('VOVV', (nkpts,nkpts,nkpts,nvirb,noccb,nvirb,nvirb), dtype)
eris.VVVV = feri.create_dataset('VVVV', (nkpts,nkpts,nkpts,nvirb,nvirb,nvirb,nvirb), dtype)
eris.ooOO = feri.create_dataset('ooOO', (nkpts,nkpts,nkpts,nocca,nocca,noccb,noccb), dtype)
eris.ooOV = feri.create_dataset('ooOV', (nkpts,nkpts,nkpts,nocca,nocca,noccb,nvirb), dtype)
eris.ooVV = feri.create_dataset('ooVV', (nkpts,nkpts,nkpts,nocca,nocca,nvirb,nvirb), dtype)
eris.ovOV = feri.create_dataset('ovOV', (nkpts,nkpts,nkpts,nocca,nvira,noccb,nvirb), dtype)
eris.voOV = feri.create_dataset('voOV', (nkpts,nkpts,nkpts,nvira,nocca,noccb,nvirb), dtype)
eris.voVV = feri.create_dataset('voVV', (nkpts,nkpts,nkpts,nvira,nocca,nvirb,nvirb), dtype)
eris.vvVV = feri.create_dataset('vvVV', (nkpts,nkpts,nkpts,nvira,nvira,nvirb,nvirb), dtype)
eris.OOoo = None
eris.OOov = feri.create_dataset('OOov', (nkpts,nkpts,nkpts,noccb,noccb,nocca,nvira), dtype)
eris.OOvv = feri.create_dataset('OOvv', (nkpts,nkpts,nkpts,noccb,noccb,nvira,nvira), dtype)
eris.OVov = feri.create_dataset('OVov', (nkpts,nkpts,nkpts,noccb,nvirb,nocca,nvira), dtype)
eris.VOov = feri.create_dataset('VOov', (nkpts,nkpts,nkpts,nvirb,noccb,nocca,nvira), dtype)
eris.VOvv = feri.create_dataset('VOvv', (nkpts,nkpts,nkpts,nvirb,noccb,nvira,nvira), dtype)
eris.VVvv = None
fswap = lib.H5TmpFile()
_kuccsd_eris_common_(cc, eris, fswap)
fswap = None
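# The shared routine above fills the blocks carrying occupied indices; only the
# all-virtual blocks (vvvv, VVVV, vvVV) remain to be transformed here, directly
# into their HDF5 datasets.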
thisdf = cc._scf.with_df
orbva = np.asarray(mo_coeff[0][:,:,nocca:], order='C')
orbvb = np.asarray(mo_coeff[1][:,:,noccb:], order='C')
thisdf.ao2mo_7d(orbva, cc.kpts, factor=1./nkpts, out=eris.vvvv)
thisdf.ao2mo_7d(orbvb, cc.kpts, factor=1./nkpts, out=eris.VVVV)
thisdf.ao2mo_7d([orbva,orbva,orbvb,orbvb], cc.kpts, factor=1./nkpts, out=eris.vvVV)
return eris
def _make_df_eris(cc, mo_coeff=None):
from pyscf.pbc.df import df
from pyscf.ao2mo import _ao2mo
cell = cc._scf.cell
if cell.dimension == 2:
raise NotImplementedError
eris = uccsd._ChemistsERIs()
if mo_coeff is None:
mo_coeff = cc.mo_coeff
mo_coeff = padded_mo_coeff(cc, mo_coeff)
eris.mo_coeff = mo_coeff
eris.nocc = cc.nocc
thisdf = cc._scf.with_df
kpts = cc.kpts
nkpts = cc.nkpts
nocca, noccb = cc.nocc
nmoa, nmob = cc.nmo
nvira, nvirb = nmoa - nocca, nmob - noccb
#if getattr(thisdf, 'auxcell', None):
# naux = thisdf.auxcell.nao_nr()
#else:
# naux = thisdf.get_naoaux()
nao = cell.nao_nr()
mo_kpts_a, mo_kpts_b = eris.mo_coeff
if gamma_point(kpts):
dtype = np.double
else:
dtype = np.complex128
dtype = np.result_type(dtype, *mo_kpts_a)
eris.feri = feri = lib.H5TmpFile()
eris.oooo = feri.create_dataset('oooo', (nkpts,nkpts,nkpts,nocca,nocca,nocca,nocca), dtype)
eris.ooov = feri.create_dataset('ooov', (nkpts,nkpts,nkpts,nocca,nocca,nocca,nvira), dtype)
eris.oovv = feri.create_dataset('oovv', (nkpts,nkpts,nkpts,nocca,nocca,nvira,nvira), dtype)
eris.ovov = feri.create_dataset('ovov', (nkpts,nkpts,nkpts,nocca,nvira,nocca,nvira), dtype)
eris.voov = feri.create_dataset('voov', (nkpts,nkpts,nkpts,nvira,nocca,nocca,nvira), dtype)
eris.vovv = feri.create_dataset('vovv', (nkpts,nkpts,nkpts,nvira,nocca,nvira,nvira), dtype)
eris.vvvv = None
eris.OOOO = feri.create_dataset('OOOO', (nkpts,nkpts,nkpts,noccb,noccb,noccb,noccb), dtype)
eris.OOOV = feri.create_dataset('OOOV', (nkpts,nkpts,nkpts,noccb,noccb,noccb,nvirb), dtype)
eris.OOVV = feri.create_dataset('OOVV', (nkpts,nkpts,nkpts,noccb,noccb,nvirb,nvirb), dtype)
eris.OVOV = feri.create_dataset('OVOV', (nkpts,nkpts,nkpts,noccb,nvirb,noccb,nvirb), dtype)
eris.VOOV = feri.create_dataset('VOOV', (nkpts,nkpts,nkpts,nvirb,noccb,noccb,nvirb), dtype)
eris.VOVV = feri.create_dataset('VOVV', (nkpts,nkpts,nkpts,nvirb,noccb,nvirb,nvirb), dtype)
eris.VVVV = None
eris.ooOO = feri.create_dataset('ooOO', (nkpts,nkpts,nkpts,nocca,nocca,noccb,noccb), dtype)
eris.ooOV = feri.create_dataset('ooOV', (nkpts,nkpts,nkpts,nocca,nocca,noccb,nvirb), dtype)
eris.ooVV = feri.create_dataset('ooVV', (nkpts,nkpts,nkpts,nocca,nocca,nvirb,nvirb), dtype)
eris.ovOV = feri.create_dataset('ovOV', (nkpts,nkpts,nkpts,nocca,nvira,noccb,nvirb), dtype)
eris.voOV = feri.create_dataset('voOV', (nkpts,nkpts,nkpts,nvira,nocca,noccb,nvirb), dtype)
eris.voVV = feri.create_dataset('voVV', (nkpts,nkpts,nkpts,nvira,nocca,nvirb,nvirb), dtype)
eris.vvVV = None
eris.OOoo = None
eris.OOov = feri.create_dataset('OOov', (nkpts,nkpts,nkpts,noccb,noccb,nocca,nvira), dtype)
eris.OOvv = feri.create_dataset('OOvv', (nkpts,nkpts,nkpts,noccb,noccb,nvira,nvira), dtype)
eris.OVov = feri.create_dataset('OVov', (nkpts,nkpts,nkpts,noccb,nvirb,nocca,nvira), dtype)
eris.VOov = feri.create_dataset('VOov', (nkpts,nkpts,nkpts,nvirb,noccb,nocca,nvira), dtype)
eris.VOvv = feri.create_dataset('VOvv', (nkpts,nkpts,nkpts,nvirb,noccb,nvira,nvira), dtype)
eris.VVvv = None
fswap = lib.H5TmpFile()
_kuccsd_eris_common_(cc, eris, fswap)
fswap = None
eris.Lpv = Lpv = np.empty((nkpts,nkpts), dtype=object)
eris.LPV = LPV = np.empty((nkpts,nkpts), dtype=object)
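# For each k-point pair (ki,kj), the 3-center DF integrals Lpq are half-transformed
# into (aux, nmo, nvir) blocks, one per spin, and cached in Lpv/LPV below.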
with h5py.File(thisdf._cderi, 'r') as f:
kptij_lst = f['j3c-kptij'][()]
tao = []
ao_loc = None
for ki, kpti in enumerate(kpts):
for kj, kptj in enumerate(kpts):
kpti_kptj = np.array((kpti,kptj))
Lpq = np.asarray(df._getitem(f, 'j3c', kpti_kptj, kptij_lst))
mo_a = np.hstack((mo_kpts_a[ki], mo_kpts_a[kj][:,nocca:]))
mo_b = np.hstack((mo_kpts_b[ki], mo_kpts_b[kj][:,noccb:]))
mo_a = np.asarray(mo_a, dtype=dtype, order='F')
mo_b = np.asarray(mo_b, dtype=dtype, order='F')
if dtype == np.double:
outa = _ao2mo.nr_e2(Lpq, mo_a, (0, nmoa, nmoa, nmoa+nvira), aosym='s2')
outb = _ao2mo.nr_e2(Lpq, mo_b, (0, nmob, nmob, nmob+nvirb), aosym='s2')
else:
#Note: Lpq.shape[0] != naux if linear dependency is found in auxbasis
if Lpq[0].size != nao**2: # aosym = 's2'
Lpq = lib.unpack_tril(Lpq).astype(np.complex128)
outa = _ao2mo.r_e2(Lpq, mo_a, (0, nmoa, nmoa, nmoa+nvira), tao, ao_loc)
outb = _ao2mo.r_e2(Lpq, mo_b, (0, nmob, nmob, nmob+nvirb), tao, ao_loc)
Lpv[ki,kj] = outa.reshape(-1,nmoa,nvira)
LPV[ki,kj] = outb.reshape(-1,nmob,nvirb)
return eris
scf.kuhf.KUHF.CCSD = lib.class_as_method(KUCCSD)
if __name__ == '__main__':
from pyscf.pbc import gto, cc
from pyscf import lo
cell = gto.Cell()
cell.atom='''
He 0.000000000000 0.000000000000 0.000000000000
He 1.685068664391 1.685068664391 1.685068664391
'''
#cell.basis = [[0, (1., 1.)], [1, (.5, 1.)]]
cell.basis = [[0, (1., 1.)], [0, (.5, 1.)]]
cell.a = '''
0.000000000, 3.370137329, 3.370137329
3.370137329, 0.000000000, 3.370137329
3.370137329, 3.370137329, 0.000000000'''
cell.unit = 'B'
cell.mesh = [13]*3
cell.build()
np.random.seed(2)
# Running HF and CCSD with 1x1x3 Monkhorst-Pack k-point mesh
kmf = scf.KUHF(cell, kpts=cell.make_kpts([1,1,3]), exxdiv=None)
nmo = cell.nao_nr()
kmf.mo_occ = np.zeros((2,3,nmo))
kmf.mo_occ[0,:,:3] = 1
kmf.mo_occ[1,:,:1] = 1
kmf.mo_energy = np.arange(nmo) + np.random.random((2,3,nmo)) * .3
kmf.mo_energy[kmf.mo_occ == 0] += 2
mo = (np.random.random((2,3,nmo,nmo)) +
np.random.random((2,3,nmo,nmo))*1j - .5-.5j)
s = kmf.get_ovlp()
kmf.mo_coeff = np.empty_like(mo)
nkpts = len(kmf.kpts)
for k in range(nkpts):
kmf.mo_coeff[0,k] = lo.orth.vec_lowdin(mo[0,k], s[k])
kmf.mo_coeff[1,k] = lo.orth.vec_lowdin(mo[1,k], s[k])
def rand_t1_t2(mycc):
nkpts = mycc.nkpts
nocca, noccb = mycc.nocc
nmoa, nmob = mycc.nmo
nvira, nvirb = nmoa - nocca, nmob - noccb
np.random.seed(1)
t1a = (np.random.random((nkpts,nocca,nvira)) +
np.random.random((nkpts,nocca,nvira))*1j - .5-.5j)
t1b = (np.random.random((nkpts,noccb,nvirb)) +
np.random.random((nkpts,noccb,nvirb))*1j - .5-.5j)
t2aa = (np.random.random((nkpts,nkpts,nkpts,nocca,nocca,nvira,nvira)) +
np.random.random((nkpts,nkpts,nkpts,nocca,nocca,nvira,nvira))*1j - .5-.5j)
kconserv = kpts_helper.get_kconserv(kmf.cell, kmf.kpts)
t2aa = t2aa - t2aa.transpose(1,0,2,4,3,5,6)
tmp = t2aa.copy()
for ki, kj, kk in kpts_helper.loop_kkk(nkpts):
kl = kconserv[ki, kk, kj]
t2aa[ki,kj,kk] = t2aa[ki,kj,kk] - tmp[ki,kj,kl].transpose(0,1,3,2)
t2ab = (np.random.random((nkpts,nkpts,nkpts,nocca,noccb,nvira,nvirb)) +
np.random.random((nkpts,nkpts,nkpts,nocca,noccb,nvira,nvirb))*1j - .5-.5j)
t2bb = (np.random.random((nkpts,nkpts,nkpts,noccb,noccb,nvirb,nvirb)) +
np.random.random((nkpts,nkpts,nkpts,noccb,noccb,nvirb,nvirb))*1j - .5-.5j)
t2bb = t2bb - t2bb.transpose(1,0,2,4,3,5,6)
tmp = t2bb.copy()
for ki, kj, kk in kpts_helper.loop_kkk(nkpts):
kl = kconserv[ki, kk, kj]
t2bb[ki,kj,kk] = t2bb[ki,kj,kk] - tmp[ki,kj,kl].transpose(0,1,3,2)
t1 = (t1a, t1b)
t2 = (t2aa, t2ab, t2bb)
return t1, t2
mycc = KUCCSD(kmf)
eris = mycc.ao2mo()
t1, t2 = rand_t1_t2(mycc)
Ht1, Ht2 = mycc.update_amps(t1, t2, eris)
print(lib.finger(Ht1[0]) - (2.2677885702176339-2.5150764056992041j))
print(lib.finger(Ht1[1]) - (-51.643438947846086+526.58026126100458j))
print(lib.finger(Ht2[0]) - (-29.490813482748258-8.7509143690136018j))
print(lib.finger(Ht2[1]) - (2256.0440056839416-193.16480896707569j))
print(lib.finger(Ht2[2]) - (-250.59447681063182-397.57189085666982j))
kmf.mo_occ[:] = 0
kmf.mo_occ[:,:,:2] = 1
mycc = KUCCSD(kmf)
eris = mycc.ao2mo()
t1, t2 = rand_t1_t2(mycc)
Ht1, Ht2 = mycc.update_amps(t1, t2, eris)
print(lib.finger(Ht1[0]) - (5.4622516572705662+1.990046725028729j))
print(lib.finger(Ht1[1]) - (4.8801120611799043-5.9940463787453488j))
print(lib.finger(Ht2[0]) - (-192.38864512375193+305.14191018543983j))
print(lib.finger(Ht2[1]) - (23085.044505825954-11527.802302550244j))
print(lib.finger(Ht2[2]) - (115.57932548288559-40.888597453928604j))
from pyscf.pbc.cc import kccsd
kgcc = kccsd.GCCSD(scf.addons.convert_to_ghf(kmf))
kccsd_eris = kccsd._make_eris_incore(kgcc, kgcc._scf.mo_coeff)
r1 = kgcc.spatial2spin(t1)
r2 = kgcc.spatial2spin(t2)
ge = kccsd.energy(kgcc, r1, r2, kccsd_eris)
r1, r2 = kgcc.update_amps(r1, r2, kccsd_eris)
ue = energy(mycc, t1, t2, eris)
print(abs(ge - ue))
print(abs(r1 - kgcc.spatial2spin(Ht1)).max())
print(abs(r2 - kgcc.spatial2spin(Ht2)).max())
kmf = kmf.density_fit(auxbasis=[[0, (1., 1.)]])
mycc = KUCCSD(kmf)
eris = _make_df_eris(mycc, mycc.mo_coeff)
t1, t2 = rand_t1_t2(mycc)
Ht1, Ht2 = mycc.update_amps(t1, t2, eris)
print(lib.finger(Ht1[0]) - (6.9341372555790013+0.87313546297025901j))
print(lib.finger(Ht1[1]) - (6.7538005829391992-0.95702422534126796j))
print(lib.finger(Ht2[0]) - (-509.24544842179876+448.00925776269855j))
print(lib.finger(Ht2[1]) - (107.5960392010511+40.869216223808067j) )
print(lib.finger(Ht2[2]) - (-196.75910296082139+218.53005038057515j))
kgcc = kccsd.GCCSD(scf.addons.convert_to_ghf(kmf))
kccsd_eris = kccsd._make_eris_incore(kgcc, kgcc._scf.mo_coeff)
r1 = kgcc.spatial2spin(t1)
r2 = kgcc.spatial2spin(t2)
ge = kccsd.energy(kgcc, r1, r2, kccsd_eris)
r1, r2 = kgcc.update_amps(r1, r2, kccsd_eris)
print(abs(r1 - kgcc.spatial2spin(Ht1)).max())
print(abs(r2 - kgcc.spatial2spin(Ht2)).max())
print(all([abs(lib.finger(eris.oooo) - (-0.18290712163391809-0.13839081039521306j) )<1e-8,
abs(lib.finger(eris.ooOO) - (-0.084752145202964035-0.28496525042110676j) )<1e-8,
#abs(lib.finger(eris.OOoo) - (0.43054922768629345-0.27990237216969871j) )<1e-8,
abs(lib.finger(eris.OOOO) - (-0.2941475969103261-0.047247498899840978j) )<1e-8,
abs(lib.finger(eris.ooov) - (0.23381463349517045-0.11703340936984277j) )<1e-8,
abs(lib.finger(eris.ooOV) - (-0.052655392703214066+0.69533309442418556j) )<1e-8,
abs(lib.finger(eris.OOov) - (-0.2111361247200903+0.85087916975274647j) )<1e-8,
abs(lib.finger(eris.OOOV) - (-0.36995992208047412-0.18887278030885621j) )<1e-8,
abs(lib.finger(eris.oovv) - (0.21107397525051516+0.0048714991438174871j) )<1e-8,
abs(lib.finger(eris.ooVV) - (-0.076411225687065987+0.11080438166425896j) )<1e-8,
abs(lib.finger(eris.OOvv) - (-0.17880337626095003-0.24174716216954206j) )<1e-8,
abs(lib.finger(eris.OOVV) - (0.059186286356424908+0.68433866387500164j) )<1e-8,
abs(lib.finger(eris.ovov) - (0.15402983765151051+0.064359681685222214j) )<1e-8,
abs(lib.finger(eris.ovOV) - (-0.10697649196044598+0.30351249676253234j) )<1e-8,
#abs(lib.finger(eris.OVov) - (-0.17619329728836752-0.56585020976035816j) )<1e-8,
abs(lib.finger(eris.OVOV) - (-0.63963235318492118+0.69863219317718828j) )<1e-8,
abs(lib.finger(eris.voov) - (-0.24137641647339092+0.18676684336011531j) )<1e-8,
abs(lib.finger(eris.voOV) - (0.19257709151227204+0.38929027819406414j) )<1e-8,
#abs(lib.finger(eris.VOov) - (0.07632606729926053-0.70350947950650355j) )<1e-8,
abs(lib.finger(eris.VOOV) - (-0.47970203195500816+0.46735207193861927j) )<1e-8,
abs(lib.finger(eris.vovv) - (-0.1342049915673903-0.23391327821719513j) )<1e-8,
abs(lib.finger(eris.voVV) - (-0.28989635223866056+0.9644368822688475j) )<1e-8,
abs(lib.finger(eris.VOvv) - (-0.32428269235420271+0.0029847254383674748j))<1e-8,
abs(lib.finger(eris.VOVV) - (0.45031779746222456-0.36858577475752041j) )<1e-8]))
eris = _make_eris_outcore(mycc, mycc.mo_coeff)
print(all([abs(lib.finger(eris.oooo) - (-0.18290712163391809-0.13839081039521306j) )<1e-8,
abs(lib.finger(eris.ooOO) - (-0.084752145202964035-0.28496525042110676j) )<1e-8,
#abs(lib.finger(eris.OOoo) - (0.43054922768629345-0.27990237216969871j) )<1e-8,
abs(lib.finger(eris.OOOO) - (-0.2941475969103261-0.047247498899840978j) )<1e-8,
abs(lib.finger(eris.ooov) - (0.23381463349517045-0.11703340936984277j) )<1e-8,
abs(lib.finger(eris.ooOV) - (-0.052655392703214066+0.69533309442418556j) )<1e-8,
abs(lib.finger(eris.OOov) - (-0.2111361247200903+0.85087916975274647j) )<1e-8,
abs(lib.finger(eris.OOOV) - (-0.36995992208047412-0.18887278030885621j) )<1e-8,
abs(lib.finger(eris.oovv) - (0.21107397525051516+0.0048714991438174871j) )<1e-8,
abs(lib.finger(eris.ooVV) - (-0.076411225687065987+0.11080438166425896j) )<1e-8,
abs(lib.finger(eris.OOvv) - (-0.17880337626095003-0.24174716216954206j) )<1e-8,
abs(lib.finger(eris.OOVV) - (0.059186286356424908+0.68433866387500164j) )<1e-8,
abs(lib.finger(eris.ovov) - (0.15402983765151051+0.064359681685222214j) )<1e-8,
abs(lib.finger(eris.ovOV) - (-0.10697649196044598+0.30351249676253234j) )<1e-8,
#abs(lib.finger(eris.OVov) - (-0.17619329728836752-0.56585020976035816j) )<1e-8,
abs(lib.finger(eris.OVOV) - (-0.63963235318492118+0.69863219317718828j) )<1e-8,
abs(lib.finger(eris.voov) - (-0.24137641647339092+0.18676684336011531j) )<1e-8,
abs(lib.finger(eris.voOV) - (0.19257709151227204+0.38929027819406414j) )<1e-8,
#abs(lib.finger(eris.VOov) - (0.07632606729926053-0.70350947950650355j) )<1e-8,
abs(lib.finger(eris.VOOV) - (-0.47970203195500816+0.46735207193861927j) )<1e-8,
abs(lib.finger(eris.vovv) - (-0.1342049915673903-0.23391327821719513j) )<1e-8,
abs(lib.finger(eris.voVV) - (-0.28989635223866056+0.9644368822688475j) )<1e-8,
abs(lib.finger(eris.VOvv) - (-0.32428269235420271+0.0029847254383674748j))<1e-8,
abs(lib.finger(eris.VOVV) - (0.45031779746222456-0.36858577475752041j) )<1e-8,
abs(lib.finger(eris.vvvv) - (-0.080512851258903173-0.2868384266725581j) )<1e-8,
abs(lib.finger(eris.vvVV) - (-0.5137063762484736+1.1036785801263898j) )<1e-8,
#abs(lib.finger(eris.VVvv) - (0.16468487082491939+0.25730725586992997j) )<1e-8,
abs(lib.finger(eris.VVVV) - (-0.56714875196802295+0.058636785679170501j) )<1e-8]))
|
py | 1a537bc03f754c644a038ccaf7463955e19156d6 | from mWindowsSDK import mGdi32;
def fTestGdi32(oConsole):
oGdi32DLL = mGdi32.foLoadGdi32DLL();
|
py | 1a537c4712a543825a632d19add68b71fea27ac3 | from unittest import TestCase
from ofanalysis.ts.ts_data_update import TSDataUpdate
class TestTSDataUpdate(TestCase):
def setUp(self) -> None:
self.ts_data_update_object = TSDataUpdate('602e5ad960d66ab8b1f3c13b4fd746f5323ff808b0820768b02c6da3')
def test_retrieve_all(self):
self.ts_data_update_object.retrieve_all()
print()
|
py | 1a537cadacbefb6c0836eb4b73c5bad056c6b716 | #!/usr/bin/env python
# Copyright 2017 Opera Software AS. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script generates a clang tarball to be sent to the nodes in an icecc
cluster when doing a distributed build of chromium. The tarball is created from
the clang compiler in the chromium checkout.
The generated tarball is stored in src/icecc-tarball/, which is expected by the
icecc-ninja script. Running this scripts deletes the old tarball before
generating the new one.
To make sure the tarball is up-to-date with the current checkout, add this
custom hook to the .gclient file:
"custom_hooks": [ {"pattern": ".", "action": ["icecc-create-env.py"] } ]
That will force a regeneration of the tarball when running "gclient runhooks".
"""
import os
import shutil
import subprocess
clang_path = os.path.join(os.getcwd(), "src", "third_party", "llvm-build", "Release+Asserts", "bin", "clang")
tarball_dir = os.path.join(os.getcwd(), "src", "icecc-tarball")
# Remove old tarball and re-create the directory.
if os.path.exists(tarball_dir):
shutil.rmtree(tarball_dir)
os.makedirs(tarball_dir)
# Generate tarball.
subprocess.check_output(["icecc-create-env", "--clang", clang_path], cwd=tarball_dir)
|
py | 1a537d45fe3aa108a121544341f6c622d3a96b7c | # -*- coding: utf-8 -*-
from couchbase_helper.documentgenerator import doc_generator
from failover.AutoFailoverBaseTest import AutoFailoverBaseTest
from custom_exceptions.exception import RebalanceFailedException, \
ServerUnavailableException
from membase.api.rest_client import RestConnection
class MultiNodeAutoFailoverTests(AutoFailoverBaseTest):
def setUp(self):
super(MultiNodeAutoFailoverTests, self).setUp()
self.data_load_spec = self.input.param("data_load_spec",
"volume_test_load")
self.master = self.servers[0]
def tearDown(self):
super(MultiNodeAutoFailoverTests, self).tearDown()
def _is_failover_expected(self, failure_node_number):
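# Reading of the checks below: failover is not expected when more nodes have
# failed than max_count allows while the pause between failure actions is
# shorter than the detection timeout, or when there are fewer replicas than
# failed nodes.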
failover_not_expected = (
self.max_count == 1 and failure_node_number > 1 and
self.pause_between_failover_action <
self.timeout or self.num_replicas < 1)
failover_not_expected = failover_not_expected or (
1 < self.max_count < failure_node_number and
self.pause_between_failover_action < self.timeout or
self.num_replicas < failure_node_number)
return not failover_not_expected
def _multi_node_failover(self):
servers_to_fail = self.server_to_fail
for i in range(self.max_count):
self.server_to_fail = [servers_to_fail[i]]
self.failover_expected = self._is_failover_expected(i + 1)
self.failover_actions[self.failover_action](self)
self.sleep(self.timeout)
def test_autofailover(self):
"""
Test the basic autofailover for different failure scenarios.
1. Enable autofailover and validate
2. Fail a node and validate if node is failed over if required.
3. Disable autofailover and validate.
:return: Nothing
"""
self.enable_autofailover_and_validate()
self.sleep(5)
tasks = self.subsequent_load_gen()
self._multi_node_failover()
if self.spec_name is None:
for task in tasks:
self.task.jython_task_manager.get_task_result(task)
else:
self.wait_for_async_data_load_to_complete(tasks)
self.disable_autofailover_and_validate()
def _get_server_group_nodes(self, server_group):
servers_in_group = self.zones[server_group]
server_group_nodes = []
for server in self.servers:
if server.ip in servers_in_group:
server_group_nodes.append(server)
return server_group_nodes
def test_autofailover_for_server_group(self):
self.enable_autofailover_and_validate()
self.shuffle_nodes_between_zones_and_rebalance()
self.sleep(30, "waiting")
self.server_to_fail = self._get_server_group_nodes("Group 2")
self.failover_expected = True
tasks = self.subsequent_load_gen()
try:
self.failover_actions[self.failover_action](self)
except:
result, _ = self._check_for_autofailover_initiation_for_server_group_failover(self.server_to_fail)
self.assertTrue(result,
"Server group failover msg was not seen in logs")
finally:
self.sleep(300)
self.start_couchbase_server()
if self.spec_name is None:
for task in tasks:
self.task.jython_task_manager.get_task_result(task)
else:
self.wait_for_async_data_load_to_complete(tasks)
def test_autofailover_during_rebalance(self):
"""
Test autofailover for different failure scenarios while a rebalance of nodes is in progress
1. Enable autofailover and validate
2. Start rebalance of nodes by either adding or removing nodes.
3. Fail a node and validate if node is failed over if required.
4. Disable autofailover and validate.
:return: Nothing
"""
self.enable_autofailover_and_validate()
self.sleep(5)
rebalance_task = self.task.async_rebalance(self.servers,
self.servers_to_add,
self.servers_to_remove)
self.sleep(2)
self._multi_node_failover()
tasks = self.subsequent_load_gen()
try:
rebalance_task.result()
except RebalanceFailedException:
pass
except ServerUnavailableException:
pass
except Exception:
pass
else:
self.fail("Rebalance should fail since a node went down")
finally:
if self.spec_name is None:
for task in tasks:
self.task.jython_task_manager.get_task_result(task)
else:
self.wait_for_async_data_load_to_complete(tasks)
self.disable_autofailover_and_validate()
def test_autofailover_after_rebalance(self):
"""
Test autofailover for different failure scenarios after a rebalance of nodes
1. Enable autofailover and validate
2. Start a rebalance of nodes by either adding or removing nodes and wait for the rebalance to be completed
3. Fail a node and validate if node is failed over if required.
4. Disable autofailover and validate.
:return: Nothing
"""
self.enable_autofailover_and_validate()
self.sleep(5)
rebalance_success = self.task.rebalance(self.servers,
self.servers_to_add,
self.servers_to_remove)
if not rebalance_success:
self.disable_firewall()
self.fail("Rebalance failed. Check logs")
tasks = self.subsequent_load_gen()
self._multi_node_failover()
if self.spec_name is None:
for task in tasks:
self.task.jython_task_manager.get_task_result(task)
else:
self.wait_for_async_data_load_to_complete(tasks)
self.disable_autofailover_and_validate()
def test_rebalance_after_autofailover(self):
"""
Test autofailover for different failure scenarios and then rebalance the nodes
1. Enable autofailover and validate
2. Fail a node and validate if node is failed over if required.
3. Rebalance the cluster by adding and removing nodes and validate that the rebalance succeeds.
:return: Nothing
"""
self.enable_autofailover_and_validate()
self.sleep(5)
tasks = self.subsequent_load_gen()
self._multi_node_failover()
for node in self.servers_to_add:
self.rest.add_node(user=self.orchestrator.rest_username,
password=self.orchestrator.rest_password,
remoteIp=node.ip)
nodes = self.rest.node_statuses()
nodes_to_remove = [node.id for node in nodes if
node.ip in [t.ip for t in
self.servers_to_remove]]
nodes = [node.id for node in nodes]
started = self.rest.rebalance(nodes, nodes_to_remove)
rebalance_success = False
if started:
rebalance_success = self.rest.monitorRebalance()
if (not rebalance_success or not started) and not \
self.failover_expected:
self.fail("Rebalance failed. Check logs")
if self.spec_name is None:
for task in tasks:
self.task.jython_task_manager.get_task_result(task)
else:
self.wait_for_async_data_load_to_complete(tasks)
def test_autofailover_and_addback_of_node(self):
"""
Test autofailover of nodes and then addback of the node after
failover
1. Enable autofailover and validate
2. Fail a node and validate if node is failed over if required
3. Addback node and validate that the addback was successful.
:return: Nothing
"""
if not self.failover_expected:
self.log.info("Since no failover is expected in the test, "
"skipping the test")
return
self.enable_autofailover_and_validate()
self.sleep(5)
tasks = self.subsequent_load_gen()
self._multi_node_failover()
self.server_to_fail = self._servers_to_fail()
self.bring_back_failed_nodes_up()
self.sleep(30)
self.nodes = self.rest.node_statuses()
for node in self.server_to_fail:
self.rest.add_back_node("ns_1@{}".format(node.ip))
self.rest.set_recovery_type("ns_1@{}".format(node.ip),
self.recovery_strategy)
self.rest.rebalance(otpNodes=[node.id for node in self.nodes])
msg = "rebalance failed while recovering failover nodes {0}" \
.format(self.server_to_fail[0])
self.assertTrue(self.rest.monitorRebalance(stop_if_loop=True), msg)
if self.spec_name is None:
for task in tasks:
self.task.jython_task_manager.get_task_result(task)
else:
self.wait_for_async_data_load_to_complete(tasks)
def test_autofailover_and_remove_failover_node(self):
"""
Test autofailover of nodes and remove the failed node via rebalance after the failover.
1. Enable autofailover and validate
2. Fail a node and validate if node is failed over if required
3. Rebalance out the failed-over node if failover was successful and validate.
:return:
"""
if not self.failover_expected:
self.log.info("Since no failover is expected in the test, "
"skipping the test")
return
tasks = self.subsequent_load_gen()
self.enable_autofailover_and_validate()
self.sleep(5)
self._multi_node_failover()
self.nodes = self.rest.node_statuses()
self.remove_after_failover = True
self.rest.rebalance(otpNodes=[node.id for node in self.nodes])
msg = "rebalance failed while removing failover nodes {0}" \
.format(self.server_to_fail[0])
self.assertTrue(self.rest.monitorRebalance(stop_if_loop=True),
msg)
if self.spec_name is None:
for task in tasks:
self.task.jython_task_manager.get_task_result(task)
else:
self.wait_for_async_data_load_to_complete(tasks)
def _check_for_autofailover_initiation_for_server_group_failover(
self, failed_over_nodes):
rest = RestConnection(self.master)
ui_logs = rest.get_logs(10)
ui_logs_text = [t["text"] for t in ui_logs]
ui_logs_time = [t["serverTime"] for t in ui_logs]
expected_log = "Starting failing over ['ns_1@{}','ns_1@{}']".format(
failed_over_nodes[0].ip, failed_over_nodes[1].ip)
self.log.info("ui_logs_text: {0}".format(ui_logs_text))
if expected_log in ui_logs_text:
failed_over_time = ui_logs_time[ui_logs_text.index(expected_log)]
return True, failed_over_time
return False, None
def subsequent_load_gen(self, async_load=True):
if self.spec_name is None:
subsequent_load_gen = doc_generator(self.key,
self.num_items,
self.num_items*2,
key_size=self.key_size,
doc_size=self.doc_size,
doc_type=self.doc_type)
tasks = self.async_load_all_buckets(
subsequent_load_gen, "create", 0)
return tasks
else:
doc_loading_spec = self.bucket_util.get_crud_template_from_package(
self.data_load_spec)
tasks = self.bucket_util.run_scenario_from_spec(
self.task,
self.cluster,
self.bucket_util.buckets,
doc_loading_spec,
mutation_num=0,
async_load=async_load)
return tasks
def wait_for_async_data_load_to_complete(self, task):
self.task.jython_task_manager.get_task_result(task)
|
py | 1a537d8178815a36fd899124e5c0ad041b8ace43 | import logging
import json
import requests
from kube_hunter.conf import config
from kube_hunter.core.types import Hunter, RemoteCodeExec, KubernetesCluster
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import Vulnerability, Event
from kube_hunter.modules.discovery.dashboard import KubeDashboardEvent
logger = logging.getLogger(__name__)
class DashboardExposed(Vulnerability, Event):
"""All operations on the cluster are exposed"""
def __init__(self, nodes):
Vulnerability.__init__(
self, KubernetesCluster, "Dashboard Exposed", category=RemoteCodeExec, vid="KHV029",
)
self.evidence = "nodes: {}".format(" ".join(nodes)) if nodes else None
@handler.subscribe(KubeDashboardEvent)
class KubeDashboard(Hunter):
"""Dashboard Hunting
Hunts open Dashboards, gets the type of nodes in the cluster
"""
def __init__(self, event):
self.event = event
def get_nodes(self):
logger.debug("Passive hunter is attempting to get nodes types of the cluster")
r = requests.get(f"http://{self.event.host}:{self.event.port}/api/v1/node", timeout=config.network_timwout)
if r.status_code == 200 and "nodes" in r.text:
return [node["objectMeta"]["name"] for node in json.loads(r.text)["nodes"]]
def execute(self):
self.publish_event(DashboardExposed(nodes=self.get_nodes()))
|
py | 1a537f39fa266818abe282d1ad857643016533c3 | #!/usr/bin/env python
from setuptools import (
setup,
find_packages,
)
extras_require = {
'test': [
'cryptography',
'pytest-cov',
'pytest-django',
'pytest-xdist',
'pytest',
'tox',
],
'lint': [
'flake8',
'pep8',
'isort',
],
'doc': [
'Sphinx>=1.6.5,<2',
'sphinx_rtd_theme>=0.1.9',
],
'dev': [
'bumpversion>=0.5.3,<1',
'pytest-watch',
'wheel',
'twine',
'ipython',
],
'python-jose': [
'python-jose==3.0.0',
],
}
extras_require['dev'] = (
extras_require['dev'] + # noqa: W504
extras_require['test'] + # noqa: W504
extras_require['lint'] + # noqa: W504
extras_require['doc'] + # noqa: W504
extras_require['python-jose']
)
setup(
name='djangorestframework_simplejwt',
version='4.4.0',
url='https://github.com/davesque/django-rest-framework-simplejwt',
license='MIT',
description='A minimal JSON Web Token authentication plugin for Django REST Framework',
long_description=open('README.rst', 'r', encoding='utf-8').read(),
author='David Sanders',
author_email='[email protected]',
install_requires=[
'django',
'djangorestframework',
'pyjwt',
],
python_requires='>=3.6,<3.9',
extras_require=extras_require,
packages=find_packages(exclude=['tests', 'tests.*', 'licenses', 'requirements']),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Internet :: WWW/HTTP',
]
)
|
py | 1a538001611b70b6673bd3dcf183b209cd05e645 | import LevelBuilder
from sprites import *
def render(name,bg):
lb = LevelBuilder.LevelBuilder(name+".plist",background=bg)
lb.addObject(Bullet.BulletSprite(x=0, y=0,width=10,height=10,angle='0',restitution=0.5,static='false',friction=0.5,density=3,spawnEvent='onShoot'))
lb.addObject(Teleporter.TeleporterSprite(level_id='leveldata/level_2'))
lb.addObject(Hero.HeroSprite(x=51, y=260,width=32,height=32))
lb.addObject(Enemy.EnemySprite(x=871, y=107,width=208,height=208,angle='0',restitution=0.2,static='false',friction=0.5,density=20 , spawnframe = 100).setName('Enemy'))
lb.addObject(Friend.FriendSprite(x=532, y=44,width=89,height=89,angle='0',restitution=0.2,static='false',friction=0.5,density=5 ))
lb.addObject(Beam.BeamSprite(x=1505, y=63,width=127,height=14,angle='90' ,restitution=0.2,static='false',friction=0.5,density=5 ))
lb.addObject(Bomb.BombSprite(x=1304, y=17,width=32,height=32 ,restitution=0.2,static='false',friction=0.5,density=5 ))
lb.addObject(Beam.BeamSprite(x=1425, y=63,width=127,height=14,angle='90' ,restitution=0.2,static='false',friction=0.5,density=5 ))
lb.addObject(Beam.BeamSprite(x=1465, y=133,width=127,height=14,angle='0' ,restitution=0.2,static='false',friction=0.5,density=5 ))
lb.addObject(Beam.BeamSprite(x=1505, y=204,width=127,height=14,angle='90' ,restitution=0.2,static='false',friction=0.5,density=5 ))
lb.addObject(Beam.BeamSprite(x=1425, y=203,width=127,height=14,angle='90' ,restitution=0.2,static='false',friction=0.5,density=5 ))
lb.addObject(Beam.BeamSprite(x=1257, y=63,width=127,height=14,angle='90' ,restitution=0.2,static='false',friction=0.5,density=5 ))
lb.addObject(Enemy.EnemySprite(x=1463, y=174,width=32,height=32,angle='0',restitution=0.2,static='false',friction=0.5,density=5 ))
lb.addObject(SpikeyBuddy.SpikeyBuddySprite(x=688, y=21,width=40,height=40,restitution=0.2,static='false',friction=0.5,density=20 ).setName('Spikey'))
lb.addObject(Enemy.EnemySprite(x=348, y=25,width=32,height=32,angle='0',restitution=0.2,static='false',friction=0.5,density=5 ))
lb.addObject(Beam.BeamSprite(x=1963, y=63,width=127,height=14,angle='90' ,restitution=0.2,static='false',friction=0.5,density=5 ))
lb.addObject(Bomb.BombSprite(x=1702, y=17,width=32,height=32 ,restitution=0.2,static='false',friction=0.5,density=5 ))
lb.addObject(Beam.BeamSprite(x=1753, y=63,width=127,height=14,angle='90' ,restitution=0.2,static='false',friction=0.5,density=5 ))
lb.addObject(Beam.BeamSprite(x=1852, y=133,width=301,height=14,angle='0' ,restitution=0.2,static='false',friction=0.5,density=5 ))
lb.addObject(Beam.BeamSprite(x=1903, y=204,width=127,height=14,angle='90' ,restitution=0.2,static='false',friction=0.5,density=5 ))
lb.addObject(Beam.BeamSprite(x=1823, y=203,width=127,height=14,angle='90' ,restitution=0.2,static='false',friction=0.5,density=5 ))
lb.addObject(Beam.BeamSprite(x=1655, y=63,width=127,height=14,angle='90' ,restitution=0.2,static='false',friction=0.5,density=5 ))
lb.addObject(Enemy.EnemySprite(x=1861, y=174,width=32,height=32,angle='0',restitution=0.2,static='false',friction=0.5,density=5 ))
lb.addObject(Enemy.EnemySprite(x=2334, y=57,width=32,height=32,angle='0',restitution=0.2,static='false',friction=0.5,density=5 ))
lb.addObject(Enemy.EnemySprite(x=2334, y=25,width=32,height=32,angle='0',restitution=0.2,static='false',friction=0.5,density=5 ))
lb.addObject(Enemy.EnemySprite(x=2334, y=90,width=32,height=32,angle='0',restitution=0.2,static='false',friction=0.5,density=5 ))
lb.addObject(Enemy.EnemySprite(x=2334, y=125,width=32,height=32,angle='0',restitution=0.2,static='false',friction=0.5,density=5 ))
lb.addObject(Enemy.EnemySprite(x=2334, y=160,width=32,height=32,angle='0',restitution=0.2,static='false',friction=0.5,density=5 ))
lb.addObject(Enemy.EnemySprite(x=2334, y=196,width=32,height=32,angle='0',restitution=0.2,static='false',friction=0.5,density=5 ))
lb.addObject(Enemy.EnemySprite(x=2334, y=231,width=32,height=32,angle='0',restitution=0.2,static='false',friction=0.5,density=5 ))
lb.addObject(Enemy.EnemySprite(x=2334, y=275,width=32,height=32,angle='0',restitution=0.2,static='false',friction=0.5,density=5 ))
lb.addObject(Enemy.EnemySprite(x=1091, y=58,width=32,height=32,angle='0',restitution=0.2,static='false',friction=0.5,density=5 ))
lb.addObject(Enemy.EnemySprite(x=1091, y=26,width=32,height=32,angle='0',restitution=0.2,static='false',friction=0.5,density=5 ))
lb.addObject(Enemy.EnemySprite(x=1091, y=91,width=32,height=32,angle='0',restitution=0.2,static='false',friction=0.5,density=5 ))
lb.addObject(Enemy.EnemySprite(x=1091, y=126,width=32,height=32,angle='0',restitution=0.2,static='false',friction=0.5,density=5 ))
lb.addObject(Enemy.EnemySprite(x=1091, y=160,width=32,height=32,angle='0',restitution=0.2,static='false',friction=0.5,density=5 ))
lb.addObject(Enemy.EnemySprite(x=1091, y=196,width=32,height=32,angle='0',restitution=0.2,static='false',friction=0.5,density=5 ))
lb.addObject(Enemy.EnemySprite(x=1091, y=232,width=32,height=32,angle='0',restitution=0.2,static='false',friction=0.5,density=5 ))
lb.addObject(Enemy.EnemySprite(x=1091, y=276,width=32,height=32,angle='0',restitution=0.2,static='false',friction=0.5,density=5 ))
lb.addObject(Beam.BeamSprite(x=185, y=7,width=80,height=60,angle='30',restitution=0.2,static='true',friction=0.5,density=20 ).setName('Beam'))
lb.addObject(Beam.BeamSprite(x=237, y=0,width=80,height=60,angle='30',restitution=0.2,static='true',friction=0.5,density=20 ).setName('Beam'))
lb.addObject(Beam.BeamSprite(x=268, y=72,width=80,height=60,angle='30',restitution=0.2,static='true',friction=0.5,density=20 ).setName('Beam'))
lb.addObject(Beam.BeamSprite(x=425, y=-7,width=80,height=60,angle='30',restitution=0.2,static='true',friction=0.5,density=20 ).setName('Beam'))
lb.addObject(Beam.BeamSprite(x=659, y=313,width=80,height=60,angle='30',restitution=0.2,static='true',friction=0.5,density=20 ).setName('Beam'))
lb.addObject(Beam.BeamSprite(x=2419, y=329,width=80,height=60,angle='30',restitution=0.2,static='true',friction=0.5,density=20 ).setName('Beam'))
lb.addObject(PumpkinBomber.PumpkinBomberSprite(x=2138, y=57))
lb.addObject(Enemy.EnemySprite(x=0, y=0,width=24,height=24,angle='0',restitution=0.5,static='false',friction=0.5,density=2,spawnEvent='onPumpkinBomberShoot',classname='PumpkinSprite',firstframe='pumpkin.png' ))
lb.addObject(Contacts.Contact(body1='beamPumpkinBomber',body2='lbullet',event_name='onBulletHit'))
lb.addObject(Contacts.Contact(body1='beamPumpkinBomber',body2='rbullet',event_name='onBulletHit'))
lb.addObject(Contacts.Contact(body1='EnemyBullet',body2='Hero',event_name='onDamage'))
lb.addObject(Contacts.Contact(body1='EnemyBullet',body2='lbullet',event_name='onBulletHit'))
lb.addObject(Contacts.Contact(body1='EnemyBullet',body2='rbullet',event_name='onBulletHit'))
lb.addObject(Enemy.EnemySprite(x=408, y=61,width=56,height=56,angle='0',restitution=0.2,static='false',friction=0.5,density=5 , classname='BlobSprite',firstframe='monsterblob.png'))
lb.addObject(Pickup.PickupSprite(x=286,y=39,width=32, height=32, static='false',angle=0))
lb.addObject(Crate.CrateSprite(x=1464,y=21,width=32, height=32, static='false',angle=0))
lb.addObject(Crate.CrateSprite(x=1464,y=55,width=32, height=32, static='false',angle=0))
lb.addObject(Crate.CrateSprite(x=1464,y=88,width=32, height=32, static='false',angle=0))
lb.addObject(Crate.CrateSprite(x=1788,y=21,width=32, height=32, static='false',angle=0))
lb.addObject(Crate.CrateSprite(x=1788,y=55,width=32, height=32, static='false',angle=0))
lb.addObject(Crate.CrateSprite(x=1788,y=88,width=32, height=32, static='false',angle=0))
lb.addObject(Crate.CrateSprite(x=1824,y=21,width=32, height=32, static='false',angle=0))
lb.addObject(Crate.CrateSprite(x=1824,y=55,width=32, height=32, static='false',angle=0))
lb.addObject(Crate.CrateSprite(x=1824,y=88,width=32, height=32, static='false',angle=0))
lb.addObject(Crate.CrateSprite(x=1862,y=21,width=32, height=32, static='false',angle=0))
lb.addObject(Crate.CrateSprite(x=1862,y=55,width=32, height=32, static='false',angle=0))
lb.addObject(Crate.CrateSprite(x=1862,y=88,width=32, height=32, static='false',angle=0))
lb.addObject(Pickup.PickupSprite(x=1907,y=22,width=32, height=32, static='false',angle=0))
lb.addObject(Pickup.PickupSprite(x=2523,y=24,width=32, height=32, static='false',angle=0))
lb.addObject(ZoomTrigger.ZoomTriggerSprite(x=63-115-50,y=160,width=100,height=320,zoom_fact=1.0))
lb.addObject(ZoomTrigger.ZoomTriggerSprite(x=63,y=320-60,width=128,height=100,zoom_fact=0.15))
lb.addObject(ZoomTrigger.ZoomTriggerSprite(x=63+115+50,y=160,width=100,height=320,zoom_fact=1.0))
lb.addObject(WatchtowerVisual.WatchtowerVisualSprite(x=63, y=92,width=128,height=235-50,angle='0',restitution=0.2,static='true',friction=0.5,density=20,firstframe='watchtower.png' ))
lb.addObject(Enemy.EnemySprite(x=1614, y=11,width=21,height=21,angle='0',restitution=0.2,static='false',friction=0.5,density=5 , classname='BlobSprite',firstframe='monsterblob.png'))
lb.addObject(BulletTimePickup.BulletTimePickupSprite(x=1043,y=-117,width=32, height=32, static='false',angle=0))
lb.render() |
py | 1a5380022fae807ce89a6be56fd1ef56574a111a | import unittest
import provider.s3lib as s3lib
import tests.settings_mock as settings_mock
from mock import mock, patch, MagicMock
from ddt import ddt, data, unpack
from boto.s3.key import Key
from boto.s3.prefix import Prefix
class FakeKey(Key):
def __init__(self, name):
self.name = name
class FakePrefix(Prefix):
def __init__(self, name):
self.name = name
class FakeBucket(object):
items = []
def list(self, prefix=None, delimiter=None, headers=None):
return self.items
@ddt
class TestProviderS3Lib(unittest.TestCase):
def setUp(self):
self.fake_s3_keys = [
FakeKey('one.xml'),
FakeKey('one.tif'),
FakeKey('one.pdf')
]
self.fake_s3_prefixes = [
FakePrefix('two/')
]
def test_get_s3_key_names_from_bucket(self):
"simple tests for coverage"
fake_bucket = FakeBucket()
fake_bucket.items += self.fake_s3_keys
fake_bucket.items += self.fake_s3_prefixes
self.assertEqual(len(s3lib.get_s3_key_names_from_bucket(fake_bucket)), 3)
self.assertEqual(len(s3lib.get_s3_key_names_from_bucket(
fake_bucket, file_extensions=['.xml'])), 1)
self.assertEqual(len(s3lib.get_s3_key_names_from_bucket(
fake_bucket, file_extensions=['.xml', '.pdf'])), 2)
self.assertEqual(len(s3lib.get_s3_key_names_from_bucket(
fake_bucket, key_type='prefix')), 1)
@data(
(99999, ['pmc/zip/elife-05-19405.zip'], None),
(19405, ['pmc/zip/elife-05-19405.zip'], 'pmc/zip/elife-05-19405.zip'),
(24052, [
'pmc/zip/elife-06-24052.zip',
'pmc/zip/elife-06-24052.r1.zip',
'pmc/zip/elife-06-24052.r2.zip',
], 'pmc/zip/elife-06-24052.r2.zip'),
# strange example below would not normally exist but is for code coverage
(24052, [
'pmc/zip/elife-04-24052.zip',
'pmc/zip/elife-05-24052.zip',
'pmc/zip/elife-05-24052.r1.zip'
], 'pmc/zip/elife-05-24052.r1.zip'),
)
@unpack
def test_latest_pmc_zip_revision(self, doi_id, s3_key_names, expected_s3_key_name):
self.assertEqual(s3lib.latest_pmc_zip_revision(doi_id, s3_key_names), expected_s3_key_name)
if __name__ == '__main__':
unittest.main()
|
py | 1a53806d7d20b584614a6999d5d123360abd9747 | """Generate a stack of blocks.
1. Define the number of blocks and the block dimensions
2. Create an empty assembly.
3. Make a standard brick.
4. Add the blocks of the stack.
5. Serialise to json.
6. Visualise the result
"""
from math import pi
from random import choice
import os
from compas.geometry import Box
from compas.geometry import Translation
from compas.geometry import Rotation
from compas.geometry import scale_vector
from compas.geometry import add_vectors
from compas.datastructures import mesh_transform
from compas_assembly.datastructures import Assembly
from compas_assembly.datastructures import Block
from compas_assembly.datastructures import assembly_transform
from compas_assembly.plotter import AssemblyPlotter
HERE = os.path.dirname(__file__)
DATA = os.path.join(HERE, '../data')
PATH = os.path.join(DATA, 'stack.json')
# number of blocks
N = 10
# block dimensions
W = 2.0
H = 0.5
D = 1.0
# empty assembly
assembly = Assembly()
# default block
box = Box.from_width_height_depth(W, H, D)
block = Block.from_vertices_and_faces(box.vertices, box.faces)
# make all blocks
# place each block on top of previous
# shift block randomly in XY plane
for i in range(N):
b = block.copy()
factor = choice([0.01, -0.01, 0.05, -0.05, 0.1, -0.1])
axis = choice([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
vector = scale_vector(axis, factor)
T = Translation([vector[0], vector[1], i * H])
mesh_transform(b, T)
assembly.add_block(b, is_support=(i == 0))
# export to json
assembly.to_json(PATH)
# visualise
R = Rotation.from_axis_and_angle([1.0, 0.0, 0.0], -pi / 2)
assembly_transform(assembly, R)
plotter = AssemblyPlotter(assembly, figsize=(10, 7))
plotter.draw_vertices(text={key: str(key) for key in assembly.vertices()})
plotter.draw_blocks(
facecolor={key: (255, 0, 0) for key in assembly.vertices_where({'is_support': True})}
)
plotter.show()
|
py | 1a5380fb0a0036cddf24b623c5440cd4313ddad8 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common client library functions and classes used by all products."""
import abc
import base64
import binascii
from functools import wraps
import inspect
from itertools import izip
import locale
import logging
import logging.config
import os
import ssl
import sys
import threading
import urllib2
import warnings
import lxml.builder
import lxml.etree
import requests.exceptions
import suds
import suds.cache
import suds.client
import suds.mx.literal
import suds.plugin
import suds.transport.http
import suds.xsd.doctor
import yaml
import zeep
import zeep.cache
import zeep.exceptions
import zeep.helpers
import zeep.transports
import zeep.xsd
import googleads.errors
import googleads.oauth2
import googleads.util
try:
import urllib2.HTTPSHandler
except ImportError:
# Python versions below 2.7.9 / 3.4 won't have this. In order to offer legacy
# support (for now) we will work around this gracefully, but users will
# not have certificate validation performed until they update.
pass
logging.getLogger('suds.client').addFilter(googleads.util.GetSudsClientFilter())
logging.getLogger('suds.mx.core').addFilter(
googleads.util.GetSudsMXCoreFilter())
logging.getLogger('suds.mx.literal').addFilter(
googleads.util.GetSudsMXLiteralFilter())
logging.getLogger('suds.transport.http').addFilter(
googleads.util.GetSudsTransportFilter())
_logger = logging.getLogger(__name__)
_PY_VERSION_MAJOR = sys.version_info.major
_PY_VERSION_MINOR = sys.version_info.minor
_PY_VERSION_MICRO = sys.version_info.micro
_DEPRECATED_VERSION_TEMPLATE = (
'This library is being run by an unsupported Python version (%s.%s.%s). In '
'order to benefit from important security improvements and ensure '
'compatibility with this library, upgrade to Python 2.7.9 or higher.')
VERSION = '17.0.0'
_COMMON_LIB_SIG = 'googleads/%s' % VERSION
_LOGGING_KEY = 'logging'
_HTTP_PROXY_YAML_KEY = 'http'
_HTTPS_PROXY_YAML_KEY = 'https'
_PROXY_CONFIG_KEY = 'proxy_config'
_PYTHON_VERSION = 'Python/%d.%d.%d' % (
_PY_VERSION_MAJOR, _PY_VERSION_MINOR, _PY_VERSION_MICRO)
# The required keys in the authentication dictionary that are used to construct
# installed application OAuth2 credentials.
_OAUTH2_INSTALLED_APP_KEYS = ('client_id', 'client_secret', 'refresh_token')
# The keys in the authentication dictionary that are used to construct service
# account OAuth2 credentials.
_OAUTH2_SERVICE_ACCT_KEYS = ('path_to_private_key_file',)
_OAUTH2_SERVICE_ACCT_KEYS_OPTIONAL = ('delegated_account',)
# A key used to configure the client to accept and automatically decompress
# gzip encoded SOAP responses.
ENABLE_COMPRESSION_KEY = 'enable_compression'
# A key used to configure the client to send arbitrary headers in SOAP requests.
CUSTOM_HEADERS_KEY = 'custom_http_headers'
# A key used to specify the SOAP implementation to use.
SOAP_IMPLEMENTATION_KEY = 'soap_impl'
# Global variables used to enable and store utility usage stats.
_utility_registry = googleads.util.UtilityRegistry()
_UTILITY_REGISTER_YAML_KEY = 'include_utilities_in_user_agent'
_UTILITY_LOCK = threading.Lock()
# Apply any necessary patches to dependency libraries.
googleads.util.PatchHelper().Apply()
def GenerateLibSig(short_name):
"""Generates a library signature suitable for a user agent field.
Args:
short_name: The short, product-specific string name for the library.
Returns:
A library signature string to append to user-supplied user-agent value.
"""
with _UTILITY_LOCK:
utilities_used = ', '.join([utility for utility
in sorted(_utility_registry)])
_utility_registry.Clear()
if utilities_used:
return ' (%s, %s, %s, %s)' % (short_name, _COMMON_LIB_SIG, _PYTHON_VERSION,
utilities_used)
else:
return ' (%s, %s, %s)' % (short_name, _COMMON_LIB_SIG, _PYTHON_VERSION)
class CommonClient(object):
"""Contains shared startup code between Ad Manager and AdWords clients."""
def __init__(self):
# Warn users on deprecated Python versions on initialization.
if _PY_VERSION_MAJOR == 2:
if _PY_VERSION_MINOR == 7 and _PY_VERSION_MICRO < 9:
_logger.warning(_DEPRECATED_VERSION_TEMPLATE, _PY_VERSION_MAJOR,
_PY_VERSION_MINOR, _PY_VERSION_MICRO)
elif _PY_VERSION_MINOR < 7:
_logger.warning(_DEPRECATED_VERSION_TEMPLATE, _PY_VERSION_MAJOR,
_PY_VERSION_MINOR, _PY_VERSION_MICRO)
# Warn users about using non-utf8 encoding
_, encoding = locale.getdefaultlocale()
if encoding is None or encoding.lower() != 'utf-8':
_logger.warning('Your default encoding, %s, is not UTF-8. Please run this'
' script with UTF-8 encoding to avoid errors.', encoding)
def LoadFromString(yaml_doc, product_yaml_key, required_client_values,
optional_product_values):
"""Loads the data necessary for instantiating a client from file storage.
In addition to the required_client_values argument, the yaml file must supply
the keys used to create OAuth2 credentials. It may also optionally set proxy
configurations.
Args:
yaml_doc: the yaml document whose keys should be used.
product_yaml_key: The key to read in the yaml as a string.
required_client_values: A tuple of strings representing values which must
be in the yaml file for a supported API. If one of these keys is not in
the yaml file, an error will be raised.
optional_product_values: A tuple of strings representing optional values
which may be in the yaml file.
Returns:
A dictionary map of the keys in the yaml file to their values. This will not
contain the keys used for OAuth2 client creation and instead will have a
GoogleOAuth2Client object stored in the 'oauth2_client' field.
Raises:
A GoogleAdsValueError if the given yaml file does not contain the
information necessary to instantiate a client object - either a
required_client_values key was missing or an OAuth2 key was missing.
"""
data = yaml.safe_load(yaml_doc) or {}
if 'dfp' in data:
raise googleads.errors.GoogleAdsValueError(
'Please replace the "dfp" key in the configuration YAML string with '
'"ad_manager" to fix this issue.')
logging_config = data.get(_LOGGING_KEY)
if logging_config:
logging.config.dictConfig(logging_config)
try:
product_data = data[product_yaml_key]
except KeyError:
raise googleads.errors.GoogleAdsValueError(
'The "%s" configuration is missing'
% (product_yaml_key,))
if not isinstance(product_data, dict):
raise googleads.errors.GoogleAdsValueError(
'The "%s" configuration is empty or invalid'
% (product_yaml_key,))
IncludeUtilitiesInUserAgent(data.get(_UTILITY_REGISTER_YAML_KEY, True))
original_keys = list(product_data.keys())
client_kwargs = {}
try:
for key in required_client_values:
client_kwargs[key] = product_data[key]
del product_data[key]
except KeyError:
raise googleads.errors.GoogleAdsValueError(
'Some of the required values are missing. Required '
'values are: %s, actual values are %s'
% (required_client_values, original_keys))
proxy_config_data = data.get(_PROXY_CONFIG_KEY, {})
proxy_config = _ExtractProxyConfig(product_yaml_key, proxy_config_data)
client_kwargs['proxy_config'] = proxy_config
client_kwargs['oauth2_client'] = _ExtractOAuth2Client(
product_yaml_key, product_data, proxy_config)
client_kwargs[ENABLE_COMPRESSION_KEY] = data.get(
ENABLE_COMPRESSION_KEY, False)
client_kwargs[CUSTOM_HEADERS_KEY] = data.get(CUSTOM_HEADERS_KEY, None)
if SOAP_IMPLEMENTATION_KEY in data:
client_kwargs[SOAP_IMPLEMENTATION_KEY] = data[SOAP_IMPLEMENTATION_KEY]
for value in optional_product_values:
if value in product_data:
client_kwargs[value] = product_data[value]
del product_data[value]
if product_data:
warnings.warn('Could not recognize the following keys: %s. '
'They were ignored.' % (product_data,), stacklevel=3)
return client_kwargs
def LoadFromStorage(path, product_yaml_key, required_client_values,
optional_product_values):
"""Loads the data necessary for instantiating a client from file storage.
In addition to the required_client_values argument, the yaml file must supply
the keys used to create OAuth2 credentials. It may also optionally set proxy
configurations.
Args:
path: A path string to the yaml document whose keys should be used.
product_yaml_key: The key to read in the yaml as a string.
required_client_values: A tuple of strings representing values which must
be in the yaml file for a supported API. If one of these keys is not in
the yaml file, an error will be raised.
optional_product_values: A tuple of strings representing optional values
which may be in the yaml file.
Returns:
A dictionary map of the keys in the yaml file to their values. This will not
contain the keys used for OAuth2 client creation and instead will have a
GoogleOAuth2Client object stored in the 'oauth2_client' field.
Raises:
A GoogleAdsValueError if the given yaml file does not contain the
information necessary to instantiate a client object - either a
required_client_values key was missing or an OAuth2 key was missing.
"""
if not os.path.isabs(path):
path = os.path.expanduser(path)
try:
with open(path, 'rb') as handle:
yaml_doc = handle.read()
except IOError:
raise googleads.errors.GoogleAdsValueError(
'Given yaml file, %s, could not be opened.' % path)
try:
client_kwargs = LoadFromString(yaml_doc, product_yaml_key,
required_client_values,
optional_product_values)
except googleads.errors.GoogleAdsValueError as e:
raise googleads.errors.GoogleAdsValueError(
'Given yaml file, %s, could not find some keys. %s' % (path, e))
return client_kwargs
def _ExtractOAuth2Client(product_yaml_key, product_data, proxy_config):
"""Generates an GoogleOAuth2Client subclass using the given product_data.
Args:
product_yaml_key: a string key identifying the product being configured.
product_data: a dict containing the configurations for a given product.
proxy_config: a ProxyConfig instance.
Returns:
An instantiated GoogleOAuth2Client subclass.
Raises:
A GoogleAdsValueError if the OAuth2 configuration for the given product is
misconfigured.
"""
oauth2_kwargs = {
'proxy_config': proxy_config
}
if all(config in product_data for config in _OAUTH2_INSTALLED_APP_KEYS):
oauth2_args = [
product_data['client_id'], product_data['client_secret'],
product_data['refresh_token']
]
oauth2_client = googleads.oauth2.GoogleRefreshTokenClient
for key in _OAUTH2_INSTALLED_APP_KEYS:
del product_data[key]
elif all(config in product_data for config in _OAUTH2_SERVICE_ACCT_KEYS):
oauth2_args = [
product_data['path_to_private_key_file'],
googleads.oauth2.GetAPIScope(product_yaml_key),
]
oauth2_kwargs.update({
'sub': product_data.get('delegated_account')
})
oauth2_client = googleads.oauth2.GoogleServiceAccountClient
for key in _OAUTH2_SERVICE_ACCT_KEYS:
del product_data[key]
for optional_key in _OAUTH2_SERVICE_ACCT_KEYS_OPTIONAL:
if optional_key in product_data:
del product_data[optional_key]
else:
raise googleads.errors.GoogleAdsValueError(
'Your yaml file is incorrectly configured for OAuth2. You need to '
'specify credentials for either the installed application flow (%s) '
'or service account flow (%s).' %
(_OAUTH2_INSTALLED_APP_KEYS, _OAUTH2_SERVICE_ACCT_KEYS))
return oauth2_client(*oauth2_args, **oauth2_kwargs)
def _ExtractProxyConfig(product_yaml_key, proxy_config_data):
"""Returns an initialized ProxyConfig using the given proxy_config_data.
Args:
product_yaml_key: a string indicating the client being loaded.
proxy_config_data: a dict containing the contents of proxy_config from the
YAML file.
Returns:
If there is a proxy to configure in proxy_config, this will return a
ProxyConfig instance with those settings. Otherwise, it will return None.
Raises:
A GoogleAdsValueError if one of the required keys specified by _PROXY_KEYS
is missing.
"""
cafile = proxy_config_data.get('cafile', None)
disable_certificate_validation = proxy_config_data.get(
'disable_certificate_validation', False)
http_proxy = proxy_config_data.get(_HTTP_PROXY_YAML_KEY)
https_proxy = proxy_config_data.get(_HTTPS_PROXY_YAML_KEY)
proxy_config = ProxyConfig(
http_proxy=http_proxy,
https_proxy=https_proxy,
cafile=cafile,
disable_certificate_validation=disable_certificate_validation)
return proxy_config
def _PackForSuds(obj, factory, packer=None, version=None):
"""Packs SOAP input into the format we want for suds.
The main goal here is to pack dictionaries with an 'xsi_type' key into
objects. This allows dictionary syntax to be used even with complex types
extending other complex types. The contents of dictionaries and lists/tuples
are recursively packed. Mutable types are copied - we don't mutate the input.
Args:
obj: A parameter for a SOAP request which will be packed. If this is
a dictionary or list, the contents will recursively be packed. If this
is not a dictionary or list, the contents will be recursively searched
for instances of unpacked dictionaries or lists.
factory: The suds.client.Factory object which can create instances of the
classes generated from the WSDL.
packer: An optional subclass of googleads.common.SoapPacker that provides
customized packing logic.
version: the version of the current API, e.g. 'v201811'
Returns:
If the given obj was a dictionary that contained the 'xsi_type' key, this
will be an instance of a class generated from the WSDL. Otherwise, this will
be the same data type as the input obj was.
"""
if packer:
obj = packer.Pack(obj, version)
if obj in ({}, None):
# Force suds to serialize empty objects. There are legitimate use cases for
# this, for example passing in an empty SearchCriteria object to a DFA
# search method in order to select everything.
return suds.null()
elif isinstance(obj, dict):
if 'xsi_type' in obj:
try:
new_obj = factory.create(obj['xsi_type'])
except suds.TypeNotFound:
new_obj = factory.create(':'.join(['ns0', obj['xsi_type']]))
# Suds sends an empty XML element for enum types which are not set. None
# of Google's Ads APIs will accept this. Initializing all of the fields in
# a suds object to None will ensure that they don't get serialized at all
# unless the user sets a value. User values explicitly set to None will be
# packed into a suds.null() object.
for param, _ in new_obj:
# Another problem is that the suds.mx.appender.ObjectAppender won't
# serialize object types with no fields set, but both AdWords and Ad
# Manager rely on sending objects with just the xsi:type set. The
# below "if" statement is an ugly hack that gets this to work in all(?)
# situations by taking advantage of the fact that these classes
# generally all have a type field. The only other option is to monkey
# patch ObjectAppender.
if param.endswith('.Type'):
setattr(new_obj, param, obj['xsi_type'])
else:
setattr(new_obj, param, None)
for key in obj:
if key == 'xsi_type': continue
setattr(new_obj, key, _PackForSuds(obj[key], factory,
packer=packer))
else:
new_obj = {}
for key in obj:
new_obj[key] = _PackForSuds(obj[key], factory,
packer=packer)
return new_obj
elif isinstance(obj, (list, tuple)):
return [_PackForSuds(item, factory,
packer=packer) for item in obj]
else:
_RecurseOverObject(obj, factory)
return obj
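# Illustrative sketch (not part of the original module): how a dict with an
# 'xsi_type' key is expected to flow through _PackForSuds. The type and field
# names below are hypothetical placeholders rather than real API types.
#
#   operand = {
#       'xsi_type': 'TextAd',                 # packed via factory.create('TextAd')
#       'headline': 'Example headline',       # remaining keys become attributes
#       'finalUrls': ['http://example.com'],  # lists are packed recursively
#   }
#   packed = _PackForSuds(operand, suds_client.factory)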
def _RecurseOverObject(obj, factory, parent=None):
"""Recurses over a nested structure to look for changes in Suds objects.
Args:
obj: A parameter for a SOAP request field which is to be inspected and
will be packed for Suds if an xsi_type is specified, otherwise will be
left unaltered.
factory: The suds.client.Factory object which can create instances of the
classes generated from the WSDL.
parent: The parent object that contains the obj parameter to be inspected.
"""
if _IsSudsIterable(obj):
# Since in-place modification of the Suds object is taking place, the
# iterator should be done over a frozen copy of the unpacked fields.
copy_of_obj = tuple(obj)
for item in copy_of_obj:
if _IsSudsIterable(item):
if 'xsi_type' in item:
if isinstance(obj, tuple):
parent[obj[0]] = _PackForSuds(obj[1], factory)
else:
obj.remove(item)
obj.append(_PackForSuds(item, factory))
_RecurseOverObject(item, factory, obj)
def _IsSudsIterable(obj):
"""A short helper method to determine if a field is iterable for Suds."""
return obj and not isinstance(obj, basestring) and hasattr(obj, '__iter__')
def IncludeUtilitiesInUserAgent(value):
"""Configures the logging of utilities in the User-Agent.
Args:
    value: a bool; if True, utility names will be included in the User-Agent,
      otherwise they will not be added.
"""
with _UTILITY_LOCK:
_utility_registry.SetEnabled(value)
def AddToUtilityRegistry(utility_name):
"""Directly add a utility to the registry, not a decorator.
Args:
utility_name: The name of the utility to add.
"""
with _UTILITY_LOCK:
_utility_registry.Add(utility_name)
def RegisterUtility(utility_name, version_mapping=None):
"""Decorator that registers a class with the given utility name.
This will only register the utilities being used if the UtilityRegistry is
enabled. Note that only the utility class's public methods will cause the
utility name to be added to the registry.
Args:
utility_name: A str specifying the utility name associated with the class.
version_mapping: A dict containing optional version strings to append to the
utility string for individual methods; where the key is the method name and
the value is the text to be appended as the version.
Returns:
The decorated class.
"""
def IsFunctionOrMethod(member):
"""Determines if given member is a function or method.
These two are used in combination to ensure that inspect finds all of a
given utility class's methods in both Python 2 and 3.
Args:
member: object that is a member of a class, to be determined whether it is
a function or method.
Returns:
A boolean that is True if the provided member is a function or method, or
False if it isn't.
"""
return inspect.isfunction(member) or inspect.ismethod(member)
def MethodDecorator(utility_method, version):
"""Decorates a method in the utility class."""
registry_name = ('%s/%s' % (utility_name, version) if version
else utility_name)
@wraps(utility_method)
def Wrapper(*args, **kwargs):
AddToUtilityRegistry(registry_name)
return utility_method(*args, **kwargs)
return Wrapper
def ClassDecorator(cls):
"""Decorates a utility class."""
for name, method in inspect.getmembers(cls, predicate=IsFunctionOrMethod):
# Public methods of the class will have the decorator applied.
if not name.startswith('_'):
# The decorator will only be applied to unbound methods; this prevents
# it from clobbering class methods. If the attribute doesn't exist, set
# None for PY3 compatibility.
if not getattr(method, '__self__', None):
setattr(cls, name, MethodDecorator(
method, version_mapping.get(name) if version_mapping else None))
return cls
return ClassDecorator
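# Usage sketch (added for illustration; the utility and method names below are
# hypothetical): calling a public method of a decorated class records the
# utility name, optionally suffixed with the mapped version, in the registry.
#
#   @RegisterUtility('batch_job_helper', version_mapping={'Upload': 'v2'})
#   class BatchJobHelper(object):
#     def Upload(self, operations):
#       # invoking this adds 'batch_job_helper/v2' to the utility registry
#       pass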
class ProxyConfig(object):
"""A utility for configuring the usage of a proxy."""
def __init__(self, http_proxy=None, https_proxy=None, cafile=None,
disable_certificate_validation=False):
self._http_proxy = http_proxy
self._https_proxy = https_proxy
self.proxies = {}
if self._https_proxy:
self.proxies['https'] = str(self._https_proxy)
if self._http_proxy:
self.proxies['http'] = str(self._http_proxy)
self.disable_certificate_validation = disable_certificate_validation
self.cafile = None if disable_certificate_validation else cafile
# Initialize the context used to generate the urllib2.HTTPSHandler (in
# Python 2.7.9+ and 3.4+) used by suds and urllib2.
self.ssl_context = self._InitSSLContext(
self.cafile, self.disable_certificate_validation)
def _InitSSLContext(self, cafile=None,
disable_ssl_certificate_validation=False):
"""Creates a ssl.SSLContext with the given settings.
Args:
cafile: A str identifying the resolved path to the cafile. If not set,
this will use the system default cafile.
disable_ssl_certificate_validation: A boolean indicating whether
certificate verification is disabled. For security purposes, it is
highly recommended that certificate verification remain enabled.
Returns:
An ssl.SSLContext instance, or None if the version of Python being used
doesn't support it.
"""
# Attempt to create a context; this should succeed in Python 2 versions
# 2.7.9+ and Python 3 versions 3.4+.
try:
if disable_ssl_certificate_validation:
ssl._create_default_https_context = ssl._create_unverified_context
ssl_context = ssl.create_default_context()
else:
ssl_context = ssl.create_default_context(cafile=cafile)
except AttributeError:
# Earlier versions lack ssl.create_default_context()
# Rather than raising the exception, no context will be provided for
# legacy support. Of course, this means no certificate validation is
# taking place!
return None
return ssl_context
def BuildOpener(self):
"""Builds an OpenerDirector instance using the ProxyConfig settings.
In Python 2, this will return a urllib2.OpenerDirector instance. In Python
3, this will return a urllib.request.OpenerDirector instance.
Returns:
An OpenerDirector instance instantiated with settings defined in the
ProxyConfig instance.
"""
return urllib2.build_opener(*self.GetHandlers())
def GetHandlers(self):
"""Retrieve the appropriate urllib2 handlers for the given configuration.
Returns:
A list of urllib2.BaseHandler subclasses to be used when making calls
with proxy.
"""
handlers = []
if self.ssl_context:
handlers.append(urllib2.HTTPSHandler(context=self.ssl_context))
if self.proxies:
handlers.append(urllib2.ProxyHandler(self.proxies))
return handlers
def GetSudsProxyTransport(self):
"""Retrieve a suds.transport.http.HttpTransport to be used with suds.
This will apply all handlers relevant to the usage of the proxy
configuration automatically.
Returns:
A _SudsProxyTransport instance used to make requests with suds using the
configured proxy.
"""
return self._SudsProxyTransport(self.GetHandlers())
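# Construction sketch (illustrative only; the proxy URLs and paths are made up):
#
#   proxy_config = ProxyConfig(
#       http_proxy='http://user:pass@proxy.example.com:8080',
#       https_proxy='https://user:pass@proxy.example.com:8080',
#       cafile='/path/to/ca.pem')
#   opener = proxy_config.BuildOpener()  # urllib2/urllib.request OpenerDirector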
class _ZeepProxyTransport(zeep.transports.Transport):
"""A Zeep transport which configures caching, proxy support, and timeouts."""
def __init__(self, timeout, proxy_config, cache):
"""Initializes _ZeepProxyTransport.
Args:
timeout: An integer timeout in MS for connections.
proxy_config: A ProxyConfig instance representing proxy settings.
cache: A zeep.cache.Base instance representing a cache strategy to employ.
"""
if not cache:
cache = zeep.cache.SqliteCache()
elif cache == ZeepServiceProxy.NO_CACHE:
cache = None
super(_ZeepProxyTransport, self).__init__(
timeout=timeout, operation_timeout=timeout, cache=cache)
self.session.proxies = proxy_config.proxies
class _SudsProxyTransport(suds.transport.http.HttpTransport):
"""A transport that applies the given handlers for usage with a proxy."""
def __init__(self, timeout, proxy_config):
"""Initializes SudsHTTPSTransport.
Args:
timeout: An integer for the connection timeout time.
proxy_config: A ProxyConfig instance representing proxy settings.
"""
suds.transport.http.HttpTransport.__init__(self, timeout=timeout)
self.handlers = proxy_config.GetHandlers()
def u2handlers(self):
"""Get a collection of urllib2 handlers to be installed in the opener.
Returns:
A list of handlers to be installed to the OpenerDirector used by suds.
"""
# Start with the default set of handlers.
return_handlers = suds.transport.http.HttpTransport.u2handlers(self)
return_handlers.extend(self.handlers)
return return_handlers
class SoapPacker(object):
"""A utility class to be passed to argument packing functions.
A subclass should be used in cases where custom logic is needed to pack a
given object in argument packing functions.
"""
@classmethod
  def Pack(cls, obj, version):
raise NotImplementedError('You must subclass SoapPacker.')
def GetSchemaHelperForLibrary(lib_name):
if lib_name == 'suds':
return SudsSchemaHelper
elif lib_name == 'zeep':
return ZeepSchemaHelper
class GoogleSchemaHelper(object):
"""Base class for type to xml conversion.
Only used for AdWords reporting specialness. A subclass should be created
for each underlying SOAP implementation.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def GetSoapXMLForComplexType(self, type_name, value):
"""Return an XML string representing a SOAP complex type.
Args:
type_name: The name of the type with namespace prefix if necessary.
value: A python dictionary to hydrate the type instance with.
Returns:
A string containing the SOAP XML for the type.
"""
return
class SudsSchemaHelper(GoogleSchemaHelper):
"""Suds schema helper implementation."""
def __init__(self, endpoint, timeout,
proxy_config, namespace_override, cache):
"""Initializes a SudsSchemaHelper.
Args:
endpoint: A string representing the URL to connect to.
timeout: An integer timeout in MS used to determine connection timeouts.
proxy_config: A googleads.common.ProxyConfig instance which represents
the proxy settings needed.
namespace_override: A string to doctor the WSDL namespace with.
cache: An instance of suds.cache.Cache to use for caching.
Raises:
GoogleAdsValueError: The wrong type was given for caching.
"""
if cache and not isinstance(cache, suds.cache.Cache):
raise googleads.errors.GoogleAdsValueError(
'Must use a proper suds cache with suds.')
transport = _SudsProxyTransport(timeout, proxy_config)
try:
doctor = suds.xsd.doctor.ImportDoctor(
suds.xsd.doctor.Import(
namespace_override, endpoint))
self.suds_client = suds.client.Client(
endpoint,
transport=transport,
plugins=[LoggingMessagePlugin()],
cache=cache,
doctor=doctor)
self._namespace_override = namespace_override
except suds.transport.TransportError as e:
raise googleads.errors.GoogleAdsSoapTransportError(str(e))
def GetSoapXMLForComplexType(self, type_name, value):
"""Return an XML string representing a SOAP complex type.
Args:
type_name: The name of the type with namespace prefix if necessary.
value: A python dictionary to hydrate the type instance with.
Returns:
A string containing the SOAP XML for the type.
"""
schema = self.suds_client.wsdl.schema
definition_type = schema.elements[(type_name, self._namespace_override)]
marshaller = suds.mx.literal.Literal(schema)
content = suds.mx.Content(
tag=type_name, value=value,
name=type_name, type=definition_type)
data = marshaller.process(content)
return data
class ZeepSchemaHelper(GoogleSchemaHelper):
"""Zeep schema helper implementation."""
def __init__(self, endpoint, timeout,
proxy_config, namespace_override, cache):
"""Initializes a ZeepSchemaHelper.
Args:
endpoint: A string representing the URL to connect to.
timeout: An integer timeout in MS used to determine connection timeouts.
proxy_config: A googleads.common.ProxyConfig instance which represents
the proxy settings needed.
namespace_override: A string to doctor the WSDL namespace with.
cache: An instance of zeep.cache.Base to use for caching.
Raises:
GoogleAdsValueError: The wrong type was given for caching.
"""
if cache and not (isinstance(cache, zeep.cache.Base) or
cache == ZeepServiceProxy.NO_CACHE):
raise googleads.errors.GoogleAdsValueError(
'Must use a proper zeep cache with zeep.')
transport = _ZeepProxyTransport(timeout, proxy_config, cache)
try:
data = transport.load(endpoint)
except requests.exceptions.HTTPError as e:
raise googleads.errors.GoogleAdsSoapTransportError(str(e))
self.schema = zeep.xsd.Schema(lxml.etree.fromstring(data))
self._namespace_override = namespace_override
self._element_maker = lxml.builder.ElementMaker(
namespace=namespace_override, nsmap={'tns': namespace_override})
def GetSoapXMLForComplexType(self, type_name, value):
"""Return an XML string representing a SOAP complex type.
Args:
type_name: The name of the type with namespace prefix if necessary.
value: A python dictionary to hydrate the type instance with.
Returns:
A string containing the SOAP XML for the type.
"""
element = self.schema.get_element(
'{%s}%s' % (self._namespace_override, type_name))
result_element = self._element_maker(element.qname.localname)
element_value = element(**value)
element.type.render(result_element, element_value)
data = lxml.etree.tostring(result_element).strip()
return data
def GetServiceClassForLibrary(lib_name):
if lib_name == 'suds':
return SudsServiceProxy
elif lib_name == 'zeep':
return ZeepServiceProxy
class GoogleSoapService(object):
"""Base class for a SOAP service representation.
A subclass should be created for each underlying SOAP implementation.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, header_handler, packer, version):
"""Initializes a SOAP service.
Args:
header_handler: A googleads.common.HeaderHandler instance used to set
SOAP and HTTP headers.
packer: A googleads.common.SoapPacker instance used to transform
entities.
version: the version of the current API, e.g. 'v201811'
"""
self._header_handler = header_handler
self._packer = packer
self._version = version
self._method_proxies = {}
@abc.abstractmethod
def CreateSoapElementForType(self, type_name):
"""Create an instance of a SOAP type.
Args:
type_name: The name of the type.
Returns:
An instance of type type_name.
"""
@abc.abstractmethod
def GetRequestXML(self, method, *args):
"""Get the raw SOAP XML for a request.
Args:
method: The method name.
*args: A list of arguments to be passed to the method.
Returns:
An element containing the raw XML that would be sent as the request.
"""
@abc.abstractmethod
def _WsdlHasMethod(self, method_name):
"""Determine if the wsdl contains a method.
Args:
method_name: The name of the method to search.
Returns:
True if the method is in the WSDL, otherwise False.
"""
@abc.abstractmethod
def _CreateMethod(self, method_name):
"""Create a method wrapping an invocation to the SOAP service.
Args:
method_name: A string identifying the name of the SOAP method to call.
Returns:
A callable that can be used to make the desired SOAP request.
"""
def __getattr__(self, attr):
"""Support service.method() syntax."""
if self._WsdlHasMethod(attr):
if attr not in self._method_proxies:
self._method_proxies[attr] = self._CreateMethod(attr)
return self._method_proxies[attr]
else:
raise googleads.errors.GoogleAdsValueError('Service %s not found' % attr)
class SudsServiceProxy(GoogleSoapService):
"""Wraps a suds service object, allowing custom logic to be injected.
This class is responsible for refreshing the HTTP and SOAP headers, so changes
to the client object will be reflected in future SOAP calls, and for
transforming SOAP call input parameters, allowing dictionary syntax to be used
with all SOAP complex types.
Attributes:
suds_client: The suds.client.Client this service belongs to. If you are
familiar with suds and want to use autogenerated classes, you can access
the client and its factory,
"""
def __init__(self, endpoint, header_handler, packer, proxy_config,
timeout, version, cache=None):
"""Initializes a suds service proxy.
Args:
endpoint: A URL for the service.
header_handler: A HeaderHandler responsible for setting the SOAP and HTTP
headers on the service client.
packer: An optional subclass of googleads.common.SoapPacker that provides
customized packing logic.
proxy_config: A ProxyConfig that represents proxy settings.
timeout: An integer to set the connection timeout.
version: the current version of the library, e.g. 'v201811'
cache: A suds.cache.Cache instance to pass to the underlying SOAP
library for caching.
Raises:
GoogleAdsValueError: The wrong type was given for caching.
"""
super(SudsServiceProxy, self).__init__(header_handler, packer, version)
if cache and not isinstance(cache, suds.cache.Cache):
raise googleads.errors.GoogleAdsValueError(
'Must use a proper suds cache with suds.')
transport = _SudsProxyTransport(timeout, proxy_config)
self._method_proxies = {}
try:
self.suds_client = suds.client.Client(
endpoint,
timeout=timeout,
cache=cache,
transport=transport,
plugins=[LoggingMessagePlugin()])
except suds.transport.TransportError as e:
raise googleads.errors.GoogleAdsSoapTransportError(str(e))
def GetRequestXML(self, method, *args):
"""Get the raw SOAP XML for a request.
Args:
method: The method name.
*args: A list of arguments to be passed to the method.
Returns:
An element containing the raw XML that would be sent as the request.
"""
self.suds_client.set_options(nosend=True)
service_request = (getattr(self, method))(*args).envelope
self.suds_client.set_options(nosend=False)
return lxml.etree.fromstring(service_request)
def CreateSoapElementForType(self, type_name):
"""Create an instance of a SOAP type.
Args:
type_name: The name of the type.
Returns:
An instance of type type_name.
"""
return self.suds_client.factory.create(type_name)
def SetHeaders(self, soap_headers, http_headers):
"""Set the headers for the underlying client.
Args:
soap_headers: A SOAP element for the SOAP headers.
http_headers: A dictionary for the http headers.
"""
self.suds_client.set_options(soapheaders=soap_headers, headers=http_headers)
def _WsdlHasMethod(self, method_name):
"""Determine if the wsdl contains a method.
Args:
method_name: The name of the method to search.
Returns:
True if the method is in the WSDL, otherwise False.
"""
return method_name in self.suds_client.wsdl.services[0].ports[0].methods
def _CreateMethod(self, method_name):
"""Create a method wrapping an invocation to the SOAP service.
Args:
method_name: A string identifying the name of the SOAP method to call.
Returns:
A callable that can be used to make the desired SOAP request.
"""
soap_service_method = getattr(self.suds_client.service, method_name)
def MakeSoapRequest(*args):
"""Perform a SOAP call."""
AddToUtilityRegistry('suds')
self.SetHeaders(
self._header_handler.GetSOAPHeaders(self.CreateSoapElementForType),
self._header_handler.GetHTTPHeaders())
try:
return soap_service_method(
*[_PackForSuds(arg, self.suds_client.factory,
self._packer) for arg in args])
except suds.WebFault as e:
if _logger.isEnabledFor(logging.WARNING):
_logger.warning('Response summary - %s',
_ExtractResponseSummaryFields(e.document))
_logger.debug('SOAP response:\n%s', e.document.str())
if not hasattr(e.fault, 'detail'):
exc = (googleads.errors.
GoogleAdsServerFault(e.document, message=e.fault.faultstring))
raise exc # Done this way for 2to3
# Before re-throwing the WebFault exception, an error object needs to be
# wrapped in a list for safe iteration.
fault = e.fault.detail.ApiExceptionFault
if not hasattr(fault, 'errors') or fault.errors is None:
exc = (googleads.errors.
GoogleAdsServerFault(e.document, message=e.fault.faultstring))
raise exc # Done this way for 2to3
obj = fault.errors
if not isinstance(obj, list):
fault.errors = [obj]
exc = googleads.errors.GoogleAdsServerFault(e.document, fault.errors,
message=e.fault.faultstring)
raise exc # Done this way for 2to3
return MakeSoapRequest
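# Call sketch (illustrative; the service and method names are placeholders):
# once a SudsServiceProxy is created, WSDL operations can be invoked with plain
# dicts, which MakeSoapRequest packs via _PackForSuds before dispatching.
#
#   campaign_service = SudsServiceProxy(endpoint, header_handler, packer,
#                                       proxy_config, 3600, 'v201809')
#   campaign_service.get({'fields': ['Id', 'Name']})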
class _ZeepAuthHeaderPlugin(zeep.Plugin):
"""A zeep plugin responsible for setting our custom HTTP headers."""
def __init__(self, header_handler):
"""Instantiate a new _ZeepAuthHeaderPlugin.
Args:
header_handler: A googleads.common.HeaderHandler instance.
"""
self._header_handler = header_handler
def egress(self, envelope, http_headers, operation, binding_options):
"""Overriding the egress function to set our headers.
Args:
envelope: An Element with the SOAP request data.
http_headers: A dict of the current http headers.
operation: The SoapOperation instance.
binding_options: An options dict for the SOAP binding.
Returns:
A tuple of the envelope and headers.
"""
custom_headers = self._header_handler.GetHTTPHeaders()
http_headers.update(custom_headers)
return envelope, http_headers
class ZeepServiceProxy(GoogleSoapService):
"""Wraps a zeep service object, allowing custom logic to be injected.
This class is responsible for refreshing the HTTP and SOAP headers, so changes
to the client object will be reflected in future SOAP calls, and for
transforming SOAP call input parameters, allowing dictionary syntax to be used
with all SOAP complex types.
Attributes:
zeep_client: The zeep.Client this service belongs to. If you are
familiar with zeep, you can utilize this directly.
"""
NO_CACHE = 'zeep_no_cache'
def __init__(self, endpoint, header_handler, packer,
proxy_config, timeout, version, cache=None):
"""Initializes a zeep service proxy.
Args:
endpoint: A URL for the service.
header_handler: A HeaderHandler responsible for setting the SOAP and HTTP
headers on the service client.
packer: An optional subclass of googleads.common.SoapPacker that provides
customized packing logic.
proxy_config: A ProxyConfig that represents proxy settings.
timeout: An integer to set the connection timeout.
version: the version of the current API, e.g. 'v201811'
cache: An instance of zeep.cache.Base to pass to the underlying SOAP
library for caching. A file cache by default. To disable, pass
googleads.common.ZeepServiceProxy.NO_CACHE.
Raises:
GoogleAdsValueError: The wrong type was given for caching.
"""
super(ZeepServiceProxy, self).__init__(header_handler, packer, version)
if cache and not (isinstance(cache, zeep.cache.Base) or
cache == self.NO_CACHE):
raise googleads.errors.GoogleAdsValueError(
'Must use a proper zeep cache with zeep.')
transport = _ZeepProxyTransport(timeout, proxy_config, cache)
plugins = [_ZeepAuthHeaderPlugin(header_handler),
googleads.util.ZeepLogger()]
try:
self.zeep_client = zeep.Client(
endpoint, transport=transport, plugins=plugins)
except requests.exceptions.HTTPError as e:
raise googleads.errors.GoogleAdsSoapTransportError(str(e))
first_service = list(self.zeep_client.wsdl.services.itervalues())[0]
first_port = list(first_service.ports.itervalues())[0]
self._method_bindings = first_port.binding
def CreateSoapElementForType(self, type_name):
"""Create an instance of a SOAP type.
Args:
type_name: The name of the type.
Returns:
An instance of type type_name.
"""
return self.zeep_client.get_type(type_name)()
def GetRequestXML(self, method, *args):
"""Get the raw SOAP XML for a request.
Args:
method: The method name.
*args: A list of arguments to be passed to the method.
Returns:
An element containing the raw XML that would be sent as the request.
"""
packed_args = self._PackArguments(method, args, set_type_attrs=True)
headers = self._GetZeepFormattedSOAPHeaders()
return self.zeep_client.create_message(
self.zeep_client.service, method, *packed_args, _soapheaders=headers)
def _WsdlHasMethod(self, method_name):
"""Determine if a method is in the wsdl.
Args:
method_name: The name of the method.
Returns:
True if the method is in the wsdl, otherwise False.
"""
try:
self._method_bindings.get(method_name)
return True
except ValueError:
return False
def _GetBindingNamespace(self):
"""Return a string with the namespace of the service binding in the WSDL."""
return (list(self.zeep_client.wsdl.bindings.itervalues())[0]
.port_name.namespace)
def _PackArguments(self, method_name, args, set_type_attrs=False):
"""Properly pack input dictionaries for zeep.
Pack a list of python dictionaries into XML objects. Dictionaries which
contain an 'xsi_type' entry are converted into that type instead of the
argument default. This allows creation of complex objects which include
inherited types.
Args:
method_name: The name of the method that will be called.
args: A list of dictionaries containing arguments to the method.
set_type_attrs: A boolean indicating whether or not attributes that end
in .Type should be set. This is only necessary for batch job service.
Returns:
A list of XML objects that can be passed to zeep.
"""
# Get the params for the method to find the initial types to instantiate.
op_params = self.zeep_client.get_element(
'{%s}%s' % (self._GetBindingNamespace(), method_name)).type.elements
result = [self._PackArgumentsHelper(param, param_data, set_type_attrs)
for ((_, param), param_data) in izip(op_params, args)]
return result
@classmethod
def _IsBase64(cls, s):
"""An imperfect but decent method for determining if a string is base64.
Args:
s: A string with the data to test.
Returns:
True if s is base64, else False.
"""
try:
if base64.b64encode(base64.b64decode(s)).decode('utf-8') == s:
return True
except (TypeError, binascii.Error):
pass
return False
def _PackArgumentsHelper(self, elem, data, set_type_attrs):
"""Recursive helper for PackArguments.
Args:
elem: The element type we are creating.
data: The data to instantiate it with.
set_type_attrs: A boolean indicating whether or not attributes that end
in .Type should be set. This is only necessary for batch job service.
Returns:
An instance of type 'elem'.
"""
if self._packer:
data = self._packer.Pack(data, self._version)
if isinstance(data, dict): # Instantiate from simple Python dict
# See if there is a manually specified derived type.
type_override = data.get('xsi_type')
if type_override:
elem_type = self._DiscoverElementTypeFromLocalname(type_override)
else:
elem_type = elem.type
data_formatted = data.iteritems()
packed_result = self._CreateComplexTypeFromData(
elem_type, type_override is not None, data_formatted, set_type_attrs)
elif isinstance(data, zeep.xsd.CompoundValue):
# Here the data is already a SOAP element but we still need to look
# through it in case it has been edited with Python dicts.
elem_type = data._xsd_type
data_formatted = zip(dir(data), [data[k] for k in dir(data)])
packed_result = self._CreateComplexTypeFromData(
elem_type, False, data_formatted, set_type_attrs)
elif isinstance(data, (list, tuple)):
packed_result = [self._PackArgumentsHelper(elem, item, set_type_attrs)
for item in data]
else:
if elem.type.name == 'base64Binary' and self._IsBase64(data):
_logger.warn('Passing data to base64 field %s that may '
'already be encoded. Do not pre-encode base64 '
'fields with zeep.', elem.name)
packed_result = data
return packed_result
def _DiscoverElementTypeFromLocalname(self, type_localname):
"""Searches all namespaces for a type by name.
Args:
type_localname: The name of the type.
Returns:
A fully qualified SOAP type with the specified name.
Raises:
A zeep.exceptions.LookupError if the type cannot be found in any
namespace.
"""
elem_type = None
last_exception = None
for ns_prefix in self.zeep_client.wsdl.types.prefix_map.values():
try:
elem_type = self.zeep_client.get_type(
'{%s}%s' % (ns_prefix, type_localname))
except zeep.exceptions.LookupError as e:
last_exception = e
continue
break
if not elem_type:
raise last_exception
return elem_type
def _CreateComplexTypeFromData(
self, elem_type, type_is_override, data, set_type_attrs):
"""Initialize a SOAP element with specific data.
Args:
elem_type: The type of the element to create.
type_is_override: A boolean specifying if the type is being overridden.
data: The data to hydrate the type with.
set_type_attrs: A boolean indicating whether or not attributes that end
in .Type should be set. This is only necessary for batch job service.
Returns:
      A fully initialized SOAP element.
"""
elem_arguments = dict(elem_type.elements)
# A post order traversal of the original data, need to instantiate from
# the bottom up.
instantiated_arguments = {
k: self._PackArgumentsHelper(elem_arguments[k], v, set_type_attrs)
for k, v in data if k != 'xsi_type'}
if set_type_attrs:
found_type_attr = next((e_name for e_name, _ in elem_type.elements
if e_name.endswith('.Type')), None)
if found_type_attr and type_is_override:
instantiated_arguments[found_type_attr] = elem_type.qname.localname
# Now go back through the tree instantiating SOAP types as we go.
return elem_type(**instantiated_arguments)
def _GetZeepFormattedSOAPHeaders(self):
"""Returns a dict with SOAP headers in the right format for zeep."""
headers = self._header_handler.GetSOAPHeaders(self.CreateSoapElementForType)
soap_headers = {'RequestHeader': headers}
return soap_headers
def _CreateMethod(self, method_name):
"""Create a method wrapping an invocation to the SOAP service.
Args:
method_name: A string identifying the name of the SOAP method to call.
Returns:
A callable that can be used to make the desired SOAP request.
"""
soap_service_method = self.zeep_client.service[method_name]
def MakeSoapRequest(*args):
AddToUtilityRegistry('zeep')
soap_headers = self._GetZeepFormattedSOAPHeaders()
packed_args = self._PackArguments(method_name, args)
try:
return soap_service_method(
*packed_args, _soapheaders=soap_headers)['body']['rval']
except zeep.exceptions.Fault as e:
error_list = ()
if e.detail is not None:
underlying_exception = e.detail.find(
'{%s}ApiExceptionFault' % self._GetBindingNamespace())
fault_type = self.zeep_client.get_element(
'{%s}ApiExceptionFault' % self._GetBindingNamespace())
fault = fault_type.parse(
underlying_exception, self.zeep_client.wsdl.types)
error_list = fault.errors or error_list
raise googleads.errors.GoogleAdsServerFault(
e.detail, errors=error_list, message=e.message)
return MakeSoapRequest
class HeaderHandler(object):
"""A generic header handler interface that must be subclassed by each API."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def GetSOAPHeaders(self, create_method):
"""Returns the required SOAP Headers."""
@abc.abstractmethod
def GetHTTPHeaders(self):
"""Returns the required HTTP headers."""
class LoggingMessagePlugin(suds.plugin.MessagePlugin):
"""A MessagePlugin used to log request summaries."""
def marshalled(self, context):
if _logger.isEnabledFor(logging.INFO):
_logger.info('Request summary - %s',
_ExtractRequestSummaryFields(context.envelope))
def parsed(self, context):
if _logger.isEnabledFor(logging.INFO):
_logger.info('Response summary - %s',
_ExtractResponseSummaryFields(context.reply))
def _ExtractRequestSummaryFields(document):
"""Extract logging fields from the request's suds.sax.element.Element.
Args:
document: A suds.sax.element.Element instance containing the API request.
Returns:
A dict mapping logging field names to their corresponding value.
"""
headers = document.childAtPath('Header/RequestHeader')
body = document.childAtPath('Body')
summary_fields = {
'methodName': body.getChildren()[0].name
}
# Extract AdWords-specific fields if they exist.
# Note: We need to check if None because this will always evaluate False.
client_customer_id = headers.getChild('clientCustomerId')
if client_customer_id is not None:
summary_fields['clientCustomerId'] = client_customer_id.text
# Extract Ad Manager-specific fields if they exist.
# Note: We need to check if None because this will always evaluate False.
network_code = headers.getChild('networkCode')
if network_code is not None:
summary_fields['networkCode'] = network_code.text
return summary_fields
def _ExtractResponseSummaryFields(document):
"""Extract logging fields from the response's suds.sax.document.Document.
Args:
document: A suds.sax.document.Document instance containing the parsed
API response for a given API request.
Returns:
A dict mapping logging field names to their corresponding value.
"""
headers = document.childAtPath('Envelope/Header/ResponseHeader')
body = document.childAtPath('Envelope/Body')
summary_fields = {}
if headers is not None:
summary_fields['requestId'] = headers.getChild('requestId').text
summary_fields['responseTime'] = headers.getChild('responseTime').text
# Extract AdWords-specific summary fields if they are present.
# Note: We need to check if None because this will always evaluate False.
service_name = headers.getChild('serviceName')
if service_name is not None:
summary_fields['serviceName'] = service_name.text
method_name = headers.getChild('methodName')
if method_name is not None:
summary_fields['methodName'] = method_name.text
operations = headers.getChild('operations')
if operations is not None:
summary_fields['operations'] = operations.text
if body is not None:
# Extract fault if it exists.
fault = body.getChild('Fault')
if fault is not None:
summary_fields['isFault'] = True
# Cap length of faultstring to 16k characters for summary.
summary_fields['faultMessage'] = fault.getChild(
'faultstring').text[:16000]
else:
summary_fields['isFault'] = False
return summary_fields
|
py | 1a538186effb1a18b3ed1c1a784363eb3158b2f1 | from dinofw.utils.config import ErrorCodes
from test.base import BaseTest
from test.functional.base_functional import BaseServerRestApi
class TestGetMessageInfo(BaseServerRestApi):
def test_get_message_info_1v1(self):
self.assert_groups_for_user(0)
group_message = self.send_1v1_message()
info = self.get_message_info(
user_id=BaseTest.USER_ID,
message_id=group_message["message_id"],
group_id=group_message["group_id"],
created_at=group_message["created_at"],
expected_response_code=200
)
self.assertEqual(group_message["message_payload"], info["message_payload"])
def test_get_message_info_1v1_wrong_created_at(self):
self.assert_groups_for_user(0)
group_message = self.send_1v1_message()
response = self.get_message_info(
user_id=BaseTest.USER_ID,
message_id=group_message["message_id"],
group_id=group_message["group_id"],
created_at=group_message["created_at"] - 3600,
expected_response_code=400
)
self.assertEqual(int(response["detail"].split(":")[0]), ErrorCodes.NO_SUCH_MESSAGE)
def test_get_message_info_1v1_wrong_user_id(self):
self.assert_groups_for_user(0)
group_message = self.send_1v1_message()
response = self.get_message_info(
user_id=BaseTest.OTHER_USER_ID,
message_id=group_message["message_id"],
group_id=group_message["group_id"],
created_at=group_message["created_at"],
expected_response_code=400
)
self.assertEqual(int(response["detail"].split(":")[0]), ErrorCodes.NO_SUCH_MESSAGE)
def test_get_message_info_1v1_wrong_group_id(self):
self.assert_groups_for_user(0)
group_message = self.send_1v1_message()
response = self.get_message_info(
user_id=BaseTest.USER_ID,
message_id=group_message["message_id"],
group_id="bad-group-id",
created_at=group_message["created_at"],
expected_response_code=400
)
self.assertEqual(int(response["detail"].split(":")[0]), ErrorCodes.NO_SUCH_MESSAGE)
|
py | 1a53837ce2812b20ad58ad1af786ae5c5474e143 | import math
import numpy as np
def vec3(x, y, z):
return np.array([x, y, z], dtype=np.float32)
def radians(v):
return np.radians(v)
def identity():
return np.identity(4, dtype=np.float32)
def empty():
return np.zeros([4, 4], dtype=np.float32)
def magnitude(v):
return np.linalg.norm(v)
def normalize(v):
m = magnitude(v)
return v if m == 0 else v / m
def dot(u, v):
return np.sum(u * v)
def cross(u, v):
res = vec3(0, 0, 0)
res[0] = u[1] * v[2] - u[2] * v[1]
res[1] = u[2] * v[0] - u[0] * v[2]
res[2] = u[0] * v[1] - u[1] * v[0]
return res
# below functions can be optimized
def translate(m, v):
res = np.copy(m)
res[:,3] = m[:,0] * v[0] + m[:,1] * v[1] + m[:,2] * v[2] + m[:,3]
return res
def rotate(m, angle, v):
a = angle
c = np.cos(a)
s = np.sin(a)
axis = normalize(v)
temp = (1 - c) * axis
rot = empty()
rot[0][0] = c + temp[0] * axis[0]
rot[0][1] = temp[0] * axis[1] + s * axis[2]
rot[0][2] = temp[0] * axis[2] - s * axis[1]
rot[1][0] = temp[1] * axis[0] - s * axis[2]
rot[1][1] = c + temp[1] * axis[1]
rot[1][2] = temp[1] * axis[2] + s * axis[0]
rot[2][0] = temp[2] * axis[0] + s * axis[1]
rot[2][1] = temp[2] * axis[1] - s * axis[0]
rot[2][2] = c + temp[2] * axis[2]
res = empty()
res[:,0] = m[:,0] * rot[0][0] + m[:,1] * rot[0][1] + m[:,2] * rot[0][2]
res[:,1] = m[:,0] * rot[1][0] + m[:,1] * rot[1][1] + m[:,2] * rot[1][2]
res[:,2] = m[:,0] * rot[2][0] + m[:,1] * rot[2][1] + m[:,2] * rot[2][2]
res[:,3] = m[:,3]
return res
def perspective(fovy, aspect, zNear, zFar):
tanHalfFovy = np.tan(fovy / 2)
res = empty()
res[0][0] = 1 / (aspect * tanHalfFovy)
res[1][1] = 1 / (tanHalfFovy)
res[2][3] = -1
res[2][2] = - (zFar + zNear) / (zFar - zNear)
res[3][2] = -(2 * zFar * zNear) / (zFar - zNear)
return res.T
def ortho(left, right, bottom, top, zNear, zFar):
#res = np.ones([4, 4], dtype=np.float32)
res = identity()
res[0][0] = 2 / (right - left)
res[1][1] = 2 / (top - bottom)
res[2][2] = - 2 / (zFar - zNear)
res[3][0] = - (right + left) / (right - left)
res[3][1] = - (top + bottom) / (top - bottom)
res[3][2] = - (zFar + zNear) / (zFar - zNear)
return res.T
def lookat(eye, center, up):
f = normalize(center - eye)
s = normalize(cross(f, up))
u = cross(s, f)
res = identity()
res[0][0] = s[0]
res[1][0] = s[1]
res[2][0] = s[2]
res[0][1] = u[0]
res[1][1] = u[1]
res[2][1] = u[2]
res[0][2] = -f[0]
res[1][2] = -f[1]
res[2][2] = -f[2]
res[3][0] = -dot(s, eye)
res[3][1] = -dot(u, eye)
res[3][2] = -dot(f, eye)
return res.T
def transform(d, m):
return np.dot(m, d.T).T
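# Usage sketch (illustrative): composing a model-view-projection matrix with the
# helpers above; all values are arbitrary.
#
#   eye, center, up = vec3(0, 0, 3), vec3(0, 0, 0), vec3(0, 1, 0)
#   view = lookat(eye, center, up)
#   proj = perspective(radians(45.0), 16.0 / 9.0, 0.1, 100.0)
#   mvp = np.dot(proj, np.dot(view, identity()))
#   point_clip = transform(np.array([[0, 0, 0, 1]], dtype=np.float32), mvp)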
|
py | 1a5384eda43a3b828168858e0a0aa173272abebe | import re
import subprocess
import threading
import time
# from time import time
from config import *
from utils import *
def pat(test_data_in, class_path, jar, prt=False):
inputfile = open(test_data_in).readlines()
# print("@@@", test_data_in)
basetime, maxtime = datacheck(test_data_in)
# input = parseInput(inputfile)
# print("@@@", input)
start = time.time()
outputfile = callProgram(r"java -Xmx128m -cp {} {}".format(jar, class_path), inputfile)
end = time.time()
passed_time = end - start
# output = parseOutput(outputfile)
if prt:
for line in outputfile:
print(line)
# A, B, C = parseOutputABC(outputfile)
# print("Elevator A:")
# for line in A:
# print("\033[1;34m{}\033[0m".format(line))
# print("Elevator B:")
# for line in B:
# print("\033[1;35m{}\033[0m".format(line))
# print("Elevator C:")
# for line in C:
# print("\033[1;36m{}\033[0m".format(line))
# print(outputfile)
ac = checkAll(inputfile, outputfile)
t_ac = passed_time < maxtime
if ac is True and t_ac is True:
if passed_time > basetime + 20:
print("\033[1;33mWarning: {}, time:{}, base_time: {}\033[0m"
.format(test_data_in, passed_time, basetime, maxtime))
return True, passed_time
print("\033[1;32mPassed: {}, time:{}, base_time: {}\033[0m".format(test_data_in, passed_time, basetime))
return True, passed_time
if ac is not True:
print("\033[1;31mFailed: {}\n\tWA: {}\033[0m".format(test_data_in, ac))
return False, passed_time
if t_ac is not True:
print("\033[1;31mWarning: {}\n\tTLE: {}, max_time: {}\033[0m".format(test_data_in, passed_time, maxtime))
return True, passed_time
def parseInput(inputfile):
personRequests = []
for line in inputfile:
result = re.search(r'\[(.*)\](-?\d+)-FROM-(-?\d+)-TO-(-?\d+)', line.strip(), re.M)
personRequests.append(result.groups())
return personRequests
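# Example (illustrative): an input line such as
#   "[1.5]3-FROM-2-TO-8"
# is parsed by parseInput into the tuple ('1.5', '3', '2', '8'),
# i.e. (timestamp, person id, from-floor, to-floor), all as strings.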
def run(p, output):
while True:
line = p.stdout.readline()
if not line:
break
# print(line)
output.append(line.decode().strip())
def callProgram(cmd, inputFile):
# print(cmd)
# os.chdir("temp")
# print(inputFile)
output = []
if cfg.CLOSE_STDERR:
p = subprocess.Popen(cmd,
shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
else:
p = subprocess.Popen(cmd,
shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
w = threading.Thread(target=run, args=(p, output,))
last_time = 0
for line in inputFile:
result = re.search(r'\[(.*)\](.*)', line.strip(), re.M)
sleeptime = result.group(1)
inputLine = result.group(2)
# print(sleeptime)
time.sleep(float(sleeptime) - last_time)
last_time = float(sleeptime)
write_str = inputLine + '\r\n'
# print(write_str)
p.stdin.write(write_str.encode("UTF-8"))
p.stdin.flush()
time.sleep(0.01)
w.start()
p.stdin.close()
try:
if p.wait(cfg.TIME_LIMIT) != 0:
return output
except subprocess.TimeoutExpired:
p.kill()
p.terminate()
print("\033[1;31mError: TimeoutExpired: May in the endless loop/wait. Check your 'synchronized'.")
return output
# print(p.returncode)
if p.returncode != 0:
print("\033[1;31mError: return code {} is not 0\033[0m".format(p.returncode))
return output
# os.chdir("..")
# print(output)
return output
def parseOutputABC(inputfile):
sequenceA = []
sequenceB = []
sequenceC = []
for line in inputfile:
result = re.search(r'-A', line.strip(), re.M)
if result is not None:
sequenceA.append(line)
continue
result = re.search(r'-B', line.strip(), re.M)
if result is not None:
sequenceB.append(line)
continue
result = re.search(r'-C', line.strip(), re.M)
if result is not None:
sequenceC.append(line)
continue
return sequenceA, sequenceB, sequenceC
def parseOutput(inputfile):
sequence = []
# IN = []
# OUT = []
# OPEN = []
# CLOSE = []
for line in inputfile:
result = re.search(r'\[(.*)\]IN-(-?\d+)-(-?\d+)', line.strip(), re.M)
if result is not None:
sequence.append(["IN", result.groups()])
continue
result = re.search(r'\[(.*)\]OUT-(-?\d+)-(-?\d+)', line.strip(), re.M)
if result is not None:
sequence.append(["OUT", result.groups()])
continue
result = re.search(r'\[(.*)\]OPEN-(-?\d+)', line.strip(), re.M)
if result is not None:
sequence.append(["OPEN", result.groups()])
continue
result = re.search(r'\[(.*)\]CLOSE-(-?\d+)', line.strip(), re.M)
if result is not None:
sequence.append(["CLOSE", result.groups()])
continue
result = re.search(r'\[(.*)\]ARRIVE-(-?\d+)', line.strip(), re.M)
if result is not None:
sequence.append(["ARRIVE", result.groups()])
continue
return sequence
def check_1_1(input, output, eId):
sequence = output
time = []
level = []
for mesType, mes in sequence:
time.append(float(mes[0]))
if mesType == "IN" or mesType == "OUT":
level.append(int(mes[2]))
else:
level.append(int(mes[1]))
assert len(time) == len(level)
for i in range(len(time) - 1):
estimate_time = abs(level[i + 1] - level[i]) * cfg.LEVEL_TIME[eId]
if level[i] * level[i + 1] < 0:
estimate_time -= cfg.LEVEL_TIME[eId]
if not (time[i + 1] - time[i] >= estimate_time - cfg.EPS):
return "The elevator has no enough time to move such far distance at {}: {}. {}, {}".format(i, [sequence[i-1], sequence[i], sequence[i+1]], time[i + 1] - time[i], estimate_time - cfg.EPS)
return True
def check_1_2(intput, output, eId):
sequence = output
length = len(sequence)
for i, (mesType, mes) in enumerate(sequence):
if mesType == "OPEN" and i != 0:
index = i + 1
while index < len(sequence) and sequence[index][0] != "CLOSE":
index += 1
diff = cfg.DOOR_TIME
if index == len(sequence):
return "No Close with {}".format(sequence[i])
if sequence[index][0] == "CLOSE":
diff = cfg.DOOR_TIME * 2
            if not (float(sequence[index][1][0]) - float(sequence[i][1][0]) >= diff - cfg.EPS):
# print(sequence[i + 1], sequence[i])
return "The elevator has no enough time to open/close at {}: {}".format(i, [sequence[index], sequence[i], sequence[i+1]])
# if mesType == "CLOSE" and i != length - 1:
# index = i - 1
# while index > 0 and sequence[index][0] != "OPEN":
# index -= 1
# diff = 0.25
# if sequence[index][0] == "OPEN":
# diff = 0.5
# if not (float(sequence[i][1][0]) - float(sequence[index][1][0]) > diff - 0.001):
# # print(sequence[i], sequence[i - 1])
# return "The elevator has no enough time to close at {}".format(i)
return True
def getLevel(sequence):
mesType, mes = sequence
if mesType in ["OPEN", "CLOSE", "ARRIVE"]:
return int(mes[1])
else:
return int(mes[2])
def getTime(sequence):
return float(sequence[1][0])
def getId(sequence):
mesType, mes = sequence
assert mesType == "IN" or mesType == "OUT"
return int(mes[1])
def check_1_3(input, output, eId):
sequence = output
isClosed = True
for i, (mesType, mes) in enumerate(sequence):
if i != 1 and not isClosed and (getLevel(sequence[i - 1]) != getLevel(sequence[i])):
# print(sequence[i - 1], sequence[i])
return "The elevator is open at {} while you want it move: {}".format(i, [sequence[i-1], sequence[i], sequence[i+1]])
if mesType == "OPEN":
isClosed = False
if mesType == "CLOSE":
isClosed = True
return True
def check_1_4(input, output, eId):
sequence = output
isOpen = False
for i, (mesType, mes) in enumerate(sequence):
if not isOpen and (mesType == "IN" or mesType == "OUT"):
return "The elevator is closed at {} while you want someone in/out: {}".format(i, [sequence[i-1], sequence[i], sequence[i+1]])
if mesType == "OPEN":
if isOpen is True:
return "The elevator is open at {} while you want it open again: {}".format(i, [sequence[i - 1],
sequence[i],
sequence[i + 1]])
isOpen = True
if mesType == "CLOSE":
if isOpen is False:
return "The elevator is closed at {} while you want it close again: {}".format(i, [sequence[i - 1],
sequence[i],
sequence[i + 1]])
isOpen = False
if isOpen == True:
return "Elevator is not closed at the end."
return True
def check_3(input, output, eId):
sequence = output
levelNow = 1
arrivalTime = 0
for i, (mesType, mes) in enumerate(sequence):
if mesType == "ARRIVE":
level = getLevel(sequence[i])
if level in [0]:
return "Bad arrive 0 at {}: {}".format(i, [sequence[-1], sequence[i], sequence[i+1]])
time = getTime(sequence[i])
if levelNow in [-1, 1]:
if not 0 < abs(levelNow - level) <= 2:
return "Bad arrive 0 at {}: {}".format(i, [sequence[-1], sequence[i], sequence[i+1]])
else:
if not 0 < abs(levelNow - level) <= 1:
#print(levelNow, level)
return "Bad arrive at {}: {}".format(i, [sequence[-1], sequence[i], sequence[i+1]])
if not abs(arrivalTime - time) >= 0.4 - cfg.EPS:
return "Bad arrive at {}: {}".format(i, [sequence[-1], sequence[i], sequence[i+1]])
arrivalTime = time
levelNow = level
return True
def check_4(input, output, eId):
sequence = output
inside = set()
for i, (mesType, mes) in enumerate(sequence):
if mesType == "IN":
inside.add(getId(sequence[i]))
maxN = 0
if eId == "A":
maxN = 6
if eId == "B":
maxN = 8
if eId == "C":
maxN = 7
if len(inside) > maxN:
return "Elevator is full at {}: {}".format(i, [sequence[-1], sequence[i], sequence[i + 1]])
if mesType == "OUT":
if getId(sequence[i]) not in inside:
return "{} not in elevator at {}: {}".format(getId(sequence[i]), i, [sequence[-1], sequence[i], sequence[i + 1]])
inside.remove(getId(sequence[i]))
return True
def check_2(input, output):
id_now = {}
id_to = {}
id_set = []
ele = set()
for time, id_, from_, to in input:
id_now[int(id_)] = int(from_)
id_to[int(id_)] = int(to)
id_set.append(int(id_))
# print(id_now)
sequence = output
for i, (mesType, mes) in enumerate(sequence):
# print(id_now)
# print(sequence[i])
if mesType == "IN":
thisID = getId(sequence[i])
level = getLevel(sequence[i])
if (thisID not in id_now) or (level != id_now[thisID]):
return "{} is not at floor {} while you want the guy in.".format(thisID, level)
del id_now[thisID]
if thisID in ele:
return "{} has been in the elevator at {} while you want the guy in again.".format(thisID, i)
ele.add(thisID)
if mesType == "OUT":
thisID = getId(sequence[i])
if thisID not in ele:
return "{} is not in the elevator at {} while you want the guy out.".format(thisID, i)
ele.remove(thisID)
id_now[thisID] = getLevel(sequence[i])
if len(ele) > 0:
return "{} still in the elevator.".format(ele)
for id_ in id_set:
if id_now[int(id_)] != id_to[int(id_)]:
return "{} in the wrong floor at the end.".format(id_)
return True
def checkAllSequence(input, output, eId):
r_1_1 = check_1_1(input, output, eId)
r_1_2 = check_1_2(input, output, eId)
r_1_3 = check_1_3(input, output, eId)
r_1_4 = check_1_4(input, output, eId)
r_4 = check_4(input, output, eId)
# r_2 = check_2(input, output)
r_3 = check_3(input, output, eId)
if r_1_1 is not True:
return "check_1_1: \n\t" + str(r_1_1) + "\n\t" + str(output)
if r_1_2 is not True:
return "check_1_2: \n\t" + str(r_1_2) + "\n\t" + str(output)
if r_1_3 is not True:
return "check_1_3: \n\t" + str(r_1_3) + "\n\t" + str(output)
if r_1_4 is not True:
return "check_1_4: \n\t" + str(r_1_4) + "\n\t" + str(output)
if r_4 is not True:
return "check_4: \n\t" + str(r_4) + "\n\t" + str(output)
# if r_2 is not True:
# return "check_2: \n\t" + str(r_2) + "\n\t" + str(output)
if r_3 is not True:
return "check_3: \n\t" + str(r_3) + "\n\t" + str(output)
return True
def checkAll(inputfile, outputfile):
input = parseInput(inputfile)
sequenceAll = parseOutput(outputfile)
sequenceA, sequenceB, sequenceC = parseOutputABC(outputfile)
outputSequenceA = parseOutput(sequenceA)
outputSequenceB = parseOutput(sequenceB)
outputSequenceC = parseOutput(sequenceC)
r_A = checkAllSequence(input, outputSequenceA, "A")
r_B = checkAllSequence(input, outputSequenceB, "B")
r_C = checkAllSequence(input, outputSequenceC, "C")
r_All = check_2(input, sequenceAll)
if r_A is not True:
return "Error Elevator A: " + str(r_A) + "\n\t" + str(outputfile)
if r_B is not True:
return "Error Elevator B: " + str(r_B) + "\n\t" + str(outputfile)
if r_C is not True:
return "Error Elevator C: " + str(r_C) + "\n\t" + str(outputfile)
if r_All is not True:
return r_All + "\n\t" + str(outputfile)
return True
|
py | 1a5385355058d3304bf127f9bf041e583d882205 |
import numpy as np
import Augmentor
from PIL import Image
class DatasetAugmentor():
def __init__(self,
dataset_config=None,
additional_augmentor_obj=None
):
self.p = Augmentor.Pipeline()
if dataset_config is not None and 'pipeline' in dataset_config:
for pipeline in dataset_config['pipeline']:
method_to_call = getattr(self.p, pipeline[0])
parameters = pipeline[1]
method_to_call(**parameters)
if additional_augmentor_obj is not None:
for pipeline in additional_augmentor_obj:
method_to_call = getattr(self.p, pipeline[0])
parameters = pipeline[1]
method_to_call(**parameters)
self.transform = self.p.torch_transform()
if dataset_config is not None and 'scaling' in dataset_config:
self.scaling = dataset_config['scaling']
else:
self.scaling = 'tanh'
def _scaling_tanh(self, img):
img = img / 127.5 - 1
return img
def _scaling_sigmoid(self, img):
img = img / 255.0
return img
def augment(self, image, isArray=False):
if isArray: # if the input is a numpy array, convert back to PIL
image = Image.fromarray(image)
image = self.transform(image)
image = np.asarray(image).astype('f')
w, h = image.shape[0], image.shape[1]
if np.ndim(image) == 2:
ch = 1
else:
ch = np.shape(image)[2]
image = image.reshape(w, h, ch)
image = image.transpose((2, 0, 1))
if self.scaling == 'none':
return image
elif self.scaling == 'sigmoid':
return self._scaling_sigmoid(image)
elif self.scaling == 'tanh':
return self._scaling_tanh(image)
else:
raise NotImplementedError
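# Usage sketch (illustrative; the pipeline entries assume Augmentor's standard
# operations and a config dict shaped the way this class expects):
#
#   config = {
#       'pipeline': [
#           ('rotate', {'probability': 0.5, 'max_left_rotation': 5,
#                       'max_right_rotation': 5}),
#           ('flip_left_right', {'probability': 0.5}),
#       ],
#       'scaling': 'tanh',
#   }
#   augmentor = DatasetAugmentor(dataset_config=config)
#   chw_array = augmentor.augment(pil_image)  # float32, CHW, scaled to [-1, 1]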
|
py | 1a5385aedf817d2cb761c5fec9ed328b0da71861 | from functools import partial
import numpy as np
import torch
import torch.nn as nn
from torchvision import transforms
from src.modules.distributions import dmol_loss, sample_from_dmol, log_normal_diag
# ----- NN Model Selection -----
from .image_networks.densenet16x32 import q_u, p_z, q_z, p_y, p_x
from ...utils.utils import get_shape
# ----- Two Staged VAE -----
class srVAE(nn.Module):
"""
Super-Resolution Variational Auto-Encoder (srVAE).
A Two Staged Visual Processing Variational AutoEncoder.
Author:
Ioannis Gatopoulos.
"""
def __init__(self, x_shape, args, y_shape=(3, 16, 16)):
super().__init__()
self.device = args.device
self.x_shape = x_shape
self.y_shape = (x_shape[0], y_shape[1], y_shape[2])
u_dim = args.u_dim
z_dim = args.z_dim
prior = args.prior
self.u_shape = get_shape(u_dim)
self.z_shape = get_shape(z_dim)
# q(y|x): deterministic "compressed" transformation
self.compressed_transform = transforms.Compose([
transforms.ToPILImage(),
transforms.Resize((self.y_shape[1], self.y_shape[2])),
transforms.ToTensor()
])
# p(u)
self.p_u = globals()[prior](self.u_shape)
# q(u | y)
self.q_u = q_u(self.u_shape, self.y_shape)
# p(z | y)
self.p_z = p_z(self.z_shape, (self.y_shape, self.u_shape))
# q(z | x)
self.q_z = q_z(self.z_shape, self.x_shape)
# p(y | u)
self.p_y = p_y(self.y_shape, self.u_shape, args)
# p(x | y, z)
self.p_x = p_x(self.x_shape, (self.y_shape, self.z_shape), args)
# likelihood distribution
self.recon_loss = partial(dmol_loss)
self.sample_distribution = partial(sample_from_dmol)
    def compressed_transformation(self, input):
y = []
for x in input:
y.append(self.compressed_transform(x.cpu()))
return torch.stack(y).to(self.device)
def initialize(self, dataloader):
""" Data dependent init for weight normalization
(Automatically done during the first forward pass).
"""
with torch.no_grad():
x, _ = next(iter(dataloader))
x = x.to(self.device)
output = self.forward(x)
self.calculate_elbo(x, output)
return
@staticmethod
def reparameterize(z_mean, z_log_var):
""" z ~ N(z| z_mu, z_logvar) """
epsilon = torch.randn_like(z_mean)
return z_mean + torch.exp(0.5 * z_log_var) * epsilon
@torch.no_grad()
def generate(self, n_samples=20):
# u ~ p(u)
u = self.p_u.sample(self.u_shape, n_samples=n_samples, device=self.device).to(self.device)
# p(y|u)
y_logits = self.p_y(u)
y_hat = self.sample_distribution(y_logits, nc=self.y_shape[0])
# z ~ p(z|y, u)
z_p_mean, z_p_logvar = self.p_z((y_hat, u))
z_p = self.reparameterize(z_p_mean, z_p_logvar)
# x ~ p(x|y,z)
x_logits = self.p_x((y_hat, z_p))
x_hat = self.sample_distribution(x_logits, nc=self.x_shape[0])
return x_hat, y_hat
@torch.no_grad()
def reconstruct(self, x, **kwargs):
outputs = self.forward(x)
y_hat = self.sample_distribution(outputs.get('y_logits'), nc=self.y_shape[0])
x_hat = self.sample_distribution(outputs.get('x_logits'), nc=self.x_shape[0])
return outputs.get('y'), y_hat, x_hat
@torch.no_grad()
def super_resolution(self, y):
# u ~ q(u| y)
u_q_mean, u_q_logvar = self.q_u(y)
u_q = self.reparameterize(u_q_mean, u_q_logvar)
# z ~ p(z|y)
z_p_mean, z_p_logvar = self.p_z((y, u_q))
z_p = self.reparameterize(z_p_mean, z_p_logvar)
# x ~ p(x|y,z)
x_logits = self.p_x((y, z_p))
x_hat = self.sample_distribution(x_logits)
return x_hat
def calculate_elbo(self, x, outputs, **kwargs):
# unpack variables
y, x_logits, y_logits = outputs.get('y'), outputs.get('x_logits'), outputs.get('y_logits')
u_q, u_q_mean, u_q_logvar = outputs.get('u_q'), outputs.get('u_q_mean'), outputs.get('u_q_logvar')
z_q, z_q_mean, z_q_logvar = outputs.get('z_q'), outputs.get('z_q_mean'), outputs.get('z_q_logvar')
z_p_mean, z_p_logvar = outputs.get('z_p_mean'), outputs.get('z_p_logvar')
# Reconstraction loss
RE_x = self.recon_loss(x, x_logits, nc=self.x_shape[0])
RE_y = self.recon_loss(y, y_logits, nc=self.y_shape[0])
# Regularization loss
log_p_u = self.p_u.log_p(u_q, dim=1)
log_q_u = log_normal_diag(u_q, u_q_mean, u_q_logvar)
KL_u = log_q_u - log_p_u
log_p_z = log_normal_diag(z_q, z_p_mean, z_p_logvar)
log_q_z = log_normal_diag(z_q, z_q_mean, z_q_logvar)
KL_z = log_q_z - log_p_z
# Total lower bound loss
nelbo = - (RE_x + RE_y - KL_u - KL_z).mean()
diagnostics = {
"bpd": (nelbo.item()) / (np.prod(x.shape[1:]) * np.log(2.)),
"nelbo": nelbo.item(),
"RE": - (RE_x + RE_y).mean().item(),
"RE_x": - RE_x.mean().item(),
"RE_y": - RE_y.mean().item(),
"KL": (KL_z + KL_u).mean().item(),
"KL_u": KL_u.mean().item(),
"KL_z": KL_z.mean().item(),
}
return nelbo, diagnostics
def forward(self, x, **kwargs):
""" Forward pass through the inference and the generative model. """
        # y = f(x) (deterministic)
        y = self.compressed_transformation(x)
# u ~ q(u| y)
u_q_mean, u_q_logvar = self.q_u(y)
u_q = self.reparameterize(u_q_mean, u_q_logvar)
        # z ~ q(z| x)
z_q_mean, z_q_logvar = self.q_z(x)
z_q = self.reparameterize(z_q_mean, z_q_logvar)
# x ~ p(x| y, z)
x_logits = self.p_x((y, z_q))
# y ~ p(y| u)
y_logits = self.p_y(u_q)
        # z ~ p(z| y, u)
z_p_mean, z_p_logvar = self.p_z((y, u_q))
return {
'u_q_mean': u_q_mean,
'u_q_logvar': u_q_logvar,
'u_q': u_q,
'z_q_mean': z_q_mean,
'z_q_logvar': z_q_logvar,
'z_q': z_q,
'z_p_mean': z_p_mean,
'z_p_logvar': z_p_logvar,
'y': y,
'y_logits': y_logits,
'x_logits': x_logits
}
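
# A minimal, standalone illustration of the reparameterization trick used by
# `reparameterize` above (not part of the original model; shapes are arbitrary):
# a sample z ~ N(mu, sigma^2) is written as mu + sigma * eps with eps ~ N(0, I),
# so gradients can flow through mu and log_var.
if __name__ == "__main__":
    import torch
    _mu = torch.zeros(4, 8)
    _log_var = torch.zeros(4, 8)                 # sigma = exp(0.5 * log_var) = 1
    _eps = torch.randn_like(_mu)
    _z = _mu + torch.exp(0.5 * _log_var) * _eps  # same computation as reparameterize(_mu, _log_var)
    print(_z.shape)                              # torch.Size([4, 8])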
|
py | 1a53861ef9f035b306ac8c054c3b689ada33390b | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.journals import journals
def test_journals():
"""Test module journals.py by downloading
journals.csv and testing shape of
extracted data has 180 rows and 10 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = journals(test_path)
try:
assert x_train.shape == (180, 10)
except:
shutil.rmtree(test_path)
    raise
|
py | 1a5386883ab026a8f1a4999c81c7b177cb3819f8 | from mollie.api.objects.refund import Refund
from .utils import assert_list_object
PROFILE_ID = "pfl_v9hTwCvYqw"
def test_get_profile_refunds_by_profile_id(client, response):
"""Get refunds relevant to profile by profile id."""
response.get(f"https://api.mollie.com/v2/refunds?profileId={PROFILE_ID}", "refunds_list")
refunds = client.profile_refunds.with_parent_id(PROFILE_ID).list()
assert_list_object(refunds, Refund)
|
py | 1a5387162993079e1d4a84044483bb025206b530 | import sys
import time
import subprocess
def check_for_libportaudio2():
if sys.platform == 'linux':
try:
output = subprocess.run(['apt', 'list', '--installed',
'libportaudio2'],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
output = output.stdout.decode('utf-8')
if 'libportaudio2' not in output:
print('\nLibrary "libportaudio2" is missing,\nInstalling...\n')
time.sleep(2)
subprocess.run(['sudo', 'apt', 'install', 'libportaudio2'])
except OSError:
print('Could not install libportaudio2.')
except KeyboardInterrupt:
sys.exit()
check_for_libportaudio2()
|
py | 1a5387d242661880b2799fffc8d25e59b49ca508 | #!/usr/bin/python
# Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
"""This module provides shared functionality for the system to generate
dart:html APIs from the IDL database."""
import emitter
from generator import AnalyzeOperation, ConstantOutputOrder, \
DartDomNameOfAttribute, FindMatchingAttribute, IsPureInterface, \
TypeOrNothing, ConvertToFuture, GetCallbackInfo
from copy import deepcopy
from htmlrenamer import convert_to_future_members, custom_html_constructors, \
GetDDC_Extension, keep_overloaded_members, overloaded_and_renamed,\
private_html_members, renamed_html_members, renamed_overloads, \
removed_html_members
from generator import TypeOrVar
import logging
import monitored
import sys
_logger = logging.getLogger('htmldartgenerator')
# Types that are accessible cross-frame in a limited fashion.
# In these cases, the base type (e.g., WindowBase) provides restricted access
# while the subtype (e.g., Window) provides full access to the
# corresponding objects if they are from the same frame.
_secure_base_types = {
'Window': 'WindowBase',
'Location': 'LocationBase',
'History': 'HistoryBase',
}
_custom_factories = [
'Notification',
'EventSource',
]
class HtmlDartGenerator(object):
def __init__(self, interface, options, dart_use_blink, logger):
self._dart_use_blink = dart_use_blink
self._database = options.database
self._interface = interface
self._type_registry = options.type_registry
self._interface_type_info = self._type_registry.TypeInfo(
self._interface.id)
self._renamer = options.renamer
self._metadata = options.metadata
self._library_name = self._renamer.GetLibraryName(self._interface)
_logger.setLevel(logger.level)
def EmitSupportCheck(self):
if self.HasSupportCheck():
check = self.GetSupportCheck()
if type(check) != tuple:
signature = 'get supported'
else:
signature = check[0]
check = check[1]
self._members_emitter.Emit(
'\n'
' /// Checks if this type is supported on the current platform.\n'
' static bool $SIGNATURE => $SUPPORT_CHECK;\n',
SIGNATURE=signature,
SUPPORT_CHECK=check)
def EmitEventGetter(self, events_class_name):
self._members_emitter.Emit(
"EventTarget.removeEventListener, EventTarget.dispatchEvent')"
"\n @deprecated"
"\n $TYPE get on =>\n new $TYPE(this);\n",
TYPE=events_class_name)
def AddMembers(self, interface, declare_only=False, dart_js_interop=False):
if self._interface.id == 'WebGLRenderingContextBase' or self._interface.id == 'WebGL2RenderingContextBase' or \
self._interface.id == 'WebGLDrawBuffers':
# Constants in classes WebGLRenderingContextBase, WebGL2RenderingContext, WebGLDrawBuffers are consolidated into
# one synthesized class (WebGL).
self._gl_constants.extend(interface.constants)
else:
for const in sorted(interface.constants, ConstantOutputOrder):
self.AddConstant(const)
for attr in sorted(interface.attributes, ConstantOutputOrder):
if attr.type.id != 'EventHandler' and attr.type.id != 'EventListener':
self.AddAttribute(attr, declare_only)
# The implementation should define an indexer if the interface directly
# extends List.
element_type = None
requires_indexer = False
if self._interface_type_info.list_item_type():
self.AddIndexer(self._interface_type_info.list_item_type(),
self._interface_type_info.list_item_type_nullable())
else:
for parent in self._database.Hierarchy(self._interface):
if parent == self._interface:
continue
parent_type_info = self._type_registry.TypeInfo(parent.id)
if parent_type_info.list_item_type():
self.AmendIndexer(parent_type_info.list_item_type())
break
# Group overloaded operations by name.
self._AddRenamedOverloads(interface)
operationsByName = self._OperationsByName(interface)
if self.OmitOperationOverrides():
self._RemoveShadowingOperationsWithSameSignature(
operationsByName, interface)
# Generate operations.
for id in sorted(operationsByName.keys()):
operations = operationsByName[id]
info = AnalyzeOperation(interface, operations)
info.nnbd = self._nnbd
self.AddOperation(info, declare_only, dart_js_interop)
if ('%s.%s' % (interface.id,
info.declared_name) in convert_to_future_members):
self.AddOperation(ConvertToFuture(info), declare_only)
def AddSecondaryMembers(self, interface):
secondary_parents = self._database.TransitiveSecondaryParents(
interface, not self._dart_use_blink)
remove_duplicate_parents = list(set(secondary_parents))
if len(secondary_parents) != len(remove_duplicate_parents):
secondary_parents = remove_duplicate_parents
parent_list = ", ".join(
[" %s" % (parent.id) for parent in secondary_parents])
_logger.warn('Interface %s has duplicate parent interfaces %s - ' \
'ignoring duplicates. Please file a bug with the dart:html team.' % (interface.id, parent_list))
for parent_interface in sorted(secondary_parents):
if isinstance(parent_interface, str):
continue
for attr in sorted(parent_interface.attributes,
ConstantOutputOrder):
if not FindMatchingAttribute(interface, attr):
if attr.type.id != 'EventHandler':
self.SecondaryContext(parent_interface)
self.AddAttribute(attr)
# Group overloaded operations by name.
operationsByName = self._OperationsByName(parent_interface)
if self.OmitOperationOverrides():
self._RemoveShadowingOperationsWithSameSignature(
operationsByName, interface)
# Generate operations.
for id in sorted(operationsByName.keys()):
if not any(op.id == id for op in interface.operations):
operations = operationsByName[id]
info = AnalyzeOperation(interface, operations)
self.SecondaryContext(parent_interface)
self.AddOperation(info)
def _RemoveShadowingOperationsWithSameSignature(self, operationsByName,
interface):
if not interface.parents:
return
parent_name = interface.parents[0].type.id
parent = self._database.GetInterface(parent_name)
if parent == self._interface or parent == interface:
return
        # Never remove operations that are added as a result of an implements;
        # they are pure interfaces (mixins to this interface).
if (IsPureInterface(parent_name, self._database)):
return
for operation in parent.operations:
if operation.id in operationsByName:
operations = operationsByName[operation.id]
for existing_operation in operations:
if existing_operation.SameSignatureAs(operation):
del operationsByName[operation.id]
def _AddRenamedOverloads(self, interface):
"""The IDL has a number of functions with the same name but that accept
different types. This is fine for JavaScript, but results in vague type
signatures for Dart. We rename some of these (by adding a new identical
operation with a different DartName), but leave the original version as
well in some cases."""
potential_added_operations = set()
operations_by_name = self._OperationsByName(interface)
already_renamed = [
operation.ext_attrs['DartName']
if 'DartName' in operation.ext_attrs else ''
for operation in interface.operations
]
added_operations = []
for operation in interface.operations:
full_operation_str = self._GetStringRepresentation(
interface, operation)
if (full_operation_str in renamed_overloads and
renamed_overloads[full_operation_str] not in already_renamed
):
if '%s.%s' % (interface.id,
operation.id) in overloaded_and_renamed:
cloned_operation = deepcopy(operation)
cloned_operation.ext_attrs['DartName'] = renamed_overloads[
full_operation_str]
added_operations.append(cloned_operation)
else:
dart_name = renamed_overloads[full_operation_str]
if not dart_name:
continue
operation.ext_attrs['DartName'] = dart_name
potential_added_operations.add(operation.id)
self._EnsureNoMultipleTypeSignatures(interface, operation,
operations_by_name)
interface.operations += added_operations
self._AddDesiredOverloadedOperations(potential_added_operations,
interface, operations_by_name)
def _AddDesiredOverloadedOperations(self, potential_added_operations,
interface, original_operations_by_name):
"""For some cases we desire to keep the overloaded version in dart, for
simplicity of API, and explain the parameters accepted in documentation."""
updated_operations_by_name = self._OperationsByName(interface)
for operation_id in potential_added_operations:
if (operation_id not in updated_operations_by_name and '%s.%s' %
(interface.id, operation_id) in keep_overloaded_members):
for operation in original_operations_by_name[operation_id]:
cloned_operation = deepcopy(operation)
cloned_operation.ext_attrs['DartName'] = operation_id
interface.operations.append(cloned_operation)
def _EnsureNoMultipleTypeSignatures(self, interface, operation,
operations_by_name):
"""Make sure that there is now at most one operation with a particular
operation.id. If not, stop library generation, and throw an error, requiring
programmer input about the best name change before proceeding."""
operation_str = '%s.%s' % (interface.id, operation.id)
if (operation.id in operations_by_name and
len(operations_by_name[operation.id]) > 1 and len(
filter(lambda overload: overload.startswith(operation_str),
renamed_overloads.keys())) == 0 and
operation_str not in keep_overloaded_members and
operation_str not in overloaded_and_renamed and
operation_str not in renamed_html_members and
operation_str not in private_html_members and
operation_str not in removed_html_members and
operation.id != '__getter__' and
operation.id != '__setter__' and operation.id != '__delete__'):
_logger.warn(
'Multiple type signatures for %s.%s. Please file a bug with'
' the dart:html team to determine if one of these functions should be'
' renamed.' % (interface.id, operation.id))
def _GetStringRepresentation(self, interface, operation):
"""Given an IDLOperation, return a object-independent representation of the
operations's signature."""
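        # For illustration only (the interface and operation names below are
        # hypothetical), the returned string looks like:
        #   'CanvasRenderingContext2D.drawImage(HTMLImageElement image, float x, float y)'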
return '%s.%s(%s)' % (interface.id, operation.id, ', '.join(
['%s %s' % (arg.type.id, arg.id) for arg in operation.arguments]))
def _OperationsByName(self, interface):
operationsByName = {}
for operation in interface.operations:
name = operation.ext_attrs.get('DartName', operation.id)
operationsByName.setdefault(name, []).append(operation)
return operationsByName
def OmitOperationOverrides(self):
return False
def AddConstant(self, constant):
const_name = self._renamer.RenameMember(
self._interface.id,
constant,
constant.id,
'get:',
dartify_name=False)
if not const_name:
return
annotations = self._metadata.GetFormattedMetadata(
self._library_name, self._interface, constant.id, ' ')
type = TypeOrNothing(self._DartType(constant.type.id), constant.type.id)
self._members_emitter.Emit(
'\n $(ANNOTATIONS)static const $TYPE$NAME = $VALUE;\n',
ANNOTATIONS=annotations,
NAME=const_name,
TYPE=type,
VALUE=constant.value)
def AddAttribute(self, attribute, declare_only=False):
""" Adds an attribute to the generated class.
Arguments:
attribute - The attribute which is to be added.
declare_only- True if the attribute should be declared as an abstract
member and not include invocation code.
"""
dom_name = DartDomNameOfAttribute(attribute)
attr_name = self._renamer.RenameMember(self._interface.id, attribute,
dom_name, 'get:')
if not attr_name:
return
html_setter_name = self._renamer.RenameMember(
self._interface.id, attribute, dom_name, 'set:')
read_only = (attribute.is_read_only or
'Replaceable' in attribute.ext_attrs or
not html_setter_name)
        # We don't yet handle inconsistent renames of the getter and setter.
assert (not html_setter_name or attr_name == html_setter_name)
if declare_only:
self.DeclareAttribute(attribute,
self.SecureOutputType(attribute.type.id),
attr_name, read_only)
else:
self.EmitAttribute(attribute, attr_name, read_only)
def AddOperation(self, info, declare_only=False, dart_js_interop=False):
        # TODO(terry): Hack. Window has 2 overloaded getters; one returns Window
        # and the other Object (we'll always return Window)?
if self._interface.id == "Window" and info.name == '__getter__':
info.operations[1].type = info.operations[0].type
""" Adds an operation to the generated class.
Arguments:
info - The operation info of the operation to be added.
declare_only- True if the operation should be declared as an abstract
member and not include invocation code.
"""
# FIXME: When we pass in operations[0] below, we're assuming all
# overloaded operations have the same security attributes. This
# is currently true, but we should consider filtering earlier or
# merging the relevant data into info itself.
method_name = self._renamer.RenameMember(
self._interface.id, info.operations[0], info.name, 'call:')
if not method_name:
if info.name == 'item':
# FIXME: item should be renamed to operator[], not removed.
self.EmitOperation(info, '_item')
return
if declare_only:
self.DeclareOperation(info, self.SecureOutputType(info.type_name),
method_name)
else:
self.EmitOperation(info, method_name, dart_js_interop)
def _GenerateOverloadDispatcher(
self,
info,
signatures,
is_void,
declaration,
generate_call,
is_optional,
emitter,
can_omit_type_check=lambda type, pos: False):
parameter_names = [p.name for p in info.param_infos]
number_of_required_in_dart = info.NumberOfRequiredInDart()
body_emitter = emitter.Emit('\n'
' $DECLARATION {\n'
'$!BODY'
' }\n',
DECLARATION=declaration)
version = [0]
def GenerateCall(signature_index, argument_count, checks):
if checks:
(stmts_emitter, call_emitter) = body_emitter.Emit(
' if ($CHECKS) {\n$!STMTS$!CALL }\n',
INDENT=' ',
CHECKS=' && '.join(checks))
else:
(stmts_emitter, call_emitter) = body_emitter.Emit(
'$!STMTS$!CALL', INDENT=' ')
if is_void:
call_emitter = call_emitter.Emit(
'$(INDENT)$!CALL;\n$(INDENT)return;\n')
else:
call_emitter = call_emitter.Emit('$(INDENT)return $!CALL;\n')
version[0] += 1
generate_call(stmts_emitter, call_emitter, version[0],
signature_index, argument_count)
def IsTypeChecking(interface_argument):
return 'LegacyInterfaceTypeChecking' in interface_argument.ext_attrs or \
self._database.HasInterface(interface_argument.id)
def GenerateChecksAndCall(signature_index, argument_count):
checks = []
typechecked_interface = IsTypeChecking(self._interface)
for i in reversed(range(0, argument_count)):
argument = signatures[signature_index][i]
parameter_name = parameter_names[i]
test_type = self._NarrowToImplementationType(argument.type.id)
if test_type in ['dynamic', 'Object']:
checks.append('%s != null' % parameter_name)
elif not can_omit_type_check(test_type, i):
typechecked = typechecked_interface or IsTypeChecking(
argument)
converts_null = \
('TreatNullAs' in argument.ext_attrs) or \
(argument.default_value is not None) or \
(argument.default_value_is_null)
if argument.type.nullable or converts_null or not typechecked:
checks.append(
'(%s is %s || %s == null)' %
(parameter_name, test_type, parameter_name))
else:
checks.append(
'(%s is %s)' % (parameter_name, test_type))
elif i >= number_of_required_in_dart and not argument.type.nullable:
checks.append('%s != null' % parameter_name)
# There can be multiple presence checks. We need them all since a later
# optional argument could have been passed by name, leaving 'holes'.
checks.extend([
'%s == null' % name for name in parameter_names[argument_count:]
])
GenerateCall(signature_index, argument_count, checks)
# TODO: Optimize the dispatch to avoid repeated checks.
if len(signatures) > 1:
index_swaps = {}
for signature_index, signature in enumerate(signatures):
for argument_position, argument in enumerate(signature):
if argument.type.id != 'ArrayBuffer':
continue
candidates = enumerate(signatures[signature_index + 1:],
signature_index + 1)
for candidate_index, candidate in candidates:
if len(candidate) <= argument_position:
continue
if candidate[
argument_position].type.id != 'ArrayBufferView':
continue
if len(index_swaps):
raise Exception(
'Cannot deal with more than a single swap')
index_swaps[candidate_index] = signature_index
index_swaps[signature_index] = candidate_index
for signature_index in range(len(signatures)):
signature_index = index_swaps.get(signature_index,
signature_index)
signature = signatures[signature_index]
for argument_position, argument in enumerate(signature):
if is_optional(signature_index, argument):
GenerateChecksAndCall(signature_index,
argument_position)
GenerateChecksAndCall(signature_index, len(signature))
body_emitter.Emit(
' throw new ArgumentError("Incorrect number or type of arguments");'
'\n')
else:
signature = signatures[0]
argument_count = len(signature)
for argument_position, argument in list(enumerate(signature))[::-1]:
if is_optional(0, argument):
check = '%s != null' % parameter_names[argument_position]
# argument_count instead of argument_position + 1 is used here to cover one
# complicated case with the effectively optional argument in the middle.
# Consider foo(x, optional y, [Default=NullString] optional z)
# (as of now it's modelled after HTMLMediaElement.webkitAddKey).
# y is optional in WebCore, while z is not.
# In this case, if y was actually passed, we'd like to emit foo(x, y, z) invocation,
# not foo(x, y).
GenerateCall(0, argument_count, [check])
argument_count = argument_position
GenerateCall(0, argument_count, [])
def _GenerateDispatcherBody(self,
info,
operations,
declaration,
generate_call,
is_optional,
can_omit_type_check=lambda type, pos: False):
def GenerateCall(stmts_emitter, call_emitter, version, signature_index,
argument_count):
generate_call(stmts_emitter, call_emitter, version,
operations[signature_index], argument_count)
def IsOptional(signature_index, argument):
return is_optional(argument)
emitter = self._members_emitter
self._GenerateOverloadDispatcher(
info, [operation.arguments for operation in operations],
operations[0].type.id == 'void', declaration, GenerateCall,
IsOptional, emitter, can_omit_type_check)
def AdditionalImplementedInterfaces(self):
# TODO: Include all implemented interfaces, including other Lists.
implements = []
if self._interface_type_info.list_item_type():
item_type = self._type_registry.TypeInfo(
self._interface_type_info.list_item_type()).dart_type()
implements.append('List<%s>' % item_type)
return implements
def Mixins(self):
mixins = []
if self._interface_type_info.list_item_type():
item_type = self._type_registry.TypeInfo(
self._interface_type_info.list_item_type()).dart_type()
mixins.append('ListMixin<%s>' % item_type)
mixins.append('ImmutableListMixin<%s>' % item_type)
return mixins
def AddConstructors(self, constructors, factory_name,
factory_constructor_name):
""" Adds all of the constructors.
Arguments:
constructors - List of the constructors to be added.
factory_name - Name of the factory for this class.
factory_constructor_name - The name of the constructor on the
factory_name to call (calls an autogenerated FactoryProvider
if unspecified)
"""
for constructor_info in constructors:
self._AddConstructor(constructor_info, factory_name,
factory_constructor_name)
def _AddConstructor(self, constructor_info, factory_name,
factory_constructor_name):
# Hack to ignore the constructor used by JavaScript.
if ((self._interface.id == 'HTMLImageElement' or
self._interface.id == 'Blob' or
self._interface.id == 'DOMException') and
not constructor_info.pure_dart_constructor):
return
if self.GenerateCustomFactory(constructor_info):
return
metadata = self._metadata.GetFormattedMetadata(
self._library_name, self._interface, self._interface.id, ' ')
if not factory_constructor_name:
factory_constructor_name = '_create'
factory_parameters = constructor_info.ParametersAsArgumentList()
else:
factory_parameters = ', '.join(constructor_info.factory_parameters)
def InputType(type_name):
conversion = self._InputConversion(type_name,
constructor_info.declared_name)
if conversion:
return conversion.input_type
else:
return self._NarrowInputType(
type_name) if type_name else 'dynamic'
if constructor_info.pure_dart_constructor:
# TODO(antonm): use common dispatcher generation for this case as well.
has_optional = any(param_info.is_optional
for param_info in constructor_info.param_infos)
factory_call = self.MakeFactoryCall(
factory_name, factory_constructor_name, factory_parameters,
constructor_info)
if not has_optional:
self._members_emitter.Emit(
'\n $(METADATA)'
'factory $CTOR($PARAMS) => '
'$FACTORY_CALL;\n',
CTOR=constructor_info._ConstructorFullName(self._DartType),
PARAMS=constructor_info.ParametersAsDeclaration(InputType),
FACTORY_CALL=factory_call,
METADATA=metadata)
else:
inits = self._members_emitter.Emit(
'\n $(METADATA)'
'factory $CONSTRUCTOR($PARAMS) {\n'
' $CONSTRUCTOR e = $FACTORY_CALL;\n'
'$!INITS'
' return e;\n'
' }\n',
CONSTRUCTOR=constructor_info._ConstructorFullName(
self._DartType),
METADATA=metadata,
FACTORY_CALL=factory_call,
PARAMS=constructor_info.ParametersAsDeclaration(InputType))
for index, param_info in enumerate(
constructor_info.param_infos):
if param_info.is_optional:
inits.Emit(
' if ($E != null) e.$E = $E;\n',
E=param_info.name)
else:
custom_factory_ctr = self._interface.id in _custom_factories
if self._interface_type_info.has_generated_interface():
constructor_full_name = constructor_info._ConstructorFullName(
self._DartType)
else:
# The interface is suppress_interface so use the implementation_name not
# the dart_type.
constructor_full_name = self._interface_type_info.implementation_name(
)
factory_name = constructor_full_name
def GenerateCall(stmts_emitter, call_emitter, version,
signature_index, argument_count):
name = emitter.Format('_create_$VERSION', VERSION=version)
arguments = constructor_info.idl_args[
signature_index][:argument_count]
args = None
call_template = ''
if self._dart_use_blink:
type_ids = [p.type.id for p in arguments]
base_name, rs = \
self.DeriveNativeEntry("constructorCallback", 'Constructor', argument_count)
qualified_name = \
self.DeriveQualifiedBlinkName(self._interface.id,
base_name)
args = constructor_info.ParametersAsArgumentList(
argument_count)
# Handle converting Maps to Dictionaries, etc.
(factory_params, converted_arguments,
calling_params) = self._ConvertArgumentTypes(
stmts_emitter, arguments, argument_count,
constructor_info)
args = ', '.join(converted_arguments)
call_template = '$FACTORY_NAME($FACTORY_PARAMS)'
else:
qualified_name = emitter.Format(
'$FACTORY.$NAME', FACTORY=factory_name, NAME=name)
(factory_params, converted_arguments,
calling_params) = self._ConvertArgumentTypes(
stmts_emitter, arguments, argument_count,
constructor_info)
args = ', '.join(converted_arguments)
call_template = '$FACTORY_NAME($FACTORY_PARAMS)'
call_emitter.Emit(
call_template,
FACTORY_NAME=qualified_name,
FACTORY_PARAMS=args)
self.EmitStaticFactoryOverload(constructor_info, name,
arguments)
def IsOptional(signature_index, argument):
return self.IsConstructorArgumentOptional(argument)
entry_declaration = emitter.Format(
'$(METADATA)$FACTORY_KEYWORD $CTOR($PARAMS)',
FACTORY_KEYWORD=('factory' if not custom_factory_ctr else
'static %s' % constructor_full_name),
CTOR=(('' if not custom_factory_ctr else '_factory') +
constructor_full_name),
METADATA=metadata,
PARAMS=constructor_info.ParametersAsDeclaration(InputType))
overload_emitter = self._members_emitter
overload_declaration = entry_declaration
self._GenerateOverloadDispatcher(constructor_info,
constructor_info.idl_args, False,
overload_declaration, GenerateCall,
IsOptional, overload_emitter)
def _AddFutureifiedOperation(self, info, html_name):
"""Given a API function that uses callbacks, convert it to using Futures.
This conversion assumes the success callback is always provided before the
error callback (and so far in the DOM API, this is the case)."""
callback_info = GetCallbackInfo(
self._database.GetInterface(info.callback_args[0].type_id))
# Generated private members never have named arguments.
ignore_named_parameters = True if html_name.startswith('_') else False
# If more than one callback then the second argument is the error callback.
# Some error callbacks have 2 args (e.g., executeSql) where the second arg
# is the error - this is the argument we want.
error_callback = ""
if len(info.callback_args) > 1:
error_callback_info = GetCallbackInfo(
self._database.GetInterface(info.callback_args[1].type_id))
error_callbackNames = []
for paramInfo in error_callback_info.param_infos:
error_callbackNames.append(paramInfo.name)
errorCallbackVariables = ", ".join(error_callbackNames)
errorName = error_callback_info.param_infos[-1].name
error_callback = (
',\n %s(%s) { completer.completeError(%s); }' % (
('%s : ' % info.callback_args[1].name
if info.requires_named_arguments and
info.callback_args[1].is_optional and
not (ignore_named_parameters) else ''),
errorCallbackVariables, errorName))
extensions = GetDDC_Extension(self._interface, info.declared_name)
if extensions:
ddc_extensions = "\n".join(extensions)
else:
ddc_extensions = ''
        # Some callbacks have more than one parameter. If so, use all of
        # those parameters. However, with more than one argument, the type of the
        # last argument is the one returned, e.g., for executeSql the callback
        # is (transaction, resultSet) and only the resultSet (a SqlResultSet) is returned.
callbackArgsLen = len(callback_info.param_infos)
future_generic = ''
callbackVariables = ''
completerVariable = ''
if callbackArgsLen == 1:
callbackVariables = 'value'
completerVariable = callbackVariables
if callback_info.param_infos[0].type_id:
future_generic = '<%s>' % self._DartType(
callback_info.param_infos[0].type_id)
elif callbackArgsLen > 1:
callbackNames = []
for paramInfo in callback_info.param_infos:
callbackNames.append(paramInfo.name)
callbackVariables = ",".join(callbackNames)
completerVariable = callbackNames[-1]
future_generic = '<%s>' % self._DartType(
callback_info.param_infos[-1].type_id)
param_list = info.ParametersAsArgumentList(None,
ignore_named_parameters)
dictionary_argument = info.dictionaryArgumentName()
convert_map = ''
if dictionary_argument is not None:
mapArg = dictionary_argument[0]
tempVariable = '%s_dict' % mapArg
mapArgOptional = dictionary_argument[1]
if not (extensions):
if not (param_list.endswith(', mapArg') or
param_list.endswith(', options') or
param_list == mapArg):
print "ERROR: %s.%s - Last parameter or only parameter %s is not of type Map" % (
self._interface.id, html_name, mapArg)
param_list = '%s_dict' % param_list
if mapArgOptional:
convert_map = ' var %s = null;\n'\
' if (%s != null) {\n'\
' %s = convertDartToNative_Dictionary(%s);\n'\
' }\n' % (tempVariable, mapArg, tempVariable, mapArg)
else:
convert_map = ' var %s = convertDartToNative_Dictionary(%s);\n' % (
tempVariable, mapArg)
metadata = ''
if '_RenamingAnnotation' in dir(self):
metadata = (
self._RenamingAnnotation(info.declared_name, html_name) +
self._Metadata(info.type_name, info.declared_name, None))
self._members_emitter.Emit(
'\n'
' $METADATA$MODIFIERS$TYPE$FUTURE_GENERIC $NAME($PARAMS) {\n'
' $CONVERT_DICTIONARY'
' var completer = new Completer$(FUTURE_GENERIC)();\n'
' $ORIGINAL_FUNCTION($PARAMS_LIST\n'
' $NAMED_PARAM($VARIABLE_NAME) { '
'$DDC_EXTENSION\n'
'completer.complete($COMPLETER_NAME); }'
'$ERROR_CALLBACK);\n'
' return completer.future;\n'
' }\n',
METADATA=metadata,
MODIFIERS='static ' if info.IsStatic() else '',
TYPE=self.SecureOutputType(info.type_name),
NAME=html_name[1:],
PARAMS=info.
ParametersAsDeclaration(self._NarrowInputType if '_NarrowInputType'
in dir(self) else self._DartType),
CONVERT_DICTIONARY=convert_map,
PARAMS_LIST='' if param_list == '' else param_list + ',',
NAMED_PARAM=('%s : ' % info.callback_args[0].name
if info.requires_named_arguments and
info.callback_args[0].is_optional and
not (ignore_named_parameters) else ''),
VARIABLE_NAME=callbackVariables,
COMPLETER_NAME=completerVariable,
DDC_EXTENSION=ddc_extensions,
ERROR_CALLBACK=error_callback,
FUTURE_GENERIC=future_generic,
ORIGINAL_FUNCTION=html_name)
def EmitHelpers(self, base_class):
if not self._members_emitter:
return
if self._interface.id not in custom_html_constructors:
self._members_emitter.Emit(
' // To suppress missing implicit constructor warnings.\n'
' factory $CLASSNAME._() { '
'throw new UnsupportedError("Not supported"); }\n',
CLASSNAME=self._interface_type_info.implementation_name())
def DeclareAttribute(self, attribute, type_name, attr_name, read_only):
""" Declares an attribute but does not include the code to invoke it.
"""
if read_only:
# HACK(terry): Element is not abstract for Dartium so isContentEditable
# must have a body see impl_Element.darttemplate
if (self._interface.id == 'Element' and
attr_name == 'isContentEditable' and self._dart_js_interop):
return
else:
template = '\n $TYPE get $NAME;\n'
else:
template = '\n $TYPE $NAME;\n'
self._members_emitter.Emit(template, NAME=attr_name, TYPE=type_name)
def DeclareOperation(self, operation, return_type_name, method_name):
""" Declares an operation but does not include the code to invoke it.
Arguments:
operation - The operation to be declared.
return_type_name - The name of the return type.
method_name - The name of the method.
"""
# HACK(terry): Element is not abstract for Dartium so click
# must have a body see impl_Element.darttemplate
if (self._interface.id == 'Element' and method_name == 'click' and
self._dart_js_interop):
return
else:
template = '\n $TYPE $NAME($PARAMS);\n'
self._members_emitter.Emit(
template,
TYPE=return_type_name,
NAME=method_name,
PARAMS=operation.ParametersAsDeclaration(self._DartType))
def EmitListMixin(self, element_name, nullable):
# TODO(sra): Use separate mixins for mutable implementations of List<T>.
# TODO(sra): Use separate mixins for typed array implementations of List<T>.
template_file = 'immutable_list_mixin.darttemplate'
has_length = False
has_length_setter = False
def _HasExplicitIndexedGetter(self):
return any(op.id == 'getItem' for op in self._interface.operations)
def _HasCustomIndexedGetter(self):
return 'CustomIndexedGetter' in self._interface.ext_attrs
def _HasNativeIndexedGetter(self):
return not (_HasCustomIndexedGetter(self) or
_HasExplicitIndexedGetter(self))
if _HasExplicitIndexedGetter(self):
getter_name = 'getItem'
else:
getter_name = '_nativeIndexedGetter'
for attr in self._interface.attributes:
if attr.id == 'length':
has_length = True
has_length_setter = not attr.is_read_only
has_num_items = any(
attr.id == 'numberOfItems' for attr in self._interface.attributes)
template = self._template_loader.Load(
template_file, {
'DEFINE_LENGTH_AS_NUM_ITEMS':
not has_length and has_num_items,
'DEFINE_LENGTH_SETTER':
not has_length_setter,
'USE_NATIVE_INDEXED_GETTER':
_HasNativeIndexedGetter(self) or
_HasExplicitIndexedGetter(self),
})
if nullable:
element_js = element_name + "|Null"
else:
element_js = element_name
self._members_emitter.Emit(
template, E=element_name, EJS=element_js, GETTER=getter_name)
def SecureOutputType(self,
type_name,
is_dart_type=False,
can_narrow_type=False,
nullable=False):
""" Converts the type name to the secure type name for return types.
Arguments:
can_narrow_type - True if the output type can be narrowed further than
what would be accepted for input, used to narrow num APIs down to double
or int.
"""
if is_dart_type:
dart_name = type_name
else:
type_info = self._TypeInfo(type_name)
dart_name = type_info.dart_type()
if can_narrow_type and dart_name == 'num':
dart_name = type_info.native_type()
# We only need to secure Window. Only local History and Location are
# returned in generated code.
assert (dart_name != 'HistoryBase' and dart_name != 'LocationBase')
if dart_name == 'Window':
return _secure_base_types[dart_name]
if self._nnbd and nullable:
dart_name = dart_name + '?'
return dart_name
def SecureBaseName(self, type_name):
if type_name in _secure_base_types:
return _secure_base_types[type_name]
def is_DOM_type(self, type_name):
try:
self._type_registry.TypeInfo(type_name)
return True
except RuntimeError:
return False
def _NarrowToImplementationType(self, type_name):
return self._type_registry.TypeInfo(type_name).narrow_dart_type()
def _NarrowInputType(self, type_name):
return self._NarrowToImplementationType(type_name)
def _DartType(self, type_name):
return self._type_registry.DartType(type_name)
def _TypeInfo(self, type_name):
return self._type_registry.TypeInfo(type_name)
def _CallbackConvert(self, argType, info):
if self._database.HasInterface(argType):
interface = self._database.GetInterface(argType)
if "Callback" in interface.ext_attrs:
return interface.ext_attrs['Callback']
return None
def _ConvertArgumentTypes(self, stmts_emitter, arguments, argument_count,
info):
temp_version = [0]
converted_arguments = []
target_parameters = []
calling_parameters = []
for position, arg in enumerate(arguments[:argument_count]):
callBackInfo = self._CallbackConvert(
arg.type.id, info) # Returns callback arity (# of parameters)
if callBackInfo is None:
conversion = self._InputConversion(arg.type.id,
info.declared_name)
else:
conversion = self._InputConversion('Callback',
info.declared_name)
param_name = arguments[position].id
if conversion:
temp_version[0] += 1
temp_name = '%s_%s' % (param_name, temp_version[0])
temp_type = conversion.output_type
stmts_emitter.Emit(
'$(INDENT)$TYPE $NAME = $CONVERT($ARG);\n'
if callBackInfo is None else
'$(INDENT)$TYPE $NAME = $CONVERT($ARG, $ARITY);\n',
TYPE=TypeOrVar(temp_type),
NAME=temp_name,
CONVERT=conversion.function_name,
ARG=info.param_infos[position].name,
ARITY=callBackInfo)
converted_arguments.append(temp_name)
param_type = temp_type
verified_type = temp_type # verified by assignment in checked mode.
else:
converted_arguments.append(info.param_infos[position].name)
if self._database.HasTypeDef(arg.type.id):
param_type = 'dynamic'
else:
param_type = self._NarrowInputType(arg.type.id)
# Verified by argument checking on entry to the dispatcher.
verified_type = self._InputType(
info.param_infos[position].type_id, info)
# The native method does not need an argument type if we know the type.
# But we do need the native methods to have correct function types, so
# be conservative.
if param_type == verified_type:
if param_type in [
'String', 'num', 'int', 'double', 'bool',
'Object'
]:
param_type = 'dynamic'
target_parameters.append(
'%s%s' % (TypeOrNothing(param_type, nullable=arg.type.nullable),
param_name))
calling_parameters.append(',%s ' % param_name)
return target_parameters, converted_arguments, calling_parameters
def _InputType(self, type_name, info):
conversion = self._InputConversion(type_name, info.declared_name)
if conversion:
return conversion.input_type
else:
            # If it's a typedef (i.e., a union type), return dynamic.
if self._database.HasTypeDef(type_name):
return 'dynamic'
else:
return self._NarrowInputType(
type_name) if type_name else 'dynamic'
|
py | 1a5388a257a024c06475a53c86c2eadd9955d5d1 | class RadialDistortion():
"""
Mix-in for sensors that use a radial distortion model.
"""
@property
def usgscsm_distortion_model(self):
"""
Expects odtk to be defined. This should be a list containing
the radial distortion coefficients
Returns
-------
: dict
Dictionary containing the usgscsm distortion model
"""
return {
"radial": {
"coefficients" : self.odtk
}
}
class NoDistortion():
"""
Mix-in for sensors and data sets that do not have a distortion model.
"""
@property
def usgscsm_distortion_model(self):
"""
Returns the specification for no distortion in usgscsm.
Returns
-------
: dict
Dictionary containing the usgscsm specification for no distortion.
"""
return {"radial": {"coefficients": [0.0, 0.0, 0.0]}}
|
py | 1a538993512f873a5a3aa59e701384bcb0017ed3 | import thingspeak
import time
import Adafruit_DHT
import Adafruit_BMP.BMP085 as BMP085
channel_id = 992285 # PUT CHANNEL ID HERE
write_key = 'EV3BMJDLOVOTMDR0' # PUT YOUR WRITE KEY HERE
read_key = 'HNUY60YMJJXVG6KS' # PUT YOUR READ KEY HERE
DHT_SENSOR = Adafruit_DHT.DHT22
DHT_PIN = 4
def muestreo():
try:
humidity, temperature = Adafruit_DHT.read_retry(DHT_SENSOR, DHT_PIN)
sensor = BMP085.BMP085()
#print("Temperatura={0:0.1f}*C\nHumedad={1:0.1f}%".format(temperature, humidity))
time.sleep(20)
T1 = temperature
H1 = humidity
P1 = sensor.read_pressure()
        time.sleep(20)
        humidity, temperature = Adafruit_DHT.read_retry(DHT_SENSOR, DHT_PIN)
        T2 = temperature
        H2 = humidity
        P2 = sensor.read_pressure()
        time.sleep(20)
        humidity, temperature = Adafruit_DHT.read_retry(DHT_SENSOR, DHT_PIN)
        T3 = temperature
        H3 = humidity
        P3 = sensor.read_pressure()
SumT = T1+T2+T3
SumH = H1+H2+H3
SumP = P1+P2+P3
Temperatura = SumT/3
Humedad = SumH/3
Presion = SumP/3
f = open('datos.csv','a+') # w : writing mode / r : reading mode / a : appending mode
#f.write('T:''{}'.format(Temperatura))
#f.write('Date,Time,Temperatura,Humedad,Presion\r\n')
f.write('{0},{1},{2:0.1f}*C,{3:0.1f}%,{4:0.1f}Pa\r\n'.format(time.strftime('%m/%d/%y'), time.strftime('%H:%M'), Temperatura,Humedad,Presion))
#f.write('H:''{}'.format(Humedad))
#f.write('P:''{}'.format(Presion))
#f.close()
print "Temperatura={0:0.1f}*C".format(Temperatura)
print "Humedad={0:0.1f}%".format(Humedad)
#print 'Temp = {0:0.2f} *C'.format(sensor.read_temperature())
print 'Presion = {0:0.2f} Pa'.format(Presion)
#print 'Altitude = {0:0.2f} m'.format(sensor.read_altitude())
#print 'Sealevel Pressure = {0:0.2f} Pa'.format(sensor.read_sealevel_pressure())
# write
#response = channel.update({'field1': Temperatura, 'field2': Humedad, 'field3': Presion})
except:
print("Error")
if __name__ == "__main__":
#channel = thingspeak.Channel(id=channel_id, write_key=write_key, api_key=read_key)
#while True:
muestreo()
# free account has an api limit of 15sec
#while True:
# humidity, temperature = Adafruit_DHT.read_retry(DHT_SENSOR, DHT_PIN)
# if humidity is not None and temperature is not None:
# print("Temperatura={0:0.1f}*C\nHumedad={1:0.1f}%".format(temperature, humidity))
#print 'Temperatura={0:0.1f}*C'.format(temperature)
#print 'Humedad={1:0.1f}%'.format(humidity)
# sensor = BMP085.BMP085()
#print 'Temp = {0:0.2f} *C'.format(sensor.read_temperature())
# print 'Presion = {0:0.2f} Pa'.format(sensor.read_pressure())
#print 'Altitude = {0:0.2f} m'.format(sensor.read_altitude())
#print 'Sealevel Pressure = {0:0.2f} Pa'.format(sensor.read_sealevel_pressure())
# time.sleep(15)
# else:
# print("Failed to retrieve data from humidity sensor")
|
py | 1a5389d86f742ed4eccf775e5cb672d19b0c714f | import sys, os, re, time, glob
import alignlib
import ProfileLibrary
from AddaModule import AddaModuleRecord
import AddaIO
import SegmentedFile
import Experiment as E
class AddaProfiles( AddaModuleRecord ):
"""build a profile library from the pairsdb graph.
input
``files:input_graph``: the pairwise alignment graph
output
``files:output_profiles``: a profile library (:class:`ProfileLibrary.ProfileLibrary`)
"""
mName = "Profiles"
def __init__(self, *args, **kwargs ):
AddaModuleRecord.__init__( self, *args, **kwargs )
self.mFilenameProfile = self.mConfig.get( "files", "output_profiles", "adda.profiles" )
self.mScaleFactor = self.mConfig.get( "profiles", "scale_factor", 0.3 )
self.mMaxNumNeighbours = self.mConfig.get( "profiles", "max_neighbours", 1000)
self.mMaxEvalue = self.mConfig.get( "profiles", "max_evalue", 0.0)
self.mPrepareProfile = self.mConfig.get( "profiles", "prepare_profile", False )
def isComplete( self ):
fn, fi = ProfileLibrary.getFileNames( self.mFilenameProfile + self.getSlice() )
return SegmentedFile.isComplete( fi )
def startUp( self ):
if self.isComplete(): return
if self.mAppend:
self.mProfileLibrary = ProfileLibrary.ProfileLibrary( self.mFilenameProfile + self.getSlice(),
"a" )
self.mContinueAt = self.mProfileLibrary.getLastInsertedKey()
self.info("processing will continue after %s" % (str( self.mContinueAt ) ) )
else:
self.mProfileLibrary = ProfileLibrary.ProfileLibrary( self.mFilenameProfile + self.getSlice(),
"w",
force=self.mForce )
# set default values
self.mProfileLibrary.setLogOddor( alignlib.makeLogOddorDirichlet( self.mScaleFactor ) )
self.mProfileLibrary.setRegularizor( alignlib.makeRegularizorDirichletPrecomputed() )
self.mProfileLibrary.setWeightor( alignlib.makeWeightor() )
alignlib.setDefaultEncoder( alignlib.getEncoder( alignlib.Protein20 ) )
def buildMali(self, query_nid, neighbours ):
"""build a multiple alignment from a set of neighbours.
"""
# build multiple alignment
mali = alignlib.makeMultipleAlignment()
query_sequence = self.mFasta.getSequence( query_nid )
mali.add( alignlib.makeAlignatum( query_sequence ) )
qseq = alignlib.makeSequence( query_sequence )
alignator = alignlib.makeAlignatorDPFull( alignlib.ALIGNMENT_LOCAL,
-10, -2)
nskipped = 0
for n in neighbours[:self.mMaxNumNeighbours]:
if n.mSbjctToken == query_nid: continue
if n.mEvalue > self.mMaxEvalue:
nskipped += 1
continue
sequence = self.mFasta.getSequence( n.mSbjctToken )
E.debug( "adding %s" % str(n) )
map_query2sbjct = n.getAlignment()
if map_query2sbjct == None:
sseq = alignlib.makeSequence( sequence )
qseq.useSegment( n.mQueryFrom, n.mQueryTo )
sseq.useSegment( n.mSbjctFrom, n.mSbjctTo )
map_query2sbjct = alignlib.makeAlignmentVector()
alignator.align( map_query2sbjct, qseq, sseq )
if map_query2sbjct.getLength() == 0:
self.warn( "empty alignment: %s" % str( n ) )
nskipped += 1
continue
if map_query2sbjct.getRowTo() > len(query_sequence):
self.warn( "alignment out of bounds for query: %i>%i, line=%s" %\
(map_query2sbjct.getRowTo(), len(query_sequence), str(n)))
nskipped += 1
continue
elif map_query2sbjct.getColTo() > len(sequence):
self.warn( "alignment out of bounds for sbjct: %i>%i, line=%s" %\
(map_query2sbjct.getColTo(), len(sequence), str(n)))
nskipped += 1
continue
try:
mali.add( alignlib.makeAlignatum( sequence ),
map_query2sbjct,
mali_is_in_row = True,
insert_gaps_mali = False,
insert_gaps_alignatum = True,
use_end_mali = True,
use_end_alignatum = False )
except RuntimeError, msg:
self.warn( "problem when building alignment for %s: msg=%s" % (str(n), msg))
nskipped += 1
continue
if E.getLogLevel() >= 6:
x = 1
outfile = open( "mali_%s" % query_nid, "w" )
for line in str(mali).split("\n"):
try:
a,b,c = line.split("\t")
except ValueError:
continue
outfile.write( ">%06i\n%s\n" % (x,b) )
x += 1
outfile.close()
if nskipped > 0:
self.warn( "nid %s: %i/%i alignments skipped" % (str(query_nid),
nskipped,
min( len(neighbours), self.mMaxNumNeighbours ) ) )
return mali
#------------------------------------------------------------------
def applyMethod(self, neighbours ):
"""output the graph.
If mMergeRepeats is set, consecutive links are merged.
Links are consecutive if they are adjacent both in the
query and in the sbjct.
This ensures that 1:many repeats are not merged, but will
cover alignments split by transmembrane regions.
"""
if self.mContinueAt:
if neighbours.mQueryToken == self.mContinueAt:
self.info("continuing processing at %s" % str(self.mContinueAt ) )
self.mContinueAt = None
return
query_nid = neighbours.mQueryToken
self.debug( "working on profile %s with %i neighbours" % (query_nid, len(neighbours.mMatches) ) )
mali = self.buildMali( query_nid, neighbours.mMatches )
self.debug( "built mali for %s with %i neighbours" % (query_nid, len(neighbours.mMatches) ) )
profile = alignlib.makeProfile( mali )
self.debug( "built profile for %s with %i neighbours" % (query_nid, len(neighbours.mMatches) ) )
profile.setStorageType( alignlib.Sparse )
if self.mPrepareProfile:
profile.prepare()
self.debug( "prepared profile for %s with %i neighbours" % (query_nid, len(neighbours.mMatches) ) )
self.mProfileLibrary.add( query_nid, profile )
self.debug( "saved profile for %s with %i neighbours" % (query_nid, len(neighbours.mMatches) ) )
#------------------------------------------------------------------
def finish( self ):
"""finish processing.
add entries for sequences who only appear in the sbjct field.
"""
if not self.isSubset():
nids = self.mFasta.getContigSizes().keys()
nadded = 0
for nid in sorted(nids):
if nid not in self.mProfileLibrary:
self.applyMethod( AddaIO.NeighboursRecord( nid, [] ) )
nadded += 1
self.mOutput += nadded
self.info( "added %i profiles for sequences without neighbours" % nadded )
self.mProfileLibrary.close()
AddaModuleRecord.finish(self)
#--------------------------------------------------------------------------
def merge(self):
"""merge runs from parallel computations.
        returns True if merging was successful.
"""
if self.isComplete(): return
infiles = glob.glob( "%s*" % self.mFilenameProfile )
# remove suffixes
infiles = list(set([ x[:-4] for x in infiles if x != self.mFilenameProfile ]))
infiles.sort()
last_nid = None
found = set()
ninput, noutput, nfound, nunknown, nduplicate = 0, 0, 0, 0, 0
tokens = set(self.mFasta.keys())
self.mProfileLibrary = ProfileLibrary.ProfileLibrary( self.mFilenameProfile,
"w" )
for filename in infiles:
infile = ProfileLibrary.ProfileLibrary( filename, "r" )
for nid, profile in infile.iteritems_sorted():
ninput += 1
if nid in found:
                    nduplicate += 1
self.warn("duplicate nid: %i in file %s" % (nid, filename))
if nid not in tokens:
nunknown += 1
self.warn("unknown nid: %i in file %s" % (nid, filename))
found.add(nid)
nfound += 1
self.mProfileLibrary.add( nid, profile )
noutput += 1
missing = tokens.difference( found )
if len(missing) > 0:
self.warn( "the following nids were missing: %s" % str(missing) )
self.info( "adding %i missing nids" % len(missing))
for nid in missing:
self.applyMethod( AddaIO.NeighboursRecord( nid, [] ) )
self.info( "merging: parts=%i, ninput=%i, noutput=%i, nfound=%i, nmissing=%i, nduplicate=%i, nunknown=%i" %\
(len(infiles), ninput, noutput, nfound, len(missing), nduplicate, nunknown ) )
self.info( "deleting %i parts" % len(infiles) )
for infile in infiles:
fn, fi = ProfileLibrary.getFileNames( infile )
os.remove( fn )
os.remove( fi )
return len(missing) == 0 and nduplicate == 0 and nunknown == 0
|
py | 1a5389d9e1548b6ad729db36c980b6a4c1b2ea20 | import os
import struct
import numpy as np
"""
Loosely inspired by http://abel.ee.ucla.edu/cvxopt/_downloads/mnist.py
which is GPL licensed.
"""
def read(dataset = "training", path = "."):
"""
Python function for importing the MNIST data set. It returns an iterator
of 2-tuples with the first element being the label and the second element
being a numpy.uint8 2D array of pixel data for the given image.
"""
if dataset is "training":
fname_img = os.path.join(path, 'train-images-idx3-ubyte')
fname_lbl = os.path.join(path, 'train-labels-idx1-ubyte')
elif dataset is "testing":
fname_img = os.path.join(path, 't10k-images-idx3-ubyte')
fname_lbl = os.path.join(path, 't10k-labels-idx1-ubyte')
else:
        raise ValueError("dataset must be 'testing' or 'training'")
# Load everything in some numpy arrays
with open(fname_lbl, 'rb') as flbl:
magic, num = struct.unpack(">II", flbl.read(8))
lbl = np.fromfile(flbl, dtype=np.int8)
with open(fname_img, 'rb') as fimg:
magic, num, rows, cols = struct.unpack(">IIII", fimg.read(16))
img = np.fromfile(fimg, dtype=np.uint8).reshape(len(lbl), rows, cols)
get_img = lambda idx: (lbl[idx], img[idx])
# Create an iterator which returns each image in turn
for i in range(len(lbl)): # xrange in python 2.7, range in python 3.6
yield get_img(i)
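
# A minimal usage sketch (not part of the original module). It assumes the raw
# MNIST files (train-images-idx3-ubyte, train-labels-idx1-ubyte, ...) have
# already been downloaded into `path`; adjust the path as needed.
if __name__ == "__main__":
    training_data = read(dataset="training", path=".")
    label, pixels = next(training_data)   # first (label, 28x28 uint8 image) pair
    print(label, pixels.shape)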
|
py | 1a5389f4993a787676ecf3cef6eae46d4c56f22b | # -*- coding: utf-8 -*-
from south.db import db
from django.db import models
from django_lean.experiments.models import *
class Migration:
def forwards(self, orm):
        # Rename model 'DailyReport' to 'DailyActivityReport'
db.rename_table('experiments_dailyreport', 'experiments_dailyactivityreport')
def backwards(self, orm):
        # Rename model 'DailyActivityReport' back to 'DailyReport'
        db.rename_table('experiments_dailyactivityreport', 'experiments_dailyreport')
models = {
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'unique': 'True'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '30', 'unique': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'experiments.anonymousvisitor': {
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'experiments.dailyactivityreport': {
'confidence': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'control_group_size': ('django.db.models.fields.IntegerField', [], {}),
'control_score': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['experiments.Experiment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'test_group_size': ('django.db.models.fields.IntegerField', [], {}),
'test_score': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'experiments.experiment': {
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'unique': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'experiments.goalrecord': {
'anonymous_visitor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['experiments.AnonymousVisitor']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'goal_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['experiments.GoalType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'experiments.goaltype': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'unique': 'True'})
},
'experiments.participant': {
'Meta': {'unique_together': "(('user', 'experiment'), ('anonymous_visitor', 'experiment'))"},
'anonymous_visitor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['experiments.AnonymousVisitor']", 'null': 'True', 'blank': 'True'}),
'enrollment_date': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['experiments.Experiment']"}),
'group': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
}
}
complete_apps = ['experiments']
|
py | 1a538a0bde8e874d68a0902f1b80ea67a01a6095 | #!/usr/bin/env python3
#
# Copyright (c) 2013-2022, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# // Author: Filippov Ilia
from collections import OrderedDict
from enum import Enum, auto
import re
import traceback
class SelfbuildType(Enum):
# not a selfbuild
SINGLE = auto()
# complete selfbuild
SELF = auto()
# first phase of selfbuild only
SELF_PHASE1 = auto()
# second phase of selfbuild only
SELF_PHASE2 = auto()
def alloy_error(line, error_type = 1):
global return_status
if error_type == 1:
return_status = 1
common.error(line, error_type)
def tail_and_save(file_in, file_out, tail = 100):
with open(file_in, 'r') as f_in:
lines = f_in.readlines()[-tail:]
with open(file_out, 'w') as f_out:
f_out.writelines(lines)
def setting_paths(llvm, ispc, sde):
if llvm != "":
os.environ["LLVM_HOME"]=llvm
if ispc != "":
os.environ["ISPC_HOME"]=ispc
if sde != "":
os.environ["SDE_HOME"]=sde
def get_sde():
sde_exe = ""
PATH_dir = os.environ["PATH"].split(os.pathsep)
if current_OS == "Windows":
sde_n = "sde.exe"
else:
sde_n = "sde"
for counter in PATH_dir:
if os.path.exists(counter + os.sep + sde_n) and sde_exe == "":
sde_exe = counter + os.sep + sde_n
if os.environ.get("SDE_HOME") != None:
if os.path.exists(os.environ.get("SDE_HOME") + os.sep + sde_n):
sde_exe = os.environ.get("SDE_HOME") + os.sep + sde_n
return sde_exe
def check_LLVM(which_LLVM):
answer = []
if which_LLVM[0] == " ":
return answer
p = os.environ["LLVM_HOME"]
for i in range(0,len(which_LLVM)):
if not os.path.exists(p + os.sep + "bin-" + which_LLVM[i] + os.sep + "bin"):
answer.append(which_LLVM[i])
return answer
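# Illustrative sketch of check_LLVM (hypothetical LLVM_HOME layout, not part of the script):
#   os.environ["LLVM_HOME"] = "/opt/llvm"      # assume it contains only bin-13.0/bin
#   check_LLVM(["13.0", "trunk"])              # -> ["trunk"], i.e. the versions still to be built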
def try_do_LLVM(text, command, from_validation, verbose=False):
print_debug("Command line: "+command+"\n", True, alloy_build)
if from_validation == True:
text = text + "\n"
print_debug("Trying to " + text, from_validation, alloy_build)
with subprocess.Popen(command, shell=True,universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as proc:
for line in proc.stdout:
print_debug(line, not verbose, alloy_build)
proc.wait()
exit_status = proc.returncode
if exit_status != 0:
print_debug("ERROR.\n", from_validation, alloy_build)
alloy_error("can't " + text, 1)
print_debug("DONE.\n", from_validation, alloy_build)
def checkout_LLVM(component, version_LLVM, target_dir, from_validation, verbose):
# Identify the component
GIT_REPO_BASE="https://github.com/llvm/llvm-project.git"
# Identify the version
# An example of using branch (instead of final tag) is the following (for 9.0):
# git: "origin/release/9.x"
if version_LLVM == "trunk":
GIT_TAG="main"
elif version_LLVM == "14_0":
GIT_TAG="origin/release/14.x"
elif version_LLVM == "13_0":
GIT_TAG="llvmorg-13.0.1"
elif version_LLVM == "12_0":
GIT_TAG="llvmorg-12.0.1"
elif version_LLVM == "11_1":
GIT_TAG="llvmorg-11.1.0"
elif version_LLVM == "11_0":
GIT_TAG="llvmorg-11.0.1"
elif version_LLVM == "10_0":
GIT_TAG="llvmorg-10.0.1"
elif version_LLVM == "9_0":
GIT_TAG="llvmorg-9.0.1"
elif version_LLVM == "8_0":
GIT_TAG="llvmorg-8.0.1"
elif version_LLVM == "7_1":
GIT_TAG="llvmorg-7.1.0"
elif version_LLVM == "7_0":
GIT_TAG="llvmorg-7.0.1"
elif version_LLVM == "6_0":
GIT_TAG="llvmorg-6.0.1"
else:
alloy_error("Unsupported llvm version: " + version_LLVM, 1)
try_do_LLVM("clone "+component+" from "+GIT_REPO_BASE+" to "+target_dir+" ",
"git clone "+GIT_REPO_BASE+" "+target_dir,
from_validation, verbose)
if GIT_TAG != "main":
os.chdir(target_dir)
try_do_LLVM("switch to "+GIT_TAG+" tag ",
"git checkout -b "+GIT_TAG+" "+GIT_TAG, from_validation, verbose)
os.chdir("..")
# ISPC uses LLVM dumps for debug output, so to build it correctly these functions must be
# present in the LLVM libraries. In LLVM 5.0 they are not there by default and require explicit enabling.
# In later versions this functionality is triggered by enabling assertions.
def get_llvm_enable_dump_switch(version_LLVM):
return " -DLLVM_ENABLE_DUMP=ON "
def get_llvm_disable_assertions_switch(llvm_disable_assertions):
if llvm_disable_assertions == True:
return " -DLLVM_ENABLE_ASSERTIONS=OFF"
else:
return " -DLLVM_ENABLE_ASSERTIONS=ON"
def build_LLVM(version_LLVM, folder, debug, selfbuild, extra, from_validation, force, make, gcc_toolchain_path, llvm_disable_assertions, verbose):
print_debug("Building LLVM. Version: " + version_LLVM + ".\n", from_validation, alloy_build)
# Here we understand what and where do we want to build
current_path = os.getcwd()
llvm_home = os.environ["LLVM_HOME"]
make_sure_dir_exists(llvm_home)
FOLDER_NAME=version_LLVM
    version_LLVM = re.sub(r'\.', '_', version_LLVM)
os.chdir(llvm_home)
if folder == "":
folder = FOLDER_NAME
if debug == True:
folder = folder + "dbg"
LLVM_SRC="llvm-" + folder
LLVM_BUILD="build-" + folder
LLVM_BIN="bin-" + folder
if os.path.exists(LLVM_BIN + os.sep + "bin") and not force:
alloy_error("you have folder " + LLVM_BIN + ".\nIf you want to rebuild use --force", 1)
LLVM_BUILD_selfbuild = LLVM_BUILD + "_temp"
LLVM_BIN_selfbuild = LLVM_BIN + "_temp"
    # Selfbuild phase2 assumes that the directories are already created; for all other cases, create them.
if selfbuild is SelfbuildType.SINGLE or selfbuild is SelfbuildType.SELF or selfbuild is SelfbuildType.SELF_PHASE1:
common.remove_if_exists(LLVM_SRC)
common.remove_if_exists(LLVM_BUILD)
common.remove_if_exists(LLVM_BIN)
if selfbuild is SelfbuildType.SELF or selfbuild is SelfbuildType.SELF_PHASE1:
common.remove_if_exists(LLVM_BUILD_selfbuild)
common.remove_if_exists(LLVM_BIN_selfbuild)
print_debug("Using folders: " + LLVM_SRC + " " + LLVM_BUILD + " " + LLVM_BIN + " in " +
llvm_home + "\n", from_validation, alloy_build)
    # Starting from MacOS 10.9 Mavericks, C and C++ library headers are part of the SDK, not the OS itself.
    # The system root must be specified during the compiler build, so the compiler knows the default location to search for headers.
    # C headers are located at the system root location, while C++ headers are part of the toolchain.
    # I.e. specifying the system root solves the C header problem. For C++ headers we enable a libc++ build as part of the clang build (our own toolchain).
    # Note that on Sierra there's an issue with using C headers from the High Sierra SDK, which manifests as a compile error:
    # error: 'utimensat' is only available on macOS 10.13 or newer
    # This is due to using an SDK targeting an OS which is newer than the current one.
mac_system_root = ""
if current_OS == "MacOS" \
and int(current_OS_version.split(".")[0]) >= 13:
search_path = os.environ["PATH"].split(os.pathsep)
found_xcrun = False
for path in search_path:
if os.path.exists(os.path.join(path, "xcrun")):
found_xcrun = True
if found_xcrun:
mac_system_root = "`xcrun --show-sdk-path`"
else:
alloy_error("Can't find XCode (xcrun tool) - it's required on MacOS 10.9 and newer", 1)
# prepare configuration parameters
llvm_enable_projects = " -DLLVM_ENABLE_PROJECTS=\"clang"
if current_OS == "MacOS" and int(current_OS_version.split(".")[0]) >= 13:
        # Starting with MacOS 10.9 Mavericks, the system doesn't contain headers for the standard C++ library and
        # the default library is libc++, not libstdc++. The headers are part of XCode now. But we are checking out
        # headers as part of the LLVM source tree, so they will be installed in the clang location and clang will be
        # able to find them. They may not match the library installed in the system, but that should not cause problems.
        # Note that we can also build a libc++ library, but it must be in the system default location or be passed
        # to the linker explicitly (either through the command line or environment variables). So we are not doing it
        # currently, to keep the build process simpler.
        # We either need to explicitly opt out of using libcxxabi from this repo, or build and use it,
        # otherwise a build error will occur (an attempt to use the just-built libcxxabi, which was not built).
        # Building it seems to be the better option.
llvm_enable_projects +=";libcxx;libcxxabi"
if current_OS == "Linux":
# OpenMP is needed for Xe enabled builds.
# Starting from Ubuntu 20.04 libomp-dev package doesn't install omp.h to default location.
llvm_enable_projects +=";openmp"
if extra == True:
llvm_enable_projects +=";compiler-rt;clang-tools-extra"
llvm_enable_projects += "\""
if selfbuild is SelfbuildType.SINGLE or selfbuild is SelfbuildType.SELF or selfbuild is SelfbuildType.SELF_PHASE1:
# clone llvm repo
checkout_LLVM("llvm", version_LLVM, LLVM_SRC, from_validation, verbose)
# patch llvm
os.chdir(LLVM_SRC)
patches = glob.glob(os.environ["ISPC_HOME"] + os.sep + "llvm_patches" + os.sep + "*.*")
for patch in patches:
if version_LLVM in os.path.basename(patch):
try_do_LLVM("patch LLVM with patch " + patch + " ", "git apply " + patch, from_validation, verbose)
os.chdir("../")
# configuring llvm and build for first phase of selfbuild
cmakelists_path = LLVM_SRC + "/llvm"
if selfbuild is SelfbuildType.SELF or selfbuild is SelfbuildType.SELF_PHASE1:
print_debug("Making selfbuild and use folders " + LLVM_BUILD_selfbuild + " and " +
LLVM_BIN_selfbuild + "\n", from_validation, alloy_build)
os.makedirs(LLVM_BUILD_selfbuild)
os.makedirs(LLVM_BIN_selfbuild)
os.chdir(LLVM_BUILD_selfbuild)
try_do_LLVM("configure release version for selfbuild ",
"cmake -G " + "\"" + generator + "\"" + " -DCMAKE_EXPORT_COMPILE_COMMANDS=ON" +
" -DCMAKE_INSTALL_PREFIX=" + llvm_home + "/" + LLVM_BIN_selfbuild +
" -DCMAKE_BUILD_TYPE=Release" +
llvm_enable_projects +
get_llvm_enable_dump_switch(version_LLVM) +
get_llvm_disable_assertions_switch(llvm_disable_assertions) +
" -DLLVM_INSTALL_UTILS=ON" +
((" -DGCC_INSTALL_PREFIX=" + gcc_toolchain_path) if gcc_toolchain_path != "" else "") +
((" -DCMAKE_C_COMPILER=" + gcc_toolchain_path+"/bin/gcc") if gcc_toolchain_path != "" else "") +
((" -DCMAKE_CXX_COMPILER=" + gcc_toolchain_path+"/bin/g++") if gcc_toolchain_path != "" else "") +
((" -DDEFAULT_SYSROOT=" + mac_system_root) if mac_system_root != "" else "") +
" -DLLVM_TARGETS_TO_BUILD=AArch64\;ARM\;X86" +
" -DLLVM_EXPERIMENTAL_TARGETS_TO_BUILD=WebAssembly" +
" ../" + cmakelists_path,
from_validation, verbose)
try_do_LLVM("build release version for selfbuild ", make, from_validation, verbose)
try_do_LLVM("install release version for selfbuild ", "make install", from_validation, verbose)
os.chdir("../")
# set compiler to use if this is selfbuild
selfbuild_compiler = ""
if selfbuild is SelfbuildType.SELF or selfbuild is SelfbuildType.SELF_PHASE2:
selfbuild_compiler = (" -DCMAKE_C_COMPILER=" +llvm_home+ "/" + LLVM_BIN_selfbuild + "/bin/clang " +
" -DCMAKE_CXX_COMPILER="+llvm_home+ "/" + LLVM_BIN_selfbuild + "/bin/clang++ ")
print_debug("Use compiler for selfbuild: " + selfbuild_compiler + "\n", from_validation, alloy_build)
# configure and build for regular build or second phase of selfbuild
if selfbuild is SelfbuildType.SINGLE or selfbuild is SelfbuildType.SELF or selfbuild is SelfbuildType.SELF_PHASE2:
os.makedirs(LLVM_BUILD)
os.makedirs(LLVM_BIN)
os.chdir(LLVM_BUILD)
build_type = "Release" if debug == False else "Debug"
if current_OS != "Windows":
try_do_LLVM("configure " + build_type + " version ",
"cmake -G " + "\"" + generator + "\"" + " -DCMAKE_EXPORT_COMPILE_COMMANDS=ON" +
selfbuild_compiler +
" -DCMAKE_INSTALL_PREFIX=" + llvm_home + "/" + LLVM_BIN +
" -DCMAKE_BUILD_TYPE=" + build_type +
llvm_enable_projects +
get_llvm_enable_dump_switch(version_LLVM) +
get_llvm_disable_assertions_switch(llvm_disable_assertions) +
" -DLLVM_INSTALL_UTILS=ON" +
((" -DGCC_INSTALL_PREFIX=" + gcc_toolchain_path) if gcc_toolchain_path != "" else "") +
((" -DCMAKE_C_COMPILER=" + gcc_toolchain_path+"/bin/gcc") if gcc_toolchain_path != "" and selfbuild_compiler == "" else "") +
((" -DCMAKE_CXX_COMPILER=" + gcc_toolchain_path+"/bin/g++") if gcc_toolchain_path != "" and selfbuild_compiler == "" else "") +
((" -DDEFAULT_SYSROOT=" + mac_system_root) if mac_system_root != "" else "") +
" -DLLVM_TARGETS_TO_BUILD=AArch64\;ARM\;X86" +
" -DLLVM_EXPERIMENTAL_TARGETS_TO_BUILD=WebAssembly" +
" ../" + cmakelists_path,
from_validation, verbose)
else:
try_do_LLVM("configure " + build_type + " version ",
'cmake -Thost=x64 -G ' + '\"' + generator + '\"' + ' -DCMAKE_INSTALL_PREFIX="..\\'+ LLVM_BIN + '" ' +
' -DCMAKE_BUILD_TYPE=' + build_type +
llvm_enable_projects +
get_llvm_enable_dump_switch(version_LLVM) +
get_llvm_disable_assertions_switch(llvm_disable_assertions) +
' -DLLVM_INSTALL_UTILS=ON' +
                        ' -DLLVM_TARGETS_TO_BUILD=AArch64\\;ARM\\;X86' +
' -DLLVM_EXPERIMENTAL_TARGETS_TO_BUILD=WebAssembly' +
' -DLLVM_LIT_TOOLS_DIR="C:\\gnuwin32\\bin" ..\\' + cmakelists_path,
from_validation, verbose)
# building llvm
if current_OS != "Windows":
try_do_LLVM("build LLVM ", make, from_validation, verbose)
try_do_LLVM("install LLVM ", "make install", from_validation, verbose)
else:
try_do_LLVM("build LLVM and then install LLVM ", "msbuild INSTALL.vcxproj /V:m /p:Platform=x64 /p:Configuration=" + build_type + " /t:rebuild", from_validation, verbose)
os.chdir(current_path)
def unsupported_llvm_targets(LLVM_VERSION):
prohibited_list = {"6.0":["avx512skx-x8", "avx512skx-x4", "avx512skx-x64", "avx512skx-x32"],
"7.0":["avx512skx-x8", "avx512skx-x4", "avx512skx-x64", "avx512skx-x32"],
"8.0":["avx512skx-x64", "avx512skx-x32"],
"9.0":["avx512skx-x64", "avx512skx-x32"]
}
if LLVM_VERSION in prohibited_list:
return prohibited_list[LLVM_VERSION]
return []
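# Example values, derived directly from prohibited_list above:
#   unsupported_llvm_targets("8.0")  -> ["avx512skx-x64", "avx512skx-x32"]
#   unsupported_llvm_targets("13.0") -> []  (no prohibited targets for this version)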
# Split targets into categories: native, sde.
# native - targets that run natively on the current hardware.
# sde - targets that need to be emulated via SDE on the current hardware.
def check_targets():
result = []
result_sde = []
# check what native targets do we have
if current_OS != "Windows":
if options.ispc_build_compiler == "clang":
cisa_compiler = "clang"
elif options.ispc_build_compiler == "gcc":
cisa_compiler = "g++"
try_do_LLVM("build check_ISA", cisa_compiler + " check_isa.cpp -o check_isa.exe", True)
else:
try_do_LLVM("build check_ISA", "cl check_isa.cpp", True)
# Dictionary mapping hardware architecture to its targets.
# The value in the dictionary is:
# [
# list of targets corresponding to this architecture,
    #   list of other architectures executable on this hardware,
    #   flag for sde to emulate this platform,
    #   flag indicating whether this is supported on the current platform
# ]
target_dict = OrderedDict([
("SSE2", [["sse2-i32x4", "sse2-i32x8"],
["SSE2"], "-p4", False]),
("SSE4", [["sse4-i32x4", "sse4-i32x8", "sse4-i16x8", "sse4-i8x16"],
["SSE2", "SSE4"], "-wsm", False]),
("AVX", [["avx1-i32x4", "avx1-i32x8", "avx1-i32x16", "avx1-i64x4"],
["SSE2", "SSE4", "AVX"], "-snb", False]),
("AVX2", [["avx2-i32x4", "avx2-i32x8", "avx2-i32x16", "avx2-i64x4", "avx2-i8x32", "avx2-i16x16"],
["SSE2", "SSE4", "AVX", "AVX2"], "-hsw", False]),
("KNL", [["avx512knl-x16"],
["SSE2", "SSE4", "AVX", "AVX2", "KNL"], "-knl", False]),
("SKX", [["avx512skx-x16", "avx512skx-x8", "avx512skx-x4", "avx512skx-x64", "avx512skx-x32"],
["SSE2", "SSE4", "AVX", "AVX2", "SKX"], "-skx", False])
])
hw_arch = take_lines("check_isa.exe", "first").split()[1]
if not (hw_arch in target_dict):
alloy_error("Architecture " + hw_arch + " was not recognized", 1)
    # Mark all compatible architectures in the dictionary.
for compatible_arch in target_dict[hw_arch][1]:
target_dict[compatible_arch][3] = True
# Now initialize result and result_sde.
for key in target_dict:
item = target_dict[key]
targets = item[0]
if item[3]:
# Supported natively
result = result + targets
else:
# Supported through SDE
for target in targets:
result_sde = result_sde + [[item[2], target]]
# now check what targets we have with the help of SDE
sde_exists = get_sde()
if sde_exists == "":
alloy_error("you haven't got sde neither in SDE_HOME nor in your PATH.\n" +
"To test all platforms please set SDE_HOME to path containing SDE.\n" +
"Please refer to http://www.intel.com/software/sde for SDE download information.", 2)
return [result, result_sde]
def build_ispc(version_LLVM, make):
current_path = os.getcwd()
ispc_home = os.environ["ISPC_HOME"]
os.chdir(ispc_home)
make_ispc = "make " + options.ispc_build_compiler + " -j" + options.speed
ISPC_BUILD="build-" + version_LLVM
ISPC_BIN="bin-" + version_LLVM
    if not os.path.exists(ISPC_BUILD):
        os.makedirs(ISPC_BUILD)
    if not os.path.exists(ISPC_BIN):
        os.makedirs(ISPC_BIN)
os.chdir(ISPC_BUILD)
if current_OS != "Windows":
p_temp = os.getenv("PATH")
os.environ["PATH"] = os.environ["LLVM_HOME"] + "/bin-" + version_LLVM + "/bin:" + os.environ["PATH"]
folder = os.environ["LLVM_HOME"] + os.sep + "llvm-"
if options.folder == "":
folder += version_LLVM
if options.debug == True:
folder += "dbg"
try_do_LLVM("configure ispc build", 'cmake -DCMAKE_INSTALL_PREFIX="..\\'+ ISPC_BIN + '" ' +
' -DCMAKE_BUILD_TYPE=Release' +
ispc_home, True)
try_do_LLVM("build ISPC with LLVM version " + version_LLVM + " ", make_ispc, True)
try_do_LLVM("install ISPC ", "make install", True)
copyfile(os.path.join(ispc_home, ISPC_BIN, "bin", "ispc"), os.path.join(ispc_home, + "ispc"))
os.environ["PATH"] = p_temp
else:
try_do_LLVM("configure ispc build", 'cmake -Thost=x64 -G ' + '\"' + generator + '\"' + ' -DCMAKE_INSTALL_PREFIX="..\\'+ ISPC_BIN + '" ' +
' -DCMAKE_BUILD_TYPE=Release ' +
ispc_home, True)
try_do_LLVM("clean ISPC for building", "msbuild ispc.vcxproj /t:clean", True)
try_do_LLVM("build ISPC with LLVM version " + version_LLVM + " ", "msbuild ispc.vcxproj /V:m /p:Platform=x64 /p:Configuration=Release /t:rebuild", True)
try_do_LLVM("install ISPC ", "msbuild INSTALL.vcxproj /p:Platform=x64 /p:Configuration=Release", True)
copyfile(os.path.join(ispc_home, ISPC_BIN, "bin", "ispc.exe"), os.path.join(ispc_home, + "ispc.exe"))
os.chdir(current_path)
def execute_stability(stability, R, print_version):
global return_status
try:
stability1 = copy.deepcopy(stability)
b_temp = run_tests.run_tests(stability1, [], print_version)
temp = b_temp[0]
time = b_temp[1]
for j in range(0,4):
R[j][0] = R[j][0] + temp[j] # new_runfails, new_compfails, new_passes_runfails, new_passes_compfails
for i in range(0,len(temp[j])):
R[j][1].append(temp[4])
number_of_fails = temp[5]
number_of_new_fails = len(temp[0]) + len(temp[1])
number_of_passes = len(temp[2]) + len(temp[3])
if number_of_fails == 0:
str_fails = ". No fails"
else:
str_fails = ". Fails: " + str(number_of_fails)
if number_of_new_fails == 0:
str_new_fails = ", No new fails"
else:
str_new_fails = ", New fails: " + str(number_of_new_fails)
if number_of_passes == 0:
str_new_passes = "."
else:
str_new_passes = ", " + str(number_of_passes) + " new passes."
if stability.time:
str_time = " " + time + "\n"
else:
str_time = "\n"
print_debug(temp[4][1:-3] + stability1.ispc_flags + str_fails + str_new_fails + str_new_passes + str_time, False, stability_log)
except Exception as e:
print_debug("Exception: " + str(e), False, stability_log)
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_tb(exc_traceback, file=sys.stderr)
print_debug("ERROR: Exception in execute_stability: %s\n" % (sys.exc_info()[1]), False, stability_log)
return_status = 1
'''
R = [[new_runfails, [new_line, new_line...]],
[new_compfails, [new_line, new_line...]],
[new_passes_runfails, [new_line, new_line...]],
     [new_passes_compfails, [new_line, new_line...]]]
'''
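# A minimal illustration of the R structure described above (the test names and option
# strings are invented; real entries come from run_tests.run_tests via execute_stability):
#   R = [[["foo.ispc"], [" avx2-i32x8 x86-64 -O2 "]],   # new runfails and their option strings
#        [[], []],                                      # new compfails
#        [[], []],                                      # new passes of previous runfails
#        [[], []]]                                      # new passes of previous compfails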
def output_test_results(R):
ttt = ["NEW RUNFAILS: ", "NEW COMPFAILS: ", "NEW PASSES RUNFAILS: ", "NEW PASSES COMPFAILS: "]
for j in range(0, 4):
if len(R[j][0]) == 0:
print_debug("NO " + ttt[j][:-2] + "\n", False, stability_log)
else:
print_debug(ttt[j] + str(len(R[j][0])) + "\n", False, stability_log)
to_print = {}
for (fail_name, opt_str) in zip(R[j][0], R[j][1]):
if fail_name not in to_print:
to_print[fail_name] = []
to_print[fail_name].append(opt_str)
# sort
for key in to_print.keys():
to_print[key] = sorted(to_print[key])
# print out
for fail_name in sorted(to_print.keys()):
print_debug("\t" + fail_name + "\n", True, stability_log)
for opt_str in to_print[fail_name]:
print_debug("\t\t\t" + opt_str, True, stability_log)
def concatenate_test_results(R1, R2):
R = [[[],[]],[[],[]],[[],[]],[[],[]]]
for j in range(0, 4):
R[j][0] = R1[j][0] + R2[j][0]
R[j][1] = R1[j][1] + R2[j][1]
return R
def validation_run(only, only_targets, reference_branch, number, update, speed_number, make, perf_llvm, time):
os.chdir(os.environ["ISPC_HOME"])
if current_OS != "Windows":
os.environ["PATH"] = os.environ["ISPC_HOME"] + ":" + os.environ["PATH"]
print_debug("Command: " + ' '.join(sys.argv) + "\n", False, "")
print_debug("Folder: " + os.environ["ISPC_HOME"] + "\n", False, "")
date = datetime.datetime.now()
print_debug("Date: " + date.strftime('%H:%M %d/%m/%Y') + "\n", False, "")
newest_LLVM="13.0"
# *** *** ***
# Stability validation run
# *** *** ***
if ((("stability" in only) == True) or ("performance" in only) == False):
print_debug("\n\nStability validation run\n\n", False, "")
stability = common.EmptyClass()
# stability constant options
stability.save_bin = False
stability.random = False
stability.ispc_flags = options.ispc_flags
stability.compiler_exe = options.compiler_exe
stability.num_jobs = speed_number
stability.verbose = False
stability.time = time
# 1200 is more than default value in run_tests.py (600).
# There's a single test, which requires longer time on AVX2 capable server (Github Action):
# tests/idiv.ispc running for avx512-i8x64 for x86 under SDE.
# For any other tests it should be more than enough.
stability.test_time = 1200
stability.csv = ""
stability.non_interactive = True
stability.update = update
stability.include_file = None
stability.silent = True
stability.in_file = "." + os.sep + f_date + os.sep + "run_tests_log.log"
stability.verify = False
stability.fail_db = "fail_db.txt"
stability.device = None
stability.ispc_output = None
stability.debug_check = False
# stability varying options
stability.target = ""
stability.arch = ""
stability.opt = ""
stability.wrapexe = ""
# prepare parameters of run
[targets_t, sde_targets_t] = check_targets()
rebuild = True
opts = []
archs = []
LLVM = []
targets = []
sde_targets = []
dbg_begin = 0
dbg_total = 1
# parsing option only, update parameters of run
if "-O2" in only:
opts.append("O2")
if "-O1" in only:
opts.append("O1")
if "-O0" in only:
opts.append("O0")
if "debug" in only:
if not ("nodebug" in only):
dbg_begin = 1
dbg_total = 2
if "x86" in only and not ("x86-64" in only):
archs.append("x86")
if "x86-64" in only:
archs.append("x86-64")
if "native" in only:
sde_targets_t = []
for i in ["6.0", "7.0", "8.0", "9.0", "10.0", "11.0", "12.0", "13.0", "14.0", "trunk"]:
if i in only:
LLVM.append(i)
if "current" in only:
LLVM = [" "]
rebuild = False
else:
common.check_tools(1)
if only_targets != "":
only_targets += " "
only_targets_t = only_targets.split(" ")
for i in only_targets_t:
if i == "":
continue
err = True
for j in range(0,len(targets_t)):
if i in targets_t[j]:
targets.append(targets_t[j])
err = False
for j in range(0,len(sde_targets_t)):
if i in sde_targets_t[j][1]:
sde_targets.append(sde_targets_t[j])
err = False
if err == True:
alloy_error("You haven't sde for target " + i, 1)
else:
targets = targets_t
sde_targets = sde_targets_t
if "build" in only:
targets = []
sde_targets = []
only = only + " stability "
# finish parameters of run, prepare LLVM
if len(opts) == 0:
opts = ["O2"]
if len(archs) == 0:
archs = ["x86", "x86-64"]
if len(LLVM) == 0:
LLVM = [newest_LLVM, "trunk"]
need_LLVM = check_LLVM(LLVM)
for i in range(0,len(need_LLVM)):
            build_LLVM(need_LLVM[i], "", False, SelfbuildType.SINGLE, False, True, False, make, options.gcc_toolchain_path, False, False)
# begin validation run for stabitily
common.remove_if_exists(stability.in_file)
R = [[[],[]],[[],[]],[[],[]],[[],[]]]
print_debug("\n" + common.get_host_name() + "\n", False, stability_log)
print_debug("\n_________________________STABILITY REPORT_________________________\n", False, stability_log)
ispc_flags_tmp = stability.ispc_flags
for i in range(0,len(LLVM)):
R_tmp = [[[],[]],[[],[]],[[],[]],[[],[]]]
print_version = 2
if rebuild:
build_ispc(LLVM[i], make)
for j in range(0,len(targets)):
stability.target = targets[j]
                # the target might not be supported by the chosen llvm version
if (stability.target in unsupported_llvm_targets(LLVM[i])):
print_debug("Warning: target " + stability.target + " is not supported in LLVM " + LLVM[i] + "\n", False, stability_log)
continue
# now set archs for targets
arch = archs
for i1 in range(0,len(arch)):
for i2 in range(0,len(opts)):
for i3 in range(dbg_begin,dbg_total):
stability.arch = arch[i1]
stability.opt = opts[i2]
stability.ispc_flags = ispc_flags_tmp
if (i3 != 0):
stability.ispc_flags += " -g"
execute_stability(stability, R_tmp, print_version)
print_version = 0
for j in range(0,len(sde_targets)):
stability.target = sde_targets[j][1]
                # the target might not be supported by the chosen llvm version
if (stability.target in unsupported_llvm_targets(LLVM[i])):
print_debug("Warning: target " + stability.target + " is not supported in LLVM " + LLVM[i] + "\n", False, stability_log)
continue
stability.wrapexe = get_sde() + " " + sde_targets[j][0] + " -- "
arch = archs
for i1 in range(0,len(arch)):
for i2 in range(0,len(opts)):
for i3 in range(dbg_begin,dbg_total):
stability.arch = arch[i1]
stability.opt = opts[i2]
stability.ispc_flags = ispc_flags_tmp
if (i3 != 0):
stability.ispc_flags += " -g"
execute_stability(stability, R_tmp, print_version)
print_version = 0
# Output testing results separate for each tested LLVM version
R = concatenate_test_results(R, R_tmp)
output_test_results(R_tmp)
print_debug("\n", False, stability_log)
print_debug("\n----------------------------------------\nTOTAL:\n", False, stability_log)
output_test_results(R)
print_debug("__________________Watch stability.log for details_________________\n", False, stability_log)
# *** *** ***
# Performance validation run
# *** *** ***
if ((("performance" in only) == True) or ("stability" in only) == False):
print_debug("\n\nPerformance validation run\n\n", False, "")
common.check_tools(1)
performance = common.EmptyClass()
# performance constant options
performance.number = number
performance.config = "." + os.sep + "perf.ini"
performance.path = "." + os.sep
performance.silent = True
performance.output = ""
performance.compiler = ""
performance.ref = "ispc_ref"
if current_OS == "Windows":
performance.ref = "ispc_ref.exe"
performance.perf_target = ""
performance.in_file = "." + os.sep + f_date + os.sep + "performance.log"
# prepare newest LLVM
need_LLVM = check_LLVM([newest_LLVM])
if len(need_LLVM) != 0:
            build_LLVM(need_LLVM[0], "", False, SelfbuildType.SINGLE, False, True, False, make, options.gcc_toolchain_path, True, False)
if perf_llvm == False:
# prepare reference point. build both test and reference compilers
try_do_LLVM("apply git", "git branch", True)
temp4 = take_lines("git branch", "all")
for line in temp4:
if "*" in line:
current_branch = line[2:-1]
stashing = True
sys.stdout.write("Please, don't interrupt script here! You can have not sync git status after interruption!\n")
if "No local changes" in take_lines("git stash", "first"):
stashing = False
#try_do_LLVM("stash current branch ", "git stash", True)
try_do_LLVM("checkout reference branch " + reference_branch + " ", "git checkout " + reference_branch, True)
sys.stdout.write(".\n")
build_ispc(newest_LLVM, make)
sys.stdout.write(".\n")
if current_OS != "Windows":
os.rename("ispc", "ispc_ref")
else:
common.remove_if_exists("ispc_ref.exe")
os.rename("ispc.exe", "ispc_ref.exe")
try_do_LLVM("checkout test branch " + current_branch + " ", "git checkout " + current_branch, True)
if stashing:
try_do_LLVM("return current branch ", "git stash pop", True)
sys.stdout.write("You can interrupt script now.\n")
build_ispc(newest_LLVM, make)
else:
# build compiler with two different LLVM versions
if len(check_LLVM([reference_branch])) != 0:
alloy_error("you haven't got llvm called " + reference_branch, 1)
build_ispc(newest_LLVM, make)
os.rename("ispc", "ispc_ref")
build_ispc(reference_branch, make)
# begin validation run for performance. output is inserted into perf()
perf.perf(performance, [])
# dumping gathered info to the file
common.ex_state.dump(alloy_folder + "test_table.dump", common.ex_state.tt)
def Main():
global current_OS
global current_OS_version
global return_status
current_OS_version = platform.release()
if (platform.system() == 'Windows' or 'CYGWIN_NT' in platform.system()) == True:
current_OS = "Windows"
else:
if (platform.system() == 'Darwin'):
current_OS = "MacOS"
else:
current_OS = "Linux"
if (options.build_llvm == False and options.validation_run == False):
parser.print_help()
exit(1)
# set appropriate makefile target
# gcc and g++ options are equal and added for ease of use
if options.ispc_build_compiler != "clang" and \
options.ispc_build_compiler != "gcc":
alloy_error("unknow option for --ispc-build-compiler: " + options.ispc_build_compiler, 1)
parser.print_help()
exit(1)
# check and normalize selfbuild switches
selfbuild = SelfbuildType.SINGLE
if (options.selfbuild and (options.selfbuild_phase1 or options.selfbuild_phase2)) or (options.selfbuild_phase1 and options.selfbuild_phase2):
alloy_error("Only one of --selfbuild* switches can be used at the same time", 1)
if options.selfbuild:
selfbuild = SelfbuildType.SELF
if options.selfbuild_phase1:
selfbuild = SelfbuildType.SELF_PHASE1
if options.selfbuild_phase2:
selfbuild = SelfbuildType.SELF_PHASE2
setting_paths(options.llvm_home, options.ispc_home, options.sde_home)
if os.environ.get("LLVM_HOME") == None:
alloy_error("you have no LLVM_HOME", 1)
if os.environ.get("ISPC_HOME") == None:
alloy_error("you have no ISPC_HOME", 1)
if options.only != "":
test_only_r = " 6.0 7.0 8.0 9.0 10.0 11.0 12.0 13.0 14.0 trunk current build stability performance x86 x86-64 x86_64 -O0 -O1 -O2 native debug nodebug "
test_only = options.only.split(" ")
for iterator in test_only:
if not (" " + iterator + " " in test_only_r):
alloy_error("unknown option for only: " + iterator, 1)
if current_OS == "Windows" and selfbuild is not SelfbuildType.SINGLE:
alloy_error("Selfbuild is not supported on Windows", 1)
global f_date
f_date = "logs"
common.remove_if_exists(f_date)
os.makedirs(f_date)
global alloy_folder
alloy_folder = os.getcwd() + os.sep + f_date + os.sep
global alloy_build
alloy_build = alloy_folder + "alloy_build.log"
global stability_log
stability_log = alloy_folder + "stability.log"
current_path = os.getcwd()
make = "make -j" + options.speed
if os.environ["ISPC_HOME"] != os.getcwd():
alloy_error("your ISPC_HOME and your current path are different! (" + os.environ["ISPC_HOME"] + " is not equal to " + os.getcwd() +
")\n", 2)
if options.perf_llvm == True:
if options.branch == "main":
options.branch = "trunk"
global generator
if options.generator:
generator = options.generator
else:
if current_OS == "Windows":
generator = "Visual Studio 17 2022"
else:
generator = "Unix Makefiles"
try:
start_time = time.time()
if options.build_llvm:
build_LLVM(options.version, options.folder,
options.debug, selfbuild, options.extra, False, options.force, make, options.gcc_toolchain_path, options.llvm_disable_assertions, options.verbose)
if options.validation_run:
validation_run(options.only, options.only_targets, options.branch,
options.number_for_performance, options.update, int(options.speed),
make, options.perf_llvm, options.time)
elapsed_time = time.time() - start_time
if options.time:
print_debug("Elapsed time: " + time.strftime('%Hh%Mm%Ssec.', time.gmtime(elapsed_time)) + "\n", False, "")
except Exception as e:
print_debug("Exception: " + str(e) + "\n", False, stability_log)
return_status = 1
# Finish execution: time reporting and copy log
try:
os.chdir(current_path)
date_name = "alloy_results_" + datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
if os.path.exists(date_name):
alloy_error("It's forbidden to run alloy two times in a second, logs are in ./logs", 1)
os.rename(f_date, date_name)
print_debug("Logs are in " + date_name + "\n", False, "")
except Exception as e:
# Do not return non-zero exit code here, as it's not a critical error and testing might be considered successful.
print_debug("Exception: " + str(e), False, stability_log)
if current_OS == "Windows":
# Windows hangs from time to time on exit, so returning without cleanup.
sys.stdout.flush()
os._exit(return_status)
exit(return_status)
###Main###
from optparse import OptionParser
from optparse import OptionGroup
import sys
import os
import errno
import operator
import time
import glob
import platform
import smtplib
import datetime
import copy
import multiprocessing
import subprocess
import re
from shutil import copyfile
# our drivers
import run_tests
import perf
import common
take_lines = common.take_lines
print_debug = common.print_debug
make_sure_dir_exists = common.make_sure_dir_exists
return_status = 0
if __name__ == '__main__':
# parsing options
class MyParser(OptionParser):
def format_epilog(self, formatter):
return self.epilog
examples = ("Examples:\n" +
"Download and build LLVM trunk\n\talloy.py -b\n" +
"Download and build LLVM 13.0. Rewrite LLVM folders\n\talloy.py -b --version=13.0 --force\n" +
"Validation run with LLVM trunk; x86, x86-64; -O2;\nall supported targets; performance\n\talloy.py -r\n" +
"Validation run with all avx targets and sse4-i8x16 without performance\n\talloy.py -r --only=stability --only-targets='avx sse4-i8x16'\n" +
"Validation run with avx2-i32x8, all sse4 and sse2 targets\nand all targets with i32x16\n\talloy.py -r --only-targets='avx2-i32x8 sse4 i32x16 sse2'\n" +
"Stability validation run with LLVM 7.0, 8.0; -O0; x86,\nupdate fail_db.txt with passes and fails\n\talloy.py -r --only='7.0 -O0 stability 8.0 x86' --update-errors=FP\n" +
"Try to build compiler with all LLVM\n\talloy.py -r --only=build\n" +
"Performance validation run with 10 runs of each test and comparing to branch 'old'\n\talloy.py -r --only=performance --compare-with=old --number=10\n" +
"Validation run. Update fail_db.txt with new fails\n\talloy.py -r --update-errors=F\n" +
"Test KNL target (requires sde)\n\talloy.py -r --only='stability' --only-targets='avx512knl-x16'\n")
num_threads="%s" % multiprocessing.cpu_count()
parser = MyParser(usage="Usage: alloy.py -r/-b [options]", epilog=examples)
parser.add_option('-b', '--build-llvm', dest='build_llvm',
help='ask to build LLVM', default=False, action="store_true")
parser.add_option('-r', '--run', dest='validation_run',
help='ask for validation run', default=False, action="store_true")
parser.add_option('-j', dest='speed',
help='set -j for make', default=num_threads)
parser.add_option('--ispc-build-compiler', dest='ispc_build_compiler',
help='set compiler to build ispc binary (clang/gcc)', default="clang")
# options for activity "build LLVM"
llvm_group = OptionGroup(parser, "Options for building LLVM",
"These options must be used with -b option.")
llvm_group.add_option('--version', dest='version',
help='version of llvm to build: 6.0 7.0 8.0 9.0 10.0 11.0 12.0 13.0 trunk. Default: trunk', default="trunk")
llvm_group.add_option('--with-gcc-toolchain', dest='gcc_toolchain_path',
                          help='GCC install dir to use when building clang. It is important to set this when ' +
                               'you have an alternative gcc installation. Note that otherwise gcc from the standard ' +
                               'location will be used, not the one from your PATH', default="")
llvm_group.add_option('--debug', dest='debug',
help='debug build of LLVM', default=False, action="store_true")
llvm_group.add_option('--folder', dest='folder',
help='folder to build LLVM in', default="")
llvm_group.add_option('--selfbuild', dest='selfbuild',
help='make selfbuild of LLVM and clang', default=False, action="store_true")
llvm_group.add_option('--selfbuild-phase1', dest='selfbuild_phase1',
help='make selfbuild of LLVM and clang, first phase only', default=False, action="store_true")
llvm_group.add_option('--selfbuild-phase2', dest='selfbuild_phase2',
help='make selfbuild of LLVM and clang, second phase only', default=False, action="store_true")
llvm_group.add_option('--llvm-disable-assertions', dest='llvm_disable_assertions',
help='build LLVM with assertions disabled', default=False, action="store_true")
llvm_group.add_option('--force', dest='force',
help='rebuild LLVM', default=False, action='store_true')
llvm_group.add_option('--extra', dest='extra',
help='load extra clang tools', default=False, action='store_true')
llvm_group.add_option('--verbose', dest='verbose',
help='verbose output during the build', default=False, action='store_true')
parser.add_option_group(llvm_group)
# options for activity "validation run"
run_group = OptionGroup(parser, "Options for validation run",
"These options must be used with -r option.")
run_group.add_option('--compare-with', dest='branch',
help='set performance reference point. Default: main', default="main")
run_group.add_option('--compiler', dest='compiler_exe',
help='C/C++ compiler binary to use to run tests.', default=None)
run_group.add_option('--ispc-flags', dest='ispc_flags',
help='extra ispc flags.', default="")
run_group.add_option('--number', dest='number_for_performance',
help='number of performance runs for each test. Default: 5', default=5)
run_group.add_option('--update-errors', dest='update',
help='rewrite fail_db.txt file according to received results (F or FP)', default="")
run_group.add_option('--only-targets', dest='only_targets',
help='set list of targets to test. Possible values - all subnames of targets', default="")
run_group.add_option('--time', dest='time',
help='display time of testing', default=False, action='store_true')
run_group.add_option('--only', dest='only',
help='set types of tests. Possible values:\n' +
'-O0, -O1, -O2, x86, x86-64, stability (test only stability), performance (test only performance),\n' +
'build (only build with different LLVM), 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, trunk, native (do not use SDE),\n' +
'current (do not rebuild ISPC), debug (only with debug info), nodebug (only without debug info, default).',
default="")
run_group.add_option('--perf_LLVM', dest='perf_llvm',
help='compare LLVM 8.0 with "--compare-with", default trunk', default=False, action='store_true')
run_group.add_option('--generator', dest='generator',
help='specify cmake generator', default="")
parser.add_option_group(run_group)
# options for activity "setup PATHS"
setup_group = OptionGroup(parser, "Options for setup",
"These options must be use with -r or -b to setup environment variables")
setup_group.add_option('--llvm_home', dest='llvm_home',help='path to LLVM',default="")
setup_group.add_option('--ispc_home', dest='ispc_home',help='path to ISPC',default="")
setup_group.add_option('--sde_home', dest='sde_home',help='path to SDE',default="")
parser.add_option_group(setup_group)
(options, args) = parser.parse_args()
Main()
|
py | 1a538a90f4664e50e2fa18ad2c0369c8262d9d44 | #!/usr/bin/env python
"""GRR rdfvalue tests.
This module loads and registers all the tests for the RDFValue implementations.
"""
# These need to register plugins so, pylint: disable=unused-import
from grr.lib.rdfvalues import aff4_rdfvalues_test
from grr.lib.rdfvalues import basic_test
from grr.lib.rdfvalues import benchmark_test
from grr.lib.rdfvalues import client_test
from grr.lib.rdfvalues import cloud_test
from grr.lib.rdfvalues import crypto_test
from grr.lib.rdfvalues import data_store_test
from grr.lib.rdfvalues import filestore_test
from grr.lib.rdfvalues import flows_test
from grr.lib.rdfvalues import foreman_test
from grr.lib.rdfvalues import paths_test
from grr.lib.rdfvalues import protodict_test
from grr.lib.rdfvalues import standard_test
from grr.lib.rdfvalues import stats_test
from grr.lib.rdfvalues import structs_test
|
py | 1a538aafd808538c352d318b8af6ce950e64c253 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This API defines FeatureColumn abstraction.
FeatureColumns provide a high level abstraction for ingesting and representing
features. FeatureColumns are also the primary way of encoding features for
canned `tf.estimator.Estimator`s.
When using FeatureColumns with `Estimators`, the type of feature column you
should choose depends on (1) the feature type and (2) the model type.
1. Feature type:
* Continuous features can be represented by `numeric_column`.
* Categorical features can be represented by any `categorical_column_with_*`
column:
- `categorical_column_with_vocabulary_list`
- `categorical_column_with_vocabulary_file`
- `categorical_column_with_hash_bucket`
- `categorical_column_with_identity`
- `weighted_categorical_column`
2. Model type:
* Deep neural network models (`DNNClassifier`, `DNNRegressor`).
Continuous features can be directly fed into deep neural network models.
age_column = numeric_column("age")
To feed sparse features into DNN models, wrap the column with
`embedding_column` or `indicator_column`. `indicator_column` is recommended
for features with only a few possible values. For features with many
possible values, to reduce the size of your model, `embedding_column` is
recommended.
embedded_dept_column = embedding_column(
categorical_column_with_vocabulary_list(
"department", ["math", "philosophy", ...]), dimension=10)
* Wide (aka linear) models (`LinearClassifier`, `LinearRegressor`).
Sparse features can be fed directly into linear models. They behave like an
indicator column but with an efficient implementation.
dept_column = categorical_column_with_vocabulary_list("department",
["math", "philosophy", "english"])
It is recommended that continuous features be bucketized before being
fed into linear models.
bucketized_age_column = bucketized_column(
source_column=age_column,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
Sparse features can be crossed (also known as conjuncted or combined) in
order to form non-linearities, and then fed into linear models.
cross_dept_age_column = crossed_column(
columns=["department", bucketized_age_column],
hash_bucket_size=1000)
Example of building canned `Estimator`s using FeatureColumns:
```python
# Define features and transformations
deep_feature_columns = [age_column, embedded_dept_column]
wide_feature_columns = [dept_column, bucketized_age_column,
cross_dept_age_column]
# Build deep model
estimator = DNNClassifier(
feature_columns=deep_feature_columns,
hidden_units=[500, 250, 50])
estimator.train(...)
# Or build a wide model
estimator = LinearClassifier(
feature_columns=wide_feature_columns)
estimator.train(...)
# Or build a wide and deep model!
estimator = DNNLinearCombinedClassifier(
linear_feature_columns=wide_feature_columns,
dnn_feature_columns=deep_feature_columns,
dnn_hidden_units=[500, 250, 50])
estimator.train(...)
```
FeatureColumns can also be transformed into a generic input layer for
custom models using `input_layer`.
Example of building model using FeatureColumns, this can be used in a
`model_fn` which is given to the {tf.estimator.Estimator}:
```python
# Building model via layers
deep_feature_columns = [age_column, embedded_dept_column]
columns_to_tensor = parse_feature_columns_from_examples(
serialized=my_data,
feature_columns=deep_feature_columns)
first_layer = input_layer(
features=columns_to_tensor,
feature_columns=deep_feature_columns)
second_layer = fully_connected(first_layer, ...)
```
NOTE: Functions prefixed with "_" indicate experimental or private parts of
the API subject to change, and should not be relied upon!
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import contextlib
import math
import numpy as np
import six
import json
from tensorflow.python.eager import context
from tensorflow.python.feature_column import feature_column as fc_old
from tensorflow.python.feature_column import utils as fc_utils
from tensorflow.python.feature_column import coalesced_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.framework import tensor_shape
# TODO(b/118385027): Dependency on keras can be problematic if Keras moves out
# of the main repo.
from tensorflow.python.keras import initializers
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import gen_feature_column_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training.tracking import tracking
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
from tensorflow.python.util.compat import collections_abc
_FEATURE_COLUMN_DEPRECATION_DATE = None
_FEATURE_COLUMN_DEPRECATION = ('The old _FeatureColumn APIs are being '
'deprecated. Please use the new FeatureColumn '
'APIs instead.')
class StateManager(object):
"""Manages the state associated with FeatureColumns.
Some `FeatureColumn`s create variables or resources to assist their
computation. The `StateManager` is responsible for creating and storing these
objects since `FeatureColumn`s are supposed to be stateless configuration
only.
"""
def create_variable(self,
feature_column,
name,
shape,
dtype=None,
trainable=True,
use_resource=True,
initializer=None):
"""Creates a new variable.
Args:
feature_column: A `FeatureColumn` object this variable corresponds to.
name: variable name.
shape: variable shape.
dtype: The type of the variable. Defaults to `self.dtype` or `float32`.
trainable: Whether this variable is trainable or not.
use_resource: If true, we use resource variables. Otherwise we use
RefVariable.
initializer: initializer instance (callable).
Returns:
The created variable.
"""
del feature_column, name, shape, dtype, trainable, use_resource, initializer
raise NotImplementedError('StateManager.create_variable')
def create_hashtable(self,
feature_column,
name,
shape,
dtype,
initializer,
partitioner,
trainable):
"""Creates a new variable.
"""
del feature_column, name, shape, dtype, initializer, partitioner, trainable
raise NotImplementedError('StateManager.create_hashtable')
def add_variable(self, feature_column, var):
"""Adds an existing variable to the state.
Args:
feature_column: A `FeatureColumn` object to associate this variable with.
var: The variable.
"""
del feature_column, var
raise NotImplementedError('StateManager.add_variable')
def get_variable(self, feature_column, name):
"""Returns an existing variable.
Args:
feature_column: A `FeatureColumn` object this variable corresponds to.
name: variable name.
"""
del feature_column, name
    raise NotImplementedError('StateManager.get_variable')
def add_resource(self, feature_column, name, resource):
"""Creates a new resource.
Resources can be things such as tables etc.
Args:
feature_column: A `FeatureColumn` object this resource corresponds to.
name: Name of the resource.
resource: The resource.
Returns:
The created resource.
"""
del feature_column, name, resource
raise NotImplementedError('StateManager.add_resource')
def get_resource(self, feature_column, name):
"""Returns an already created resource.
Resources can be things such as tables etc.
Args:
feature_column: A `FeatureColumn` object this variable corresponds to.
name: Name of the resource.
"""
del feature_column, name
raise NotImplementedError('StateManager.get_resource')
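# A minimal usage sketch of the StateManager contract (illustrative only; it mirrors how
# _LinearModelLayer below creates and retrieves per-column state, and assumes `column`,
# `state_manager`, `num_buckets` and `units` are provided by the caller):
#
#   state_manager.create_variable(column, name='weights',
#                                 shape=(num_buckets, units),
#                                 dtype=dtypes.float32,
#                                 initializer=init_ops.zeros_initializer())
#   ...
#   weight_var = state_manager.get_variable(column, 'weights')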
class _StateManagerImpl(StateManager):
"""Manages the state of DenseFeatures and LinearLayer."""
def __init__(self, layer, trainable):
"""Creates an _StateManagerImpl object.
Args:
layer: The input layer this state manager is associated with.
trainable: Whether by default, variables created are trainable or not.
"""
self._trainable = trainable
self._layer = layer
if self._layer is not None:
self._layer._maybe_create_attribute('_resources', []) # pylint: disable=protected-access
self._cols_to_vars_map = collections.defaultdict(lambda: {})
# TODO(vbardiovsky): Make sure the resources are tracked by moving them to
# the layer (inheriting from AutoTrackable), e.g.:
# self._layer._resources_map = data_structures.Mapping()
self._cols_to_resources_map = collections.defaultdict(lambda: {})
def create_variable(self,
feature_column,
name,
shape,
dtype=None,
trainable=True,
use_resource=True,
initializer=None):
if name in self._cols_to_vars_map[feature_column]:
raise ValueError('Variable already exists.')
var = self._layer.add_variable(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
trainable=self._trainable and trainable,
use_resource=use_resource,
# TODO(rohanj): Get rid of this hack once we have a mechanism for
# specifying a default partitioner for an entire layer. In that case,
# the default getter for Layers should work.
getter=variable_scope.get_variable)
self._cols_to_vars_map[feature_column][name] = var
return var
def create_hashtable(self,
feature_column,
name,
shape,
dtype,
initializer,
partitioner,
trainable):
if name in self._cols_to_vars_map[feature_column]:
raise ValueError('Variable already exists.')
table = variable_scope.get_hash_table(
name, shape, dtype=dtype,
initializer=initializer,
partitioner=partitioner,
trainable=self._trainable and trainable)
self._cols_to_vars_map[feature_column][name] = table
return table
def get_variable(self, feature_column, name):
if name in self._cols_to_vars_map[feature_column]:
return self._cols_to_vars_map[feature_column][name]
raise ValueError('Variable does not exist.')
def add_resource(self, feature_column, name, resource):
self._cols_to_resources_map[feature_column][name] = resource
if self._layer is not None:
self._layer._resources.append(resource) # pylint: disable=protected-access
def get_resource(self, feature_column, name):
if name in self._cols_to_resources_map[feature_column]:
return self._cols_to_resources_map[feature_column][name]
raise ValueError('Resource does not exist.')
class _StateManagerImplV2(_StateManagerImpl):
"""Manages the state of DenseFeatures."""
def create_variable(self,
feature_column,
name,
shape,
dtype=None,
trainable=True,
use_resource=True,
initializer=None):
if name in self._cols_to_vars_map[feature_column]:
raise ValueError('Variable already exists.')
var = self._layer.add_variable(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
trainable=self._trainable and trainable,
use_resource=use_resource)
self._cols_to_vars_map[feature_column][name] = var
return var
class _BaseFeaturesLayer(Layer):
"""Base class for DenseFeatures and SequenceFeatures.
Defines common methods and helpers.
Args:
feature_columns: An iterable containing the FeatureColumns to use as
inputs to your model.
expected_column_type: Expected class for provided feature columns.
trainable: Boolean, whether the layer's variables will be updated via
gradient descent during training.
name: Name to give to the DenseFeatures.
**kwargs: Keyword arguments to construct a layer.
Raises:
ValueError: if an item in `feature_columns` doesn't match
`expected_column_type`.
"""
def __init__(self, feature_columns, expected_column_type, trainable, name,
**kwargs):
super(_BaseFeaturesLayer, self).__init__(
name=name, trainable=trainable, **kwargs)
self._feature_columns = _normalize_feature_columns(feature_columns)
self._state_manager = _StateManagerImpl(self, self.trainable)
for column in self._feature_columns:
if not isinstance(column, expected_column_type):
raise ValueError(
'Items of feature_columns must be a {}. '
'You can wrap a categorical column with an '
'embedding_column or indicator_column. Given: {}'.format(
expected_column_type, column))
def build(self, _):
for column in self._feature_columns:
with variable_scope._pure_variable_scope(self.name): # pylint: disable=protected-access
with variable_scope._pure_variable_scope(column.var_scope_name): # pylint: disable=protected-access
column.create_state(self._state_manager)
super(_BaseFeaturesLayer, self).build(None)
  def _target_shape(self, input_shape, num_elements):
"""Computes expected output shape of the layer or a column's dense tensor.
Args:
input_shape: Tensor or array with batch shape.
num_elements: Size of the last dimension of the output.
Returns:
Tuple with output shape.
"""
raise NotImplementedError('Calling an abstract method.')
def compute_output_shape(self, input_shape):
total_elements = 0
for column in self._feature_columns:
total_elements += column.variable_shape.num_elements()
return self._target_shape(input_shape, total_elements)
def _process_dense_tensor(self, column, tensor):
"""Reshapes the dense tensor output of a column based on expected shape.
Args:
column: A DenseColumn or SequenceDenseColumn object.
tensor: A dense tensor obtained from the same column.
Returns:
Reshaped dense tensor."""
target_shape = column.output_shape(tensor)
return array_ops.reshape(tensor, shape=target_shape)
def _verify_and_concat_tensors(self, output_tensors):
"""Verifies and concatenates the dense output of several columns."""
_verify_static_batch_size_equality(output_tensors, self._feature_columns)
return array_ops.concat(output_tensors, -1)
def get_config(self):
# Import here to avoid circular imports.
from tensorflow.python.feature_column import serialization # pylint: disable=g-import-not-at-top
column_configs = serialization.serialize_feature_columns(
self._feature_columns)
config = {'feature_columns': column_configs}
base_config = super( # pylint: disable=bad-super-call
_BaseFeaturesLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
# Import here to avoid circular imports.
from tensorflow.python.feature_column import serialization # pylint: disable=g-import-not-at-top
config_cp = config.copy()
config_cp['feature_columns'] = serialization.deserialize_feature_columns(
config['feature_columns'], custom_objects=custom_objects)
return cls(**config_cp)
class _LinearModelLayer(Layer):
"""Layer that contains logic for `LinearModel`."""
def __init__(self,
feature_columns,
units=1,
sparse_combiner='sum',
trainable=True,
name=None,
**kwargs):
super(_LinearModelLayer, self).__init__(
name=name, trainable=trainable, **kwargs)
self._feature_columns = _normalize_feature_columns(feature_columns)
for column in self._feature_columns:
if not isinstance(column, (DenseColumn, CategoricalColumn)):
raise ValueError(
'Items of feature_columns must be either a '
'DenseColumn or CategoricalColumn. Given: {}'.format(column))
if isinstance(column, (SequenceMultiHashEmbeddingColumn, SequenceEmbeddingColumn)):
raise ValueError(
'Items of feature_columns must not be a '
'SequenceEmbeddingColumn or SequenceMultiHashEmbeddingColumn'
'. Given: {}'.format(column))
self._units = units
self._sparse_combiner = sparse_combiner
self._state_manager = _StateManagerImpl(self, self.trainable)
self.bias = None
def build(self, _):
# We need variable scopes for now because we want the variable partitioning
# information to percolate down. We also use _pure_variable_scope's here
# since we want to open up a name_scope in the `call` method while creating
# the ops.
with variable_scope._pure_variable_scope(self.name): # pylint: disable=protected-access
for column in self._feature_columns:
with variable_scope._pure_variable_scope(column.name): # pylint: disable=protected-access
# Create the state for each feature column
column.create_state(self._state_manager)
# Create a weight variable for each column.
if isinstance(column, CategoricalColumn):
first_dim = column.num_buckets
else:
first_dim = column.variable_shape.num_elements()
self._state_manager.create_variable(
column,
name='weights',
dtype=dtypes.float32,
shape=(first_dim, self._units),
initializer=init_ops.zeros_initializer(),
trainable=self.trainable)
# Create a bias variable.
self.bias = self.add_variable(
name='bias_weights',
dtype=dtypes.float32,
shape=[self._units],
initializer=init_ops.zeros_initializer(),
trainable=self.trainable,
use_resource=True,
# TODO(rohanj): Get rid of this hack once we have a mechanism for
# specifying a default partitioner for an entire layer. In that case,
# the default getter for Layers should work.
getter=variable_scope.get_variable)
super(_LinearModelLayer, self).build(None)
def call(self, features):
if not isinstance(features, dict):
raise ValueError('We expected a dictionary here. Instead we got: {}'
.format(features))
with ops.name_scope(self.name):
transformation_cache = FeatureTransformationCache(features)
weighted_sums = []
for column in self._feature_columns:
with ops.name_scope(column.name):
# All the weights used in the linear model are owned by the state
# manager associated with this Linear Model.
weight_var = self._state_manager.get_variable(column, 'weights')
weighted_sum = _create_weighted_sum(
column=column,
transformation_cache=transformation_cache,
state_manager=self._state_manager,
sparse_combiner=self._sparse_combiner,
weight_var=weight_var)
weighted_sums.append(weighted_sum)
_verify_static_batch_size_equality(weighted_sums, self._feature_columns)
predictions_no_bias = math_ops.add_n(
weighted_sums, name='weighted_sum_no_bias')
predictions = nn_ops.bias_add(
predictions_no_bias, self.bias, name='weighted_sum')
return predictions
def get_config(self):
# Import here to avoid circular imports.
from tensorflow.python.feature_column import serialization # pylint: disable=g-import-not-at-top
column_configs = serialization.serialize_feature_columns(
self._feature_columns)
config = {
'feature_columns': column_configs,
'units': self._units,
'sparse_combiner': self._sparse_combiner
}
base_config = super( # pylint: disable=bad-super-call
_LinearModelLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
# Import here to avoid circular imports.
from tensorflow.python.feature_column import serialization # pylint: disable=g-import-not-at-top
config_cp = config.copy()
columns = serialization.deserialize_feature_columns(
config_cp['feature_columns'], custom_objects=custom_objects)
del config_cp['feature_columns']
return cls(feature_columns=columns, **config_cp)
# TODO(tanzheny): Cleanup it with respect to Premade model b/132690565.
class LinearModel(training.Model):
"""Produces a linear prediction `Tensor` based on given `feature_columns`.
This layer generates a weighted sum based on output dimension `units`.
Weighted sum refers to logits in classification problems. It refers to the
prediction itself for linear regression problems.
  Note on supported columns: `LinearModel` treats categorical columns as
  `indicator_column`s. To be specific, assume the input `SparseTensor` looks
like:
```python
shape = [2, 2]
{
[0, 0]: "a"
[1, 0]: "b"
[1, 1]: "c"
}
```
  `linear_model` assigns weights for the presence of "a", "b", "c" implicitly,
  just like `indicator_column`, while `input_layer` explicitly requires wrapping
  each of the categorical columns with an `embedding_column` or an
  `indicator_column`.
Example of usage:
```python
price = numeric_column('price')
price_buckets = bucketized_column(price, boundaries=[0., 10., 100., 1000.])
keywords = categorical_column_with_hash_bucket("keywords", 10K)
  keywords_price = crossed_column(['keywords', price_buckets], ...)
  columns = [price_buckets, keywords, keywords_price, ...]
  linear_model = LinearModel(columns)
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
prediction = linear_model(features)
```
"""
def __init__(self,
feature_columns,
units=1,
sparse_combiner='sum',
trainable=True,
name=None,
**kwargs):
"""Constructs a LinearLayer.
Args:
feature_columns: An iterable containing the FeatureColumns to use as
inputs to your model. All items should be instances of classes derived
from `_FeatureColumn`s.
units: An integer, dimensionality of the output space. Default value is 1.
sparse_combiner: A string specifying how to reduce if a categorical column
        is multivalent. Apart from `numeric_column`, almost all columns passed
        to `linear_model` are treated as categorical columns. It combines each
categorical column independently. Currently "mean", "sqrtn" and "sum"
are supported, with "sum" the default for linear model. "sqrtn" often
achieves good accuracy, in particular with bag-of-words columns.
* "sum": do not normalize features in the column
* "mean": do l1 normalization on features in the column
* "sqrtn": do l2 normalization on features in the column
For example, for two features represented as the categorical columns:
```python
# Feature 1
shape = [2, 2]
{
[0, 0]: "a"
[0, 1]: "b"
[1, 0]: "c"
}
# Feature 2
shape = [2, 3]
{
[0, 0]: "d"
[1, 0]: "e"
[1, 1]: "f"
[1, 2]: "g"
}
```
        with `sparse_combiner` as "mean", the linear model outputs conceptually are
```
        y_0 = 1.0 / 2.0 * (w_a + w_b) + w_c + b_0
        y_1 = w_d + 1.0 / 3.0 * (w_e + w_f + w_g) + b_1
```
where `y_i` is the output, `b_i` is the bias, and `w_x` is the weight
assigned to the presence of `x` in the input features.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: Name to give to the Linear Model. All variables and ops created will
be scoped by this name.
**kwargs: Keyword arguments to construct a layer.
Raises:
ValueError: if an item in `feature_columns` is neither a `DenseColumn`
nor `CategoricalColumn`.
"""
super(LinearModel, self).__init__(name=name, **kwargs)
self.layer = _LinearModelLayer(
feature_columns,
units,
sparse_combiner,
trainable,
name=self.name,
**kwargs)
def call(self, features):
"""Returns a `Tensor` the represents the predictions of a linear model.
Args:
features: A mapping from key to tensors. `_FeatureColumn`s look up via
these keys. For example `numeric_column('price')` will look at 'price'
key in this dict. Values are `Tensor` or `SparseTensor` depending on
corresponding `_FeatureColumn`.
Returns:
A `Tensor` which represents predictions/logits of a linear model. Its
shape is (batch_size, units) and its dtype is `float32`.
Raises:
ValueError: If features are not a dictionary.
"""
return self.layer(features)
@property
def bias(self):
return self.layer.bias
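# --- Editor's note: the sketch below is illustrative only and is never called
# by this module. It is a minimal end-to-end use of `LinearModel` with a
# numeric and a bucketized column; the feature key 'price' and the boundary
# values are made-up examples.
def _linear_model_usage_sketch():
  """Illustrative sketch of `LinearModel`; not part of the public API."""
  from tensorflow.python.framework import constant_op  # pylint: disable=g-import-not-at-top
  price = numeric_column('price')
  price_buckets = bucketized_column(price, boundaries=[0., 10., 100.])
  model = LinearModel([price, price_buckets], units=1)
  features = {'price': constant_op.constant([[5.0], [150.0]])}
  # Returns a dense `Tensor` of shape (batch_size, units) == (2, 1).
  return model(features)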
def _transform_features_v2(features, feature_columns, state_manager):
"""Returns transformed features based on features columns passed in.
  Please note that you most likely will not need to use this function. Check
  `input_layer` and `linear_model` first to see whether they satisfy your use
  case.
Example:
```python
# Define features and transformations
crosses_a_x_b = crossed_column(
columns=["sparse_feature_a", "sparse_feature_b"], hash_bucket_size=10000)
price_buckets = bucketized_column(
source_column=numeric_column("price"), boundaries=[...])
columns = [crosses_a_x_b, price_buckets]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
transformed = transform_features(features=features, feature_columns=columns)
assertCountEqual(columns, transformed.keys())
```
Args:
features: A mapping from key to tensors. `FeatureColumn`s look up via these
keys. For example `numeric_column('price')` will look at 'price' key in
      this dict. Values can be a `SparseTensor` or a `Tensor` depending on the
      corresponding `FeatureColumn`.
feature_columns: An iterable containing all the `FeatureColumn`s.
state_manager: A StateManager object that holds the FeatureColumn state.
Returns:
A `dict` mapping `FeatureColumn` to `Tensor` and `SparseTensor` values.
"""
feature_columns = _normalize_feature_columns(feature_columns)
outputs = {}
with ops.name_scope(
None, default_name='transform_features', values=features.values()):
transformation_cache = FeatureTransformationCache(features)
for column in feature_columns:
with ops.name_scope(None, default_name=column.name):
outputs[column] = transformation_cache.get(column, state_manager)
return outputs
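# --- Editor's note: illustrative sketch only, never called by this module. It
# shows `_transform_features_v2` on columns that need no column state, so
# `state_manager=None` is assumed to be acceptable here.
def _transform_features_usage_sketch():
  """Illustrative sketch of `_transform_features_v2`; not part of the API."""
  from tensorflow.python.framework import constant_op  # pylint: disable=g-import-not-at-top
  price = numeric_column('price')
  price_buckets = bucketized_column(price, boundaries=[0., 10., 100.])
  features = {'price': constant_op.constant([[5.0], [150.0]])}
  transformed = _transform_features_v2(
      features, [price, price_buckets], state_manager=None)
  # `transformed` maps each column to its transformed value, e.g.
  # transformed[price_buckets] holds the bucket indices of 'price'.
  return transformed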
@tf_export('feature_column.make_parse_example_spec', v1=[])
def make_parse_example_spec_v2(feature_columns):
"""Creates parsing spec dictionary from input feature_columns.
The returned dictionary can be used as arg 'features' in
`tf.io.parse_example`.
Typical usage example:
```python
# Define features and transformations
feature_a = categorical_column_with_vocabulary_file(...)
feature_b = numeric_column(...)
feature_c_bucketized = bucketized_column(numeric_column("feature_c"), ...)
feature_a_x_feature_c = crossed_column(
columns=["feature_a", feature_c_bucketized], ...)
feature_columns = set(
[feature_b, feature_c_bucketized, feature_a_x_feature_c])
features = tf.io.parse_example(
serialized=serialized_examples,
features=make_parse_example_spec(feature_columns))
```
For the above example, make_parse_example_spec would return the dict:
```python
{
"feature_a": parsing_ops.VarLenFeature(tf.string),
"feature_b": parsing_ops.FixedLenFeature([1], dtype=tf.float32),
"feature_c": parsing_ops.FixedLenFeature([1], dtype=tf.float32)
}
```
Args:
feature_columns: An iterable containing all feature columns. All items
should be instances of classes derived from `FeatureColumn`.
Returns:
A dict mapping each feature key to a `FixedLenFeature` or `VarLenFeature`
value.
Raises:
ValueError: If any of the given `feature_columns` is not a `FeatureColumn`
instance.
"""
result = {}
for column in feature_columns:
if not isinstance(column, FeatureColumn):
raise ValueError('All feature_columns must be FeatureColumn instances. '
'Given: {}'.format(column))
config = column.parse_example_spec
for key, value in six.iteritems(config):
if key in result and value != result[key]:
raise ValueError(
'feature_columns contain different parse_spec for key '
'{}. Given {} and {}'.format(key, value, result[key]))
result.update(config)
return result
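# --- Editor's note: illustrative sketch only, never called by this module. It
# builds a parsing spec from a few columns; the keys and bucket size are
# made-up examples. The resulting dict is what `tf.io.parse_example` expects
# for its `features` argument.
def _make_parse_example_spec_usage_sketch():
  """Illustrative sketch of `make_parse_example_spec_v2`; not part of the API."""
  price = numeric_column('price')
  price_buckets = bucketized_column(price, boundaries=[0., 10., 100.])
  keywords = categorical_column_with_hash_bucket('keywords', 10000)
  spec = make_parse_example_spec_v2([price_buckets, keywords])
  # spec == {'price': FixedLenFeature([1], tf.float32),
  #          'keywords': VarLenFeature(tf.string)}
  return spec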
@tf_export('feature_column.embedding_column')
def embedding_column(categorical_column,
dimension,
combiner='mean',
initializer=None,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
max_norm=None,
trainable=True,
coalesced_scope=None):
"""`DenseColumn` that converts from sparse, categorical input.
Use this when your inputs are sparse, but you want to convert them to a dense
representation (e.g., to feed to a DNN).
Inputs must be a `CategoricalColumn` created by any of the
  `categorical_column_*` functions. Here is an example of using
`embedding_column` with `DNNClassifier`:
```python
video_id = categorical_column_with_identity(
key='video_id', num_buckets=1000000, default_value=0)
columns = [embedding_column(video_id, 9),...]
estimator = tf.estimator.DNNClassifier(feature_columns=columns, ...)
label_column = ...
def input_fn():
features = tf.io.parse_example(
..., features=make_parse_example_spec(columns + [label_column]))
labels = features.pop(label_column.name)
return features, labels
estimator.train(input_fn=input_fn, steps=100)
```
Here is an example using `embedding_column` with model_fn:
```python
def model_fn(features, ...):
video_id = categorical_column_with_identity(
key='video_id', num_buckets=1000000, default_value=0)
columns = [embedding_column(video_id, 9),...]
dense_tensor = input_layer(features, columns)
# Form DNN layers, calculate loss, and return EstimatorSpec.
...
```
Args:
categorical_column: A `CategoricalColumn` created by a
`categorical_column_with_*` function. This column produces the sparse IDs
that are inputs to the embedding lookup.
dimension: An integer specifying dimension of the embedding, must be > 0.
combiner: A string specifying how to reduce if there are multiple entries in
a single row. Currently 'mean', 'sqrtn' and 'sum' are supported, with
'mean' the default. 'sqrtn' often achieves good accuracy, in particular
      with bag-of-words columns. Each of these can be thought of as an
      example-level normalization on the column. For more information, see
`tf.embedding_lookup_sparse`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`truncated_normal_initializer` with mean `0.0` and
standard deviation `1/sqrt(dimension)`.
ckpt_to_load_from: String representing checkpoint name/pattern from which to
restore column weights. Required if `tensor_name_in_ckpt` is not `None`.
tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_to_load_from` from which
to restore the column weights. Required if `ckpt_to_load_from` is not
`None`.
max_norm: If not `None`, embedding values are l2-normalized to this value.
    trainable: Whether or not the embedding is trainable. Default is True.
    coalesced_scope: Optional coalesced embedding scope. If `None`, the current
      coalesced scope (if any) is used, and the new column is registered with it.
Returns:
`DenseColumn` that converts from sparse input.
Raises:
ValueError: if `dimension` not > 0.
ValueError: if exactly one of `ckpt_to_load_from` and `tensor_name_in_ckpt`
is specified.
ValueError: if `initializer` is specified and is not callable.
RuntimeError: If eager execution is enabled.
"""
if (dimension is None) or (dimension < 1):
raise ValueError('Invalid dimension {}.'.format(dimension))
if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
raise ValueError('Must specify both `ckpt_to_load_from` and '
'`tensor_name_in_ckpt` or none of them.')
if (initializer is not None) and (not callable(initializer)):
raise ValueError('initializer must be callable if specified. '
'Embedding of column_name: {}'.format(
categorical_column.name))
if initializer is None:
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=1 / math.sqrt(dimension))
if coalesced_scope is None:
coalesced_scope = current_coalesced_scope()
column = EmbeddingColumn(
categorical_column=categorical_column,
dimension=dimension,
combiner=combiner,
initializer=initializer,
ckpt_to_load_from=ckpt_to_load_from,
tensor_name_in_ckpt=tensor_name_in_ckpt,
max_norm=max_norm,
trainable=trainable,
coalesced_scope=coalesced_scope)
if coalesced_scope:
coalesced_scope.add_column(column)
coalesced_utils.add_embedding_signature(
column, dimension, combiner, initializer, trainable,
categorical_column._num_buckets)
return column
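# --- Editor's note: illustrative sketch only, never called by this module. It
# builds an `embedding_column` on top of a hash-bucket categorical column; the
# key 'video_id' and the sizes are made-up examples.
def _embedding_column_usage_sketch():
  """Illustrative sketch of `embedding_column`; not part of the public API."""
  video_id = categorical_column_with_hash_bucket('video_id', 1000000)
  # A trainable embedding table of shape (1000000, 9) backs this column; each
  # example's ids are combined into a single 9-dimensional dense vector.
  return embedding_column(video_id, dimension=9)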
@tf_export('feature_column.sequence_embedding_column')
def sequence_embedding_column(dense_column, sequence_length):
"""`DenseColumn` that converts from dense input.
Args:
dense_column: A `DenseColumn`. This column produces the dense tensor
that are inputs to reshape.
sequence_length: The sequence length of sample.
Returns:
`DenseColumn` that converts from dense input.
Raises:
ValueError: if `sequence_length` not > 0.
"""
if sequence_length is None:
raise ValueError('sequence_length must be set.')
if sequence_length < 1:
raise ValueError('sequence_length must be at least 1.')
return SequenceEmbeddingColumn(dense_column, sequence_length)
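# --- Editor's note: illustrative sketch only, never called by this module. It
# wraps a dense embedding column so that its output is treated as a sequence
# of fixed length; the key, sizes and sequence length are made-up examples.
def _sequence_embedding_column_usage_sketch():
  """Illustrative sketch of `sequence_embedding_column`; not part of the API."""
  keywords = categorical_column_with_hash_bucket('keywords', 10000)
  keywords_embedded = embedding_column(keywords, dimension=16)
  # Note that `_LinearModelLayer` above explicitly rejects this column type.
  return sequence_embedding_column(keywords_embedded, sequence_length=5)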
@tf_export('feature_column.sequence_multi_hash_embedding_column')
def sequence_multi_hash_embedding_column(dense_column, sequence_length):
"""`DenseColumn` that converts from dense input.
Args:
dense_column: A `SharedMultiHashEmbeddingColumn`. This column
produces the dense tensor that are inputs to reshape.
sequence_length: The sequence length of sample.
Returns:
`DenseColumn` that converts from dense input.
Raises:
ValueError: if `sequence_length` not > 0.
"""
if sequence_length is None:
raise ValueError('sequence_length must be set.')
if sequence_length < 1:
raise ValueError('sequence_length must be at least 1.')
if not isinstance(dense_column, SharedMultiHashEmbeddingColumn):
raise ValueError('input column must be multi_hash_embedding_column')
return SequenceMultiHashEmbeddingColumn(dense_column, sequence_length)
@tf_export(v1=['feature_column.shared_embedding_columns'])
def shared_embedding_columns(categorical_columns,
dimension,
combiner='mean',
initializer=None,
shared_embedding_collection_name=None,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
max_norm=None,
trainable=True):
"""List of dense columns that convert from sparse, categorical input.
This is similar to `embedding_column`, except that it produces a list of
embedding columns that share the same embedding weights.
Use this when your inputs are sparse and of the same type (e.g. watched and
impression video IDs that share the same vocabulary), and you want to convert
them to a dense representation (e.g., to feed to a DNN).
Inputs must be a list of categorical columns created by any of the
  `categorical_column_*` functions. They must all be of the same type and have
the same arguments except `key`. E.g. they can be
categorical_column_with_vocabulary_file with the same vocabulary_file. Some or
all columns could also be weighted_categorical_column.
Here is an example embedding of two features for a DNNClassifier model:
```python
watched_video_id = categorical_column_with_vocabulary_file(
'watched_video_id', video_vocabulary_file, video_vocabulary_size)
impression_video_id = categorical_column_with_vocabulary_file(
'impression_video_id', video_vocabulary_file, video_vocabulary_size)
columns = shared_embedding_columns(
[watched_video_id, impression_video_id], dimension=10)
estimator = tf.estimator.DNNClassifier(feature_columns=columns, ...)
label_column = ...
def input_fn():
features = tf.io.parse_example(
..., features=make_parse_example_spec(columns + [label_column]))
labels = features.pop(label_column.name)
return features, labels
estimator.train(input_fn=input_fn, steps=100)
```
Here is an example using `shared_embedding_columns` with model_fn:
```python
def model_fn(features, ...):
watched_video_id = categorical_column_with_vocabulary_file(
'watched_video_id', video_vocabulary_file, video_vocabulary_size)
impression_video_id = categorical_column_with_vocabulary_file(
'impression_video_id', video_vocabulary_file, video_vocabulary_size)
columns = shared_embedding_columns(
[watched_video_id, impression_video_id], dimension=10)
dense_tensor = input_layer(features, columns)
# Form DNN layers, calculate loss, and return EstimatorSpec.
...
```
Args:
categorical_columns: List of categorical columns created by a
`categorical_column_with_*` function. These columns produce the sparse IDs
that are inputs to the embedding lookup. All columns must be of the same
type and have the same arguments except `key`. E.g. they can be
categorical_column_with_vocabulary_file with the same vocabulary_file.
Some or all columns could also be weighted_categorical_column.
dimension: An integer specifying dimension of the embedding, must be > 0.
combiner: A string specifying how to reduce if there are multiple entries in
a single row. Currently 'mean', 'sqrtn' and 'sum' are supported, with
'mean' the default. 'sqrtn' often achieves good accuracy, in particular
      with bag-of-words columns. Each of these can be thought of as an
      example-level normalization on the column. For more information, see
`tf.embedding_lookup_sparse`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`truncated_normal_initializer` with mean `0.0` and
standard deviation `1/sqrt(dimension)`.
shared_embedding_collection_name: Optional name of the collection where
shared embedding weights are added. If not given, a reasonable name will
be chosen based on the names of `categorical_columns`. This is also used
in `variable_scope` when creating shared embedding weights.
ckpt_to_load_from: String representing checkpoint name/pattern from which to
restore column weights. Required if `tensor_name_in_ckpt` is not `None`.
tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_to_load_from` from which
to restore the column weights. Required if `ckpt_to_load_from` is not
`None`.
max_norm: If not `None`, each embedding is clipped if its l2-norm is larger
than this value, before combining.
trainable: Whether or not the embedding is trainable. Default is True.
Returns:
A list of dense columns that converts from sparse input. The order of
results follows the ordering of `categorical_columns`.
Raises:
ValueError: if `dimension` not > 0.
ValueError: if any of the given `categorical_columns` is of different type
or has different arguments than the others.
ValueError: if exactly one of `ckpt_to_load_from` and `tensor_name_in_ckpt`
is specified.
ValueError: if `initializer` is specified and is not callable.
RuntimeError: if eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('shared_embedding_columns are not supported when eager '
'execution is enabled.')
if (dimension is None) or (dimension < 1):
raise ValueError('Invalid dimension {}.'.format(dimension))
if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
raise ValueError('Must specify both `ckpt_to_load_from` and '
'`tensor_name_in_ckpt` or none of them.')
if (initializer is not None) and (not callable(initializer)):
raise ValueError('initializer must be callable if specified.')
if initializer is None:
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=1. / math.sqrt(dimension))
# Sort the columns so the default collection name is deterministic even if the
# user passes columns from an unsorted collection, such as dict.values().
sorted_columns = sorted(categorical_columns, key=lambda x: x.name)
c0 = sorted_columns[0]
num_buckets = c0._num_buckets # pylint: disable=protected-access
if not isinstance(c0, fc_old._CategoricalColumn): # pylint: disable=protected-access
raise ValueError(
'All categorical_columns must be subclasses of _CategoricalColumn. '
'Given: {}, of type: {}'.format(c0, type(c0)))
if isinstance(c0,
(fc_old._WeightedCategoricalColumn, WeightedCategoricalColumn)): # pylint: disable=protected-access
c0 = c0.categorical_column
for c in sorted_columns[1:]:
if isinstance(
c, (fc_old._WeightedCategoricalColumn, WeightedCategoricalColumn)): # pylint: disable=protected-access
c = c.categorical_column
if not isinstance(c, type(c0)):
raise ValueError(
'To use shared_embedding_column, all categorical_columns must have '
'the same type, or be weighted_categorical_column of the same type. '
'Given column: {} of type: {} does not match given column: {} of '
'type: {}'.format(c0, type(c0), c, type(c)))
if num_buckets != c._num_buckets: # pylint: disable=protected-access
raise ValueError(
'To use shared_embedding_column, all categorical_columns must have '
'the same number of buckets. Given column: {} with buckets: {} does '
'not match column: {} with buckets: {}'.format(
c0, num_buckets, c, c._num_buckets)) # pylint: disable=protected-access
if not shared_embedding_collection_name:
shared_embedding_collection_name = '_'.join(c.name for c in sorted_columns)
shared_embedding_collection_name += '_shared_embedding'
result = []
for column in categorical_columns:
result.append(
fc_old._SharedEmbeddingColumn( # pylint: disable=protected-access
categorical_column=column,
initializer=initializer,
dimension=dimension,
combiner=combiner,
shared_embedding_collection_name=shared_embedding_collection_name,
ckpt_to_load_from=ckpt_to_load_from,
tensor_name_in_ckpt=tensor_name_in_ckpt,
max_norm=max_norm,
trainable=trainable))
return result
@tf_export('feature_column.shared_embedding_column', v1=[])
def shared_embedding_column(categorical_column,
dimension,
shared_name,
combiner='mean',
initializer=None,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
max_norm=None,
trainable=True,
coalesced_scope=None):
"""Dense column that convert from sparse, categorical input.
This is similar to `embedding_column`, except that it produces a
embedding column that can share the same embedding weights.
Here is an example embedding of two features for a DNNClassifier model:
```python
watched_video_id = categorical_column_with_vocabulary_file(
'watched_video_id', video_vocabulary_file, video_vocabulary_size)
wvi_column = shared_embedding_column(
watched_video_id, 10, 'id')
impression_video_id = categorical_column_with_vocabulary_file(
'impression_video_id', video_vocabulary_file, video_vocabulary_size)
ivi_column = shared_embedding_column(
impression_video_id, 10, 'id')
columns = [wvi_column, ivi_column]
estimator = tf.estimator.DNNClassifier(feature_columns=columns, ...)
label_column = ...
def input_fn():
features = tf.io.parse_example(
..., features=make_parse_example_spec(columns + [label_column]))
labels = features.pop(label_column.name)
return features, labels
estimator.train(input_fn=input_fn, steps=100)
```
Args:
    categorical_column: A categorical column created by a
      `categorical_column_with_*` function. This column produces the sparse IDs
      that are inputs to the embedding lookup.
    dimension: An integer specifying dimension of the embedding, must be > 0.
    shared_name: The name shared by all columns that use the same embedding
      weights.
combiner: A string specifying how to reduce if there are multiple entries
in a single row. Currently 'mean', 'sqrtn' and 'sum' are supported, with
'mean' the default. 'sqrtn' often achieves good accuracy, in particular
      with bag-of-words columns. Each of these can be thought of as an
      example-level normalization on the column. For more information, see
`tf.embedding_lookup_sparse`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`truncated_normal_initializer` with mean `0.0` and standard
deviation `1/sqrt(dimension)`.
ckpt_to_load_from: String representing checkpoint name/pattern from which to
restore column weights. Required if `tensor_name_in_ckpt` is not `None`.
tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_to_load_from` from
which to restore the column weights. Required if `ckpt_to_load_from` is
not `None`.
max_norm: If not `None`, each embedding is clipped if its l2-norm is
larger than this value, before combining.
    trainable: Whether or not the embedding is trainable. Default is True.
    coalesced_scope: Optional coalesced embedding scope. If `None`, the current
      coalesced scope (if any) is used, and the new column is registered with it.
Returns:
A dense column that converts from sparse input.
Raises:
ValueError: if `dimension` not > 0.
    ValueError: if `categorical_column` is not an instance of
      `CategoricalColumn`.
ValueError: if exactly one of `ckpt_to_load_from` and `tensor_name_in_ckpt`
is specified.
ValueError: if `initializer` is specified and is not callable.
RuntimeError: if eager execution is enabled.
"""
if context.executing_eagerly():
    raise RuntimeError('shared_embedding_column is not supported when eager '
'execution is enabled.')
if (dimension is None) or (dimension < 1):
raise ValueError('Invalid dimension {}.'.format(dimension))
if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
raise ValueError('Must specify both `ckpt_to_load_from` and '
'`tensor_name_in_ckpt` or none of them.')
if (initializer is not None) and (not callable(initializer)):
raise ValueError('initializer must be callable if specified.')
if initializer is None:
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=1. / math.sqrt(dimension))
if not isinstance(categorical_column, CategoricalColumn):
raise ValueError(
'Input categorical_column must be subclasses of CategoricalColumn. '
'Given: {}, of type: {}'.format(categorical_column, type(categorical_column)))
num_buckets = categorical_column.num_buckets
if coalesced_scope is None:
coalesced_scope = current_coalesced_scope()
column = SharedEmbeddingColumnV2(
categorical_column=categorical_column,
dimension=dimension,
shared_name=shared_name,
combiner=combiner,
initializer=initializer,
ckpt_to_load_from=ckpt_to_load_from,
tensor_name_in_ckpt=tensor_name_in_ckpt,
max_norm=max_norm,
trainable=trainable,
coalesced_scope=coalesced_scope)
if coalesced_scope:
coalesced_scope.add_column(column)
coalesced_utils.add_embedding_signature(
column, dimension, combiner, initializer, trainable,
categorical_column.num_buckets)
return column
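# --- Editor's note: illustrative sketch only, never called by this module and
# valid in graph mode only (the constructor raises under eager execution). Two
# categorical columns with the same bucket count share one embedding table via
# the same `shared_name`; keys, sizes and the shared name are made-up examples.
def _shared_embedding_column_usage_sketch():
  """Illustrative sketch of `shared_embedding_column`; not part of the API."""
  watched = categorical_column_with_hash_bucket('watched_video_id', 1000000)
  impressed = categorical_column_with_hash_bucket('impression_video_id', 1000000)
  watched_emb = shared_embedding_column(watched, 10, 'video_id_shared')
  impressed_emb = shared_embedding_column(impressed, 10, 'video_id_shared')
  return [watched_emb, impressed_emb]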
@tf_export('feature_column.multi_hash_embedding_column', v1=[])
def multi_hash_embedding_column(categorical_column,
dimension,
shared_name,
hash_combiner='mean',
combiner='mean',
initializer=None,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
max_norm=None,
trainable=True,
coalesced_scope=None):
"""Dense column that convert from sparse, categorical input.
This is similar to `embedding_column`, except that it produces a
embedding column that can share the same embedding weights.
Args:
    categorical_column: A categorical column created by
      `categorical_column_with_multi_hash_bucket` or
      `weighted_categorical_column_with_multi_hash_bucket` (optionally wrapped
      in `cutoff_categorical_column`). This column produces the sparse IDs
      that are inputs to the embedding lookup.
    dimension: An integer specifying dimension of the embedding, must be > 0.
    shared_name: The name shared by all columns that use the same embedding
      weights.
    hash_combiner: A string specifying how to reduce the lookups produced by
      the multiple hash functions.
combiner: A string specifying how to reduce if there are multiple entries
in a single row. Currently 'mean', 'sqrtn' and 'sum' are supported, with
'mean' the default. 'sqrtn' often achieves good accuracy, in particular
      with bag-of-words columns. Each of these can be thought of as an
      example-level normalization on the column. For more information, see
`tf.embedding_lookup_sparse`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`truncated_normal_initializer` with mean `0.0` and standard
deviation `1/sqrt(dimension)`.
ckpt_to_load_from: String representing checkpoint name/pattern from which to
restore column weights. Required if `tensor_name_in_ckpt` is not `None`.
tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_to_load_from` from
which to restore the column weights. Required if `ckpt_to_load_from` is
not `None`.
max_norm: If not `None`, each embedding is clipped if its l2-norm is
larger than this value, before combining.
    trainable: Whether or not the embedding is trainable. Default is True.
    coalesced_scope: Optional coalesced embedding scope. If `None`, the current
      coalesced scope (if any) is used, and the new column is registered with it.
Returns:
A dense column that converts from sparse input.
Raises:
ValueError: if `dimension` not > 0.
    ValueError: if `categorical_column` is not a multi-hash categorical column
      (see `categorical_column_with_multi_hash_bucket`).
ValueError: if exactly one of `ckpt_to_load_from` and `tensor_name_in_ckpt`
is specified.
ValueError: if `initializer` is specified and is not callable.
RuntimeError: if eager execution is enabled.
"""
if context.executing_eagerly():
    raise RuntimeError('multi_hash_embedding_column is not supported when eager '
'execution is enabled.')
if (dimension is None) or (dimension < 1):
raise ValueError('Invalid dimension {}.'.format(dimension))
if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
raise ValueError('Must specify both `ckpt_to_load_from` and '
'`tensor_name_in_ckpt` or none of them.')
if (initializer is not None) and (not callable(initializer)):
raise ValueError('initializer must be callable if specified.')
if initializer is None:
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=1. / math.sqrt(dimension))
input_column = categorical_column
while isinstance(input_column, CutoffCategoricalColumn):
input_column = input_column.categorical_column
if not isinstance(input_column, (MultiHashedCategoricalColumn, WeightedMultiHashedCategoricalColumn)):
raise ValueError(
'In multi_hash_embedding_column, '
'categorical_column must be one of type '
'categorical_column_with_multi_hash_bucket or '
'weighted_categorical_column_with_multi_hash_bucket. '
'Given (type {}): {}'.format(type(input_column),
input_column))
if coalesced_scope is None:
coalesced_scope = current_coalesced_scope()
num_buckets = categorical_column.num_buckets
column = SharedMultiHashEmbeddingColumn(
categorical_column=categorical_column,
dimension=dimension,
shared_name=shared_name,
hash_combiner=hash_combiner,
combiner=combiner,
initializer=initializer,
ckpt_to_load_from=ckpt_to_load_from,
tensor_name_in_ckpt=tensor_name_in_ckpt,
max_norm=max_norm,
trainable=trainable,
coalesced_scope=coalesced_scope)
if coalesced_scope:
coalesced_scope.add_column(column)
coalesced_utils.add_embedding_signature(
column, dimension, combiner, initializer, trainable,
categorical_column._num_buckets, hash_combiner=hash_combiner)
return column
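# --- Editor's note: illustrative sketch only, never called by this module and
# valid in graph mode only (the constructor raises under eager execution). The
# categorical column hashes each value with several hash functions and
# `hash_combiner` reduces the per-hash lookups; the key, sizes and hash type
# names are made-up examples mirroring the docstrings above.
def _multi_hash_embedding_column_usage_sketch():
  """Illustrative sketch of `multi_hash_embedding_column`; not part of the API."""
  terms = categorical_column_with_multi_hash_bucket(
      'terms', hash_bucket_size=1000, hash_types=['murmur', 'farm'])
  return multi_hash_embedding_column(
      terms, dimension=8, shared_name='terms_shared', hash_combiner='mean')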
@tf_export('feature_column.shared_embeddings', v1=[])
def shared_embedding_columns_v2(categorical_columns,
dimension,
combiner='mean',
initializer=None,
shared_embedding_collection_name=None,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
max_norm=None,
trainable=True,
coalesced_scope=None):
"""List of dense columns that convert from sparse, categorical input.
This is similar to `embedding_column`, except that it produces a list of
embedding columns that share the same embedding weights.
Use this when your inputs are sparse and of the same type (e.g. watched and
impression video IDs that share the same vocabulary), and you want to convert
them to a dense representation (e.g., to feed to a DNN).
Inputs must be a list of categorical columns created by any of the
  `categorical_column_*` functions. They must all be of the same type and have
the same arguments except `key`. E.g. they can be
categorical_column_with_vocabulary_file with the same vocabulary_file. Some or
all columns could also be weighted_categorical_column.
Here is an example embedding of two features for a DNNClassifier model:
```python
watched_video_id = categorical_column_with_vocabulary_file(
'watched_video_id', video_vocabulary_file, video_vocabulary_size)
impression_video_id = categorical_column_with_vocabulary_file(
'impression_video_id', video_vocabulary_file, video_vocabulary_size)
columns = shared_embedding_columns(
[watched_video_id, impression_video_id], dimension=10)
estimator = tf.estimator.DNNClassifier(feature_columns=columns, ...)
label_column = ...
def input_fn():
features = tf.io.parse_example(
..., features=make_parse_example_spec(columns + [label_column]))
labels = features.pop(label_column.name)
return features, labels
estimator.train(input_fn=input_fn, steps=100)
```
Here is an example using `shared_embedding_columns` with model_fn:
```python
def model_fn(features, ...):
watched_video_id = categorical_column_with_vocabulary_file(
'watched_video_id', video_vocabulary_file, video_vocabulary_size)
impression_video_id = categorical_column_with_vocabulary_file(
'impression_video_id', video_vocabulary_file, video_vocabulary_size)
columns = shared_embedding_columns(
[watched_video_id, impression_video_id], dimension=10)
dense_tensor = input_layer(features, columns)
# Form DNN layers, calculate loss, and return EstimatorSpec.
...
```
Args:
categorical_columns: List of categorical columns created by a
`categorical_column_with_*` function. These columns produce the sparse IDs
that are inputs to the embedding lookup. All columns must be of the same
type and have the same arguments except `key`. E.g. they can be
categorical_column_with_vocabulary_file with the same vocabulary_file.
Some or all columns could also be weighted_categorical_column.
dimension: An integer specifying dimension of the embedding, must be > 0.
combiner: A string specifying how to reduce if there are multiple entries
in a single row. Currently 'mean', 'sqrtn' and 'sum' are supported, with
'mean' the default. 'sqrtn' often achieves good accuracy, in particular
      with bag-of-words columns. Each of these can be thought of as an
      example-level normalization on the column. For more information, see
`tf.embedding_lookup_sparse`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`truncated_normal_initializer` with mean `0.0` and standard
deviation `1/sqrt(dimension)`.
shared_embedding_collection_name: Optional collective name of these columns.
If not given, a reasonable name will be chosen based on the names of
`categorical_columns`.
ckpt_to_load_from: String representing checkpoint name/pattern from which to
restore column weights. Required if `tensor_name_in_ckpt` is not `None`.
tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_to_load_from` from
which to restore the column weights. Required if `ckpt_to_load_from` is
not `None`.
max_norm: If not `None`, each embedding is clipped if its l2-norm is
larger than this value, before combining.
    trainable: Whether or not the embedding is trainable. Default is True.
    coalesced_scope: Optional coalesced embedding scope. If `None`, the current
      coalesced scope (if any) is used, and the new columns are registered with it.
Returns:
A list of dense columns that converts from sparse input. The order of
results follows the ordering of `categorical_columns`.
Raises:
ValueError: if `dimension` not > 0.
ValueError: if any of the given `categorical_columns` is of different type
or has different arguments than the others.
ValueError: if exactly one of `ckpt_to_load_from` and `tensor_name_in_ckpt`
is specified.
ValueError: if `initializer` is specified and is not callable.
RuntimeError: if eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('shared_embedding_columns are not supported when eager '
'execution is enabled.')
if (dimension is None) or (dimension < 1):
raise ValueError('Invalid dimension {}.'.format(dimension))
if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
raise ValueError('Must specify both `ckpt_to_load_from` and '
'`tensor_name_in_ckpt` or none of them.')
if (initializer is not None) and (not callable(initializer)):
raise ValueError('initializer must be callable if specified.')
if initializer is None:
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=1. / math.sqrt(dimension))
# Sort the columns so the default collection name is deterministic even if the
# user passes columns from an unsorted collection, such as dict.values().
sorted_columns = sorted(categorical_columns, key=lambda x: x.name)
c0 = sorted_columns[0]
num_buckets = c0.num_buckets
if not isinstance(c0, CategoricalColumn):
raise ValueError(
'All categorical_columns must be subclasses of CategoricalColumn. '
'Given: {}, of type: {}'.format(c0, type(c0)))
if isinstance(c0, WeightedCategoricalColumn):
c0 = c0.categorical_column
for c in sorted_columns[1:]:
if isinstance(c, WeightedCategoricalColumn):
c = c.categorical_column
if not isinstance(c, type(c0)):
raise ValueError(
'To use shared_embedding_column, all categorical_columns must have '
'the same type, or be weighted_categorical_column of the same type. '
'Given column: {} of type: {} does not match given column: {} of '
'type: {}'.format(c0, type(c0), c, type(c)))
if num_buckets != c.num_buckets:
raise ValueError(
'To use shared_embedding_column, all categorical_columns must have '
'the same number of buckets. Given column: {} with buckets: {} does '
'not match column: {} with buckets: {}'.format(
c0, num_buckets, c, c.num_buckets))
if not shared_embedding_collection_name:
shared_embedding_collection_name = '_'.join(c.name for c in sorted_columns)
shared_embedding_collection_name += '_shared_embedding'
if coalesced_scope is None:
coalesced_scope = current_coalesced_scope()
column_creator = SharedEmbeddingColumnCreator(
dimension, initializer, ckpt_to_load_from, tensor_name_in_ckpt,
num_buckets, trainable, shared_embedding_collection_name)
result = []
for column in categorical_columns:
result_column = column_creator(categorical_column=column,
combiner=combiner,
max_norm=max_norm,
coalesced_scope=coalesced_scope)
if coalesced_scope:
coalesced_scope.add_column(result_column)
coalesced_utils.add_embedding_signature(
result_column, dimension, combiner, initializer, trainable,
column.num_buckets)
result.append(result_column)
return result
@tf_export('feature_column.numeric_column')
def numeric_column(key,
shape=(1,),
default_value=None,
dtype=dtypes.float32,
normalizer_fn=None):
"""Represents real valued or numerical features.
Example:
```python
price = numeric_column('price')
columns = [price, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
# or
bucketized_price = bucketized_column(price, boundaries=[...])
columns = [bucketized_price, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
Args:
key: A unique string identifying the input feature. It is used as the
column name and the dictionary key for feature parsing configs, feature
`Tensor` objects, and feature columns.
    shape: An iterable of integers specifying the shape of the `Tensor`. A
      single integer can be given, meaning a one-dimensional `Tensor` of that
      width. The `Tensor` representing the column will have the shape of
[batch_size] + `shape`.
default_value: A single value compatible with `dtype` or an iterable of
values compatible with `dtype` which the column takes on during
`tf.Example` parsing if data is missing. A default value of `None` will
cause `tf.io.parse_example` to fail if an example does not contain this
column. If a single value is provided, the same value will be applied as
the default value for every item. If an iterable of values is provided,
the shape of the `default_value` should be equal to the given `shape`.
dtype: defines the type of values. Default value is `tf.float32`. Must be a
non-quantized, real integer or floating point type.
normalizer_fn: If not `None`, a function that can be used to normalize the
value of the tensor after `default_value` is applied for parsing.
Normalizer function takes the input `Tensor` as its argument, and returns
the output `Tensor`. (e.g. lambda x: (x - 3.0) / 4.2). Please note that
      even though the most common use case of this function is normalization, it
      can be used for any kind of TensorFlow transformation.
Returns:
A `NumericColumn`.
Raises:
TypeError: if any dimension in shape is not an int
ValueError: if any dimension in shape is not a positive integer
TypeError: if `default_value` is an iterable but not compatible with `shape`
TypeError: if `default_value` is not compatible with `dtype`.
ValueError: if `dtype` is not convertible to `tf.float32`.
"""
shape = _check_shape(shape, key)
if not (dtype.is_integer or dtype.is_floating):
raise ValueError('dtype must be convertible to float. '
'dtype: {}, key: {}'.format(dtype, key))
default_value = fc_utils.check_default_value(
shape, default_value, dtype, key)
if normalizer_fn is not None and not callable(normalizer_fn):
raise TypeError(
'normalizer_fn must be a callable. Given: {}'.format(normalizer_fn))
fc_utils.assert_key_is_string(key)
return NumericColumn(
key,
shape=shape,
default_value=default_value,
dtype=dtype,
normalizer_fn=normalizer_fn)
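# --- Editor's note: illustrative sketch only, never called by this module. It
# shows `normalizer_fn`, which runs after parsing and default filling; the
# rescaling constants are made-up examples.
def _numeric_column_usage_sketch():
  """Illustrative sketch of `numeric_column`; not part of the public API."""
  price = numeric_column(
      'price', shape=(1,), normalizer_fn=lambda x: (x - 3.0) / 4.2)
  # price.parse_example_spec maps 'price' to FixedLenFeature([1], tf.float32).
  return price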
@tf_export('feature_column.sparse_numeric_column')
def sparse_numeric_column(key,
shape=None,
dtype=dtypes.int64):
"""Represents sparse format real valued or numerical features.
Example:
```python
price = sparse_numeric_column('price')
columns = [price, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
```
Args:
key: A unique string identifying the input feature. It is used as the
column name and the dictionary key for feature parsing configs, feature
`Tensor` objects, and feature columns.
shape: An iterable of integers specifies the shape of the `Tensor`. An
integer can be given which means a single dimension `Tensor` with given
width. The `Tensor` representing the column will have the shape of
[batch_size] + `shape`.
    dtype: defines the type of values. Default value is `tf.int64`. Must be a
      non-quantized, real integer or floating point type.
Returns:
A `SparseNumericColumn`.
Raises:
TypeError: if any dimension in shape is not an int
ValueError: if any dimension in shape is not a positive integer
ValueError: if `dtype` is not convertible to `tf.float32`.
"""
if shape is not None:
shape = _check_shape(shape, key)
if not (dtype.is_integer or dtype.is_floating):
raise ValueError('dtype must be convertible to float. '
'dtype: {}, key: {}'.format(dtype, key))
fc_utils.assert_key_is_string(key)
return SparseNumericColumn(
key,
shape=shape,
dtype=dtype)
@tf_export('feature_column.bucketized_column')
def bucketized_column(source_column, boundaries):
"""Represents discretized dense input.
Buckets include the left boundary, and exclude the right boundary. Namely,
`boundaries=[0., 1., 2.]` generates buckets `(-inf, 0.)`, `[0., 1.)`,
`[1., 2.)`, and `[2., +inf)`.
For example, if the inputs are
```python
boundaries = [0, 10, 100]
input tensor = [[-5, 10000]
[150, 10]
[5, 100]]
```
then the output will be
```python
output = [[0, 3]
[3, 2]
[1, 3]]
```
Example:
```python
price = numeric_column('price')
bucketized_price = bucketized_column(price, boundaries=[...])
columns = [bucketized_price, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
# or
columns = [bucketized_price, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
```
`bucketized_column` can also be crossed with another categorical column using
`crossed_column`:
```python
price = numeric_column('price')
# bucketized_column converts numerical feature to a categorical one.
bucketized_price = bucketized_column(price, boundaries=[...])
# 'keywords' is a string feature.
price_x_keywords = crossed_column([bucketized_price, 'keywords'], 50K)
columns = [price_x_keywords, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
Args:
source_column: A one-dimensional dense column which is generated with
`numeric_column`.
boundaries: A sorted list or tuple of floats specifying the boundaries.
Returns:
A `BucketizedColumn`.
Raises:
ValueError: If `source_column` is not a numeric column, or if it is not
one-dimensional.
ValueError: If `boundaries` is not a sorted list or tuple.
"""
if not isinstance(source_column, (NumericColumn, fc_old._NumericColumn)): # pylint: disable=protected-access
raise ValueError(
'source_column must be a column generated with numeric_column(). '
'Given: {}'.format(source_column))
if len(source_column.shape) > 1:
raise ValueError(
'source_column must be one-dimensional column. '
'Given: {}'.format(source_column))
if not boundaries:
raise ValueError('boundaries must not be empty.')
if not (isinstance(boundaries, list) or isinstance(boundaries, tuple)):
raise ValueError('boundaries must be a sorted list.')
for i in range(len(boundaries) - 1):
if boundaries[i] >= boundaries[i + 1]:
raise ValueError('boundaries must be a sorted list.')
return BucketizedColumn(source_column, tuple(boundaries))
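# --- Editor's note: illustrative sketch only, never called by this module. It
# reproduces the boundary example from the docstring above with a
# `FeatureTransformationCache`; the feature key is a made-up example.
def _bucketized_column_usage_sketch():
  """Illustrative sketch of `bucketized_column`; not part of the public API."""
  from tensorflow.python.framework import constant_op  # pylint: disable=g-import-not-at-top
  price = numeric_column('price', shape=(2,))
  bucketized_price = bucketized_column(price, boundaries=[0, 10, 100])
  features = {
      'price': constant_op.constant([[-5., 10000.], [150., 10.], [5., 100.]])
  }
  cache = FeatureTransformationCache(features)
  # Expected bucket ids per the docstring: [[0, 3], [3, 2], [1, 3]].
  return cache.get(bucketized_price, None)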
@tf_export('feature_column.sparse_bucketized_column')
def sparse_bucketized_column(source_column, boundaries):
"""Represents discretized dense input.
Buckets include the left boundary, and exclude the right boundary. Namely,
`boundaries=[0., 1., 2.]` generates buckets `(-inf, 0.)`, `[0., 1.)`,
`[1., 2.)`, and `[2., +inf)`.
Example:
```python
price = sparse_numeric_column('price')
bucketized_price = sparse_bucketized_column(price, boundaries=[...])
columns = [bucketized_price, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
Args:
    source_column: A one-dimensional column generated with `numeric_column`
      or `sparse_numeric_column`.
boundaries: A sorted list or tuple of floats specifying the boundaries.
Returns:
A `SparseBucketizedColumn`.
Raises:
ValueError: If `source_column` is not a numeric column, or if it is not
one-dimensional.
ValueError: If `boundaries` is not a sorted list or tuple.
"""
if not isinstance(source_column, (NumericColumn, fc_old._NumericColumn, SparseNumericColumn)): # pylint: disable=protected-access
raise ValueError(
'source_column must be a column generated with numeric_column() or sparse_numeric_column. '
'Given: {}'.format(source_column))
if source_column.shape is not None and len(source_column.shape) > 1:
raise ValueError(
'source_column must be one-dimensional column. '
'Given: {}'.format(source_column))
if not boundaries:
raise ValueError('boundaries must not be empty.')
if not (isinstance(boundaries, list) or isinstance(boundaries, tuple)):
raise ValueError('boundaries must be a sorted list.')
for i in range(len(boundaries) - 1):
if boundaries[i] >= boundaries[i + 1]:
raise ValueError('boundaries must be a sorted list.')
return SparseBucketizedColumn(source_column, tuple(boundaries))
@tf_export('feature_column.cutoff_categorical_column')
def cutoff_categorical_column(categorical_column,
cutoff_length,
cutoff_side='right',
cutoff_axis=1,
reverse=False):
"""Cutoff cateforical column sparse id.
Example:
```python
price = sparse_numeric_column('price')
bucketized_price = sparse_bucketized_column(price, boundaries=[...])
cutoff_price = cutoff_categorical_column(bucketized_price, 5)
columns = [cutoff_price, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
Args:
categorical_column: A categorical column.
    cutoff_length: Maximum size of the categorical column's input sparse tensor
      along `cutoff_axis` after the cut.
    cutoff_side: Side to cut; one of 'right' or 'left'.
    cutoff_axis: Axis along which to cut. Must be at least 1.
    reverse: Whether to reverse the ids after cutting.
Returns:
A `CutoffCategoricalColumn`.
Raises:
ValueError: If `cutoff_length` is not a valid int.
ValueError: If `cutoff_axis` is smaller than 1.
ValueError: If `cutoff_side` is not one of `right/left`.
"""
if cutoff_length is None:
raise ValueError('cutoff_length must be set.')
if cutoff_length < 1:
raise ValueError('cutoff_length must be at least 1.')
if cutoff_axis < 1:
raise ValueError('cutoff_axis must be at least 1.')
if cutoff_side not in ('right', 'left'):
raise ValueError('cutoff_side must be one of `right` or `left`.')
return CutoffCategoricalColumn(categorical_column,
cutoff_length,
cutoff_side,
cutoff_axis,
reverse)
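# --- Editor's note: illustrative sketch only, never called by this module. It
# limits each example to at most 5 keyword ids along axis 1; the key and
# bucket size are made-up examples.
def _cutoff_categorical_column_usage_sketch():
  """Illustrative sketch of `cutoff_categorical_column`; not part of the API."""
  keywords = categorical_column_with_hash_bucket('keywords', 10000)
  return cutoff_categorical_column(
      keywords, cutoff_length=5, cutoff_side='right', cutoff_axis=1)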
@tf_export('feature_column.categorical_column_with_hash')
def categorical_column_with_hash(key,
hash_type='farm',
allow_neg=True,
dtype=dtypes.string):
"""Represents sparse feature where ids are set by hashing.
  Use this when your sparse features are in string or integer format, and you
  want to assign ids by hashing, without restricting them to a fixed number of
  buckets.
output_id = Hash(input_feature_string) for string type input.
For int type input, the value is converted to its string representation first
and then hashed by the same formula.
For input dictionary `features`, `features[key]` is either `Tensor` or
`SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int
and `''` for string, which will be dropped by this feature column.
Example:
```python
  keywords = categorical_column_with_hash("keywords")
columns = [keywords, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
Args:
key: A unique string identifying the input feature. It is used as the
column name and the dictionary key for feature parsing configs, feature
`Tensor` objects, and feature columns.
    hash_type: The hash function to use (e.g. 'farm', the default).
    allow_neg: Whether negative hash results are allowed.
dtype: The type of features. Only string and integer types are supported.
Returns:
    A `HashOnlyCategoricalColumn`.
Raises:
ValueError: `hash_type` is not set.
"""
if hash_type is None:
raise ValueError('hash_type must be set. ' 'key: {}'.format(key))
fc_utils.assert_key_is_string(key)
fc_utils.assert_key_is_string(hash_type)
fc_utils.assert_string_or_int(dtype, prefix='column_name: {}'.format(key))
return HashOnlyCategoricalColumn(key, hash_type, allow_neg, dtype)
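# --- Editor's note: illustrative sketch only, never called by this module.
# Unlike `categorical_column_with_hash_bucket`, this column has no bucket
# count: ids are the raw hash values. The key is a made-up example and 'farm'
# simply restates the default `hash_type`.
def _categorical_column_with_hash_usage_sketch():
  """Illustrative sketch of `categorical_column_with_hash`; not part of the API."""
  return categorical_column_with_hash(
      'keywords', hash_type='farm', allow_neg=False)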
@tf_export('feature_column.categorical_column_with_multi_hash_bucket')
def categorical_column_with_multi_hash_bucket(key,
hash_bucket_size,
hash_types,
dtype=dtypes.string):
"""Represents sparse feature where ids are set by hashing.
Use this when your sparse features are in string or integer format, and you
want to distribute your inputs into a finite number of buckets by hashing.
  For each hash function in `hash_types`,
  output_id = Hash(input_feature_string) % hash_bucket_size for string type input.
For int type input, the value is converted to its string representation first
and then hashed by the same formula.
For input dictionary `features`, `features[key]` is either `Tensor` or
`SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int
and `''` for string, which will be dropped by this feature column.
Example:
```python
  keywords = categorical_column_with_multi_hash_bucket(
      "keywords", 10K, hash_types=['murmur', 'farm'])
columns = [keywords, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
Args:
key: A unique string identifying the input feature. It is used as the
column name and the dictionary key for feature parsing configs, feature
`Tensor` objects, and feature columns.
    hash_bucket_size: A positive int. The number of buckets per hash function.
    hash_types: A list of hash function names; each configured hash function
      produces its own set of bucketized ids.
dtype: The type of features. Only string and integer types are supported.
Returns:
    A `MultiHashedCategoricalColumn`.
Raises:
ValueError: `hash_bucket_size` is not valid.
ValueError: `hash_types` is not set.
"""
if hash_bucket_size is None:
raise ValueError('hash_bucket_size must be set. ' 'key: {}'.format(key))
if hash_bucket_size < 1:
raise ValueError('hash_bucket_size must be at least 1. '
'hash_bucket_size: {}, key: {}'.format(
hash_bucket_size, key))
if hash_types is None:
raise ValueError('hash_types must be set. ' 'key: {}'.format(key))
hash_types = tuple(hash_types)
fc_utils.assert_key_is_string(key)
fc_utils.assert_string_or_int(dtype, prefix='column_name: {}'.format(key))
return MultiHashedCategoricalColumn(key, hash_bucket_size, hash_types, dtype)
@tf_export('feature_column.weighted_categorical_column_with_multi_hash_bucket')
def weighted_categorical_column_with_multi_hash_bucket(
categorical_column, weight_feature_key, dtype=dtypes.float32):
"""Applies weight values to a `MultiHashCategoricalColumn`.
Use this when each of your sparse inputs has both an ID and a value.
Example:
```python
categorical_column = categorical_column_with_multi_hash_bucket(
column_name='terms', hash_bucket_size=1000, hash_types=['murmur', 'farm'])
  weighted_column = weighted_categorical_column_with_multi_hash_bucket(
categorical_column=categorical_column, weight_feature_key='frequencies')
columns = [weighted_column, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction, _, _ = linear_model(features, columns)
```
Args:
    categorical_column: A `MultiHashedCategoricalColumn` created by
      `categorical_column_with_multi_hash_bucket`.
weight_feature_key: String key for weight values.
dtype: Type of weights, such as `tf.float32`. Only float and integer weights
are supported.
Returns:
A `CategoricalColumn` composed of two sparse features: one represents id,
the other represents weight (value) of the id feature in that example.
Raises:
ValueError: if `dtype` is not convertible to float.
ValueError: if `categorical_column` is not a MultiHashedCategoricalColumn.
"""
if (dtype is None) or not (dtype.is_integer or dtype.is_floating):
raise ValueError('dtype {} is not convertible to float.'.format(dtype))
if not isinstance(categorical_column, MultiHashedCategoricalColumn):
raise ValueError(
'In weighted_categorical_column_with_multi_hash_bucket, '
'categorical_column must be type '
'categorical_column_with_multi_hash_bucket. '
'Given (type {}): {}'.format(type(categorical_column),
categorical_column))
return WeightedMultiHashedCategoricalColumn(
categorical_column=categorical_column,
weight_feature_key=weight_feature_key,
dtype=dtype)
@tf_export('feature_column.categorical_column_with_embedding')
def categorical_column_with_embedding(key,
dtype=dtypes.string,
partition_num=None,
ev_option=variables.EmbeddingVariableOption()
):
return EmbeddingCategoricalColumn(key, dtype, partition_num, ev_option)
@tf_export('feature_column.categorical_column_with_adaptive_embedding')
def categorical_column_with_adaptive_embedding(key,
hash_bucket_size,
dtype=dtypes.string,
partition_num=None,
ev_option=variables.EmbeddingVariableOption()
):
return AdaptiveEmbeddingCategoricalColumn(key,
hash_bucket_size,
dtype,
partition_num,
ev_option)
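# --- Editor's note: illustrative sketch only, never called by this module. The
# two undocumented constructors above are fork-specific; this sketch only shows
# how they appear to be invoked, with made-up keys and sizes and the default
# `EmbeddingVariableOption`.
def _embedding_variable_columns_usage_sketch():
  """Illustrative sketch of the EmbeddingVariable-backed columns; not part of the API."""
  user_id = categorical_column_with_embedding('user_id', dtype=dtypes.string)
  item_id = categorical_column_with_adaptive_embedding(
      'item_id', hash_bucket_size=100000, dtype=dtypes.string)
  return [user_id, item_id]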
@tf_export('feature_column.categorical_column_with_hash_bucket')
def categorical_column_with_hash_bucket(key,
hash_bucket_size,
dtype=dtypes.string):
"""Represents sparse feature where ids are set by hashing.
Use this when your sparse features are in string or integer format, and you
want to distribute your inputs into a finite number of buckets by hashing.
output_id = Hash(input_feature_string) % bucket_size for string type input.
For int type input, the value is converted to its string representation first
and then hashed by the same formula.
For input dictionary `features`, `features[key]` is either `Tensor` or
`SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int
and `''` for string, which will be dropped by this feature column.
Example:
```python
keywords = categorical_column_with_hash_bucket("keywords", 10K)
columns = [keywords, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
# or
keywords_embedded = embedding_column(keywords, 16)
columns = [keywords_embedded, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
```
Args:
key: A unique string identifying the input feature. It is used as the
column name and the dictionary key for feature parsing configs, feature
`Tensor` objects, and feature columns.
hash_bucket_size: An int > 1. The number of buckets.
dtype: The type of features. Only string and integer types are supported.
Returns:
A `HashedCategoricalColumn`.
Raises:
ValueError: `hash_bucket_size` is not greater than 1.
ValueError: `dtype` is neither string nor integer.
"""
if hash_bucket_size is None:
raise ValueError('hash_bucket_size must be set. ' 'key: {}'.format(key))
if hash_bucket_size < 1:
raise ValueError('hash_bucket_size must be at least 1. '
'hash_bucket_size: {}, key: {}'.format(
hash_bucket_size, key))
fc_utils.assert_key_is_string(key)
fc_utils.assert_string_or_int(dtype, prefix='column_name: {}'.format(key))
return HashedCategoricalColumn(key, hash_bucket_size, dtype)
@tf_export(v1=['feature_column.categorical_column_with_vocabulary_file'])
def categorical_column_with_vocabulary_file(key,
vocabulary_file,
vocabulary_size=None,
num_oov_buckets=0,
default_value=None,
dtype=dtypes.string):
"""A `CategoricalColumn` with a vocabulary file.
Use this when your inputs are in string or integer format, and you have a
vocabulary file that maps each value to an integer ID. By default,
out-of-vocabulary values are ignored. Use either (but not both) of
`num_oov_buckets` and `default_value` to specify how to include
out-of-vocabulary values.
For input dictionary `features`, `features[key]` is either `Tensor` or
`SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int
and `''` for string, which will be dropped by this feature column.
Example with `num_oov_buckets`:
File '/us/states.txt' contains 50 lines, each with a 2-character U.S. state
abbreviation. All inputs with values in that file are assigned an ID 0-49,
corresponding to its line number. All other values are hashed and assigned an
ID 50-54.
```python
states = categorical_column_with_vocabulary_file(
key='states', vocabulary_file='/us/states.txt', vocabulary_size=50,
num_oov_buckets=5)
columns = [states, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
Example with `default_value`:
File '/us/states.txt' contains 51 lines - the first line is 'XX', and the
other 50 each have a 2-character U.S. state abbreviation. Both a literal 'XX'
in input, and other values missing from the file, will be assigned ID 0. All
others are assigned the corresponding line number 1-50.
```python
states = categorical_column_with_vocabulary_file(
key='states', vocabulary_file='/us/states.txt', vocabulary_size=51,
default_value=0)
columns = [states, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction, _, _ = linear_model(features, columns)
```
And to make an embedding with either:
```python
columns = [embedding_column(states, 3),...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
```
Args:
key: A unique string identifying the input feature. It is used as the
column name and the dictionary key for feature parsing configs, feature
`Tensor` objects, and feature columns.
vocabulary_file: The vocabulary file name.
    vocabulary_size: Number of elements in the vocabulary. This must be no
      greater than the length of `vocabulary_file`; if it is less, later
      values are ignored. If `None`, it is set to the length of
      `vocabulary_file`.
num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
buckets. All out-of-vocabulary inputs will be assigned IDs in the range
`[vocabulary_size, vocabulary_size+num_oov_buckets)` based on a hash of
the input value. A positive `num_oov_buckets` can not be specified with
`default_value`.
default_value: The integer ID value to return for out-of-vocabulary feature
values, defaults to `-1`. This can not be specified with a positive
`num_oov_buckets`.
dtype: The type of features. Only string and integer types are supported.
Returns:
A `CategoricalColumn` with a vocabulary file.
Raises:
ValueError: `vocabulary_file` is missing or cannot be opened.
ValueError: `vocabulary_size` is missing or < 1.
ValueError: `num_oov_buckets` is a negative integer.
ValueError: `num_oov_buckets` and `default_value` are both specified.
ValueError: `dtype` is neither string nor integer.
"""
return categorical_column_with_vocabulary_file_v2(
key, vocabulary_file, vocabulary_size,
dtype, default_value,
num_oov_buckets)
@tf_export('feature_column.categorical_column_with_vocabulary_file', v1=[])
def categorical_column_with_vocabulary_file_v2(key,
vocabulary_file,
vocabulary_size=None,
dtype=dtypes.string,
default_value=None,
num_oov_buckets=0):
"""A `CategoricalColumn` with a vocabulary file.
Use this when your inputs are in string or integer format, and you have a
vocabulary file that maps each value to an integer ID. By default,
out-of-vocabulary values are ignored. Use either (but not both) of
`num_oov_buckets` and `default_value` to specify how to include
out-of-vocabulary values.
For input dictionary `features`, `features[key]` is either `Tensor` or
`SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int
and `''` for string, which will be dropped by this feature column.
Example with `num_oov_buckets`:
File `'/us/states.txt'` contains 50 lines, each with a 2-character U.S. state
abbreviation. All inputs with values in that file are assigned an ID 0-49,
corresponding to its line number. All other values are hashed and assigned an
ID 50-54.
```python
states = categorical_column_with_vocabulary_file(
key='states', vocabulary_file='/us/states.txt', vocabulary_size=50,
num_oov_buckets=5)
columns = [states, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
Example with `default_value`:
File `'/us/states.txt'` contains 51 lines - the first line is `'XX'`, and the
other 50 each have a 2-character U.S. state abbreviation. Both a literal
`'XX'` in input, and other values missing from the file, will be assigned
ID 0. All others are assigned the corresponding line number 1-50.
```python
states = categorical_column_with_vocabulary_file(
key='states', vocabulary_file='/us/states.txt', vocabulary_size=51,
default_value=0)
columns = [states, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction, _, _ = linear_model(features, columns)
```
And to make an embedding with either:
```python
columns = [embedding_column(states, 3),...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
```
Args:
key: A unique string identifying the input feature. It is used as the
column name and the dictionary key for feature parsing configs, feature
`Tensor` objects, and feature columns.
vocabulary_file: The vocabulary file name.
    vocabulary_size: Number of elements in the vocabulary. This must be no
      greater than the length of `vocabulary_file`; if it is less, later
      values are ignored. If `None`, it is set to the length of
      `vocabulary_file`.
dtype: The type of features. Only string and integer types are supported.
default_value: The integer ID value to return for out-of-vocabulary feature
values, defaults to `-1`. This can not be specified with a positive
`num_oov_buckets`.
num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
buckets. All out-of-vocabulary inputs will be assigned IDs in the range
`[vocabulary_size, vocabulary_size+num_oov_buckets)` based on a hash of
the input value. A positive `num_oov_buckets` can not be specified with
`default_value`.
Returns:
A `CategoricalColumn` with a vocabulary file.
Raises:
ValueError: `vocabulary_file` is missing or cannot be opened.
ValueError: `vocabulary_size` is missing or < 1.
ValueError: `num_oov_buckets` is a negative integer.
ValueError: `num_oov_buckets` and `default_value` are both specified.
ValueError: `dtype` is neither string nor integer.
"""
if not vocabulary_file:
raise ValueError('Missing vocabulary_file in {}.'.format(key))
if vocabulary_size is None:
if not gfile.Exists(vocabulary_file):
raise ValueError('vocabulary_file in {} does not exist.'.format(key))
with gfile.GFile(vocabulary_file) as f:
vocabulary_size = sum(1 for _ in f)
logging.info(
'vocabulary_size = %d in %s is inferred from the number of elements '
'in the vocabulary_file %s.', vocabulary_size, key, vocabulary_file)
# `vocabulary_size` isn't required for lookup, but it is for `_num_buckets`.
if vocabulary_size < 1:
raise ValueError('Invalid vocabulary_size in {}.'.format(key))
if num_oov_buckets:
if default_value is not None:
raise ValueError(
'Can\'t specify both num_oov_buckets and default_value in {}.'.format(
key))
if num_oov_buckets < 0:
raise ValueError('Invalid num_oov_buckets {} in {}.'.format(
num_oov_buckets, key))
fc_utils.assert_string_or_int(dtype, prefix='column_name: {}'.format(key))
fc_utils.assert_key_is_string(key)
return VocabularyFileCategoricalColumn(
key=key,
vocabulary_file=vocabulary_file,
vocabulary_size=vocabulary_size,
num_oov_buckets=0 if num_oov_buckets is None else num_oov_buckets,
default_value=-1 if default_value is None else default_value,
dtype=dtype)
@tf_export('feature_column.categorical_column_with_vocabulary_list')
def categorical_column_with_vocabulary_list(key,
vocabulary_list,
dtype=None,
default_value=-1,
num_oov_buckets=0):
"""A `CategoricalColumn` with in-memory vocabulary.
Use this when your inputs are in string or integer format, and you have an
in-memory vocabulary mapping each value to an integer ID. By default,
out-of-vocabulary values are ignored. Use either (but not both) of
`num_oov_buckets` and `default_value` to specify how to include
out-of-vocabulary values.
For input dictionary `features`, `features[key]` is either `Tensor` or
`SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int
and `''` for string, which will be dropped by this feature column.
Example with `num_oov_buckets`:
In the following example, each input in `vocabulary_list` is assigned an ID
0-3 corresponding to its index (e.g., input 'B' produces output 2). All other
inputs are hashed and assigned an ID 4-5.
```python
colors = categorical_column_with_vocabulary_list(
key='colors', vocabulary_list=('R', 'G', 'B', 'Y'),
num_oov_buckets=2)
columns = [colors, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction, _, _ = linear_model(features, columns)
```
Example with `default_value`:
In the following example, each input in `vocabulary_list` is assigned an ID
0-4 corresponding to its index (e.g., input 'B' produces output 3). All other
inputs are assigned `default_value` 0.
```python
colors = categorical_column_with_vocabulary_list(
key='colors', vocabulary_list=('X', 'R', 'G', 'B', 'Y'), default_value=0)
columns = [colors, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction, _, _ = linear_model(features, columns)
```
And to make an embedding with either:
```python
columns = [embedding_column(colors, 3),...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
```
Args:
key: A unique string identifying the input feature. It is used as the column
name and the dictionary key for feature parsing configs, feature `Tensor`
objects, and feature columns.
vocabulary_list: An ordered iterable defining the vocabulary. Each feature
is mapped to the index of its value (if present) in `vocabulary_list`.
Must be castable to `dtype`.
dtype: The type of features. Only string and integer types are supported. If
`None`, it will be inferred from `vocabulary_list`.
default_value: The integer ID value to return for out-of-vocabulary feature
values, defaults to `-1`. This can not be specified with a positive
`num_oov_buckets`.
num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
buckets. All out-of-vocabulary inputs will be assigned IDs in the range
`[len(vocabulary_list), len(vocabulary_list)+num_oov_buckets)` based on a
hash of the input value. A positive `num_oov_buckets` can not be specified
with `default_value`.
Returns:
A `CategoricalColumn` with in-memory vocabulary.
Raises:
ValueError: if `vocabulary_list` is empty, or contains duplicate keys.
ValueError: `num_oov_buckets` is a negative integer.
ValueError: `num_oov_buckets` and `default_value` are both specified.
ValueError: if `dtype` is not integer or string.
"""
if (vocabulary_list is None) or (len(vocabulary_list) < 1):
raise ValueError(
'vocabulary_list {} must be non-empty, column_name: {}'.format(
vocabulary_list, key))
if len(set(vocabulary_list)) != len(vocabulary_list):
raise ValueError(
'Duplicate keys in vocabulary_list {}, column_name: {}'.format(
vocabulary_list, key))
vocabulary_dtype = dtypes.as_dtype(np.array(vocabulary_list).dtype)
if num_oov_buckets:
if default_value != -1:
raise ValueError(
'Can\'t specify both num_oov_buckets and default_value in {}.'.format(
key))
if num_oov_buckets < 0:
raise ValueError('Invalid num_oov_buckets {} in {}.'.format(
num_oov_buckets, key))
fc_utils.assert_string_or_int(
vocabulary_dtype, prefix='column_name: {} vocabulary'.format(key))
if dtype is None:
dtype = vocabulary_dtype
elif dtype.is_integer != vocabulary_dtype.is_integer:
raise ValueError(
'dtype {} and vocabulary dtype {} do not match, column_name: {}'.format(
dtype, vocabulary_dtype, key))
fc_utils.assert_string_or_int(dtype, prefix='column_name: {}'.format(key))
fc_utils.assert_key_is_string(key)
return VocabularyListCategoricalColumn(
key=key,
vocabulary_list=tuple(vocabulary_list),
dtype=dtype,
default_value=default_value,
num_oov_buckets=num_oov_buckets)
@tf_export('feature_column.categorical_column_with_identity')
def categorical_column_with_identity(key, num_buckets, default_value=None):
"""A `CategoricalColumn` that returns identity values.
Use this when your inputs are integers in the range `[0, num_buckets)`, and
you want to use the input value itself as the categorical ID. Values outside
this range will result in `default_value` if specified, otherwise it will
fail.
Typically, this is used for contiguous ranges of integer indexes, but
  it doesn't have to be. This might be inefficient, however, if many IDs
are unused. Consider `categorical_column_with_hash_bucket` in that case.
For input dictionary `features`, `features[key]` is either `Tensor` or
`SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int
and `''` for string, which will be dropped by this feature column.
In the following examples, each input in the range `[0, 1000000)` is assigned
the same value. All other inputs are assigned `default_value` 0. Note that a
literal 0 in inputs will result in the same default ID.
Linear model:
```python
video_id = categorical_column_with_identity(
key='video_id', num_buckets=1000000, default_value=0)
columns = [video_id, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction, _, _ = linear_model(features, columns)
```
Embedding for a DNN model:
```python
columns = [embedding_column(video_id, 9),...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
```
Args:
key: A unique string identifying the input feature. It is used as the
column name and the dictionary key for feature parsing configs, feature
`Tensor` objects, and feature columns.
num_buckets: Range of inputs and outputs is `[0, num_buckets)`.
    default_value: If `None`, this column's graph operations will fail for
      out-of-range inputs. Otherwise, this value must be in the range
      `[0, num_buckets)`, and out-of-range inputs will be replaced with it.
Returns:
A `CategoricalColumn` that returns identity values.
Raises:
ValueError: if `num_buckets` is less than one.
ValueError: if `default_value` is not in range `[0, num_buckets)`.
"""
if num_buckets < 1:
raise ValueError(
'num_buckets {} < 1, column_name {}'.format(num_buckets, key))
if (default_value is not None) and (
(default_value < 0) or (default_value >= num_buckets)):
raise ValueError(
'default_value {} not in range [0, {}), column_name {}'.format(
default_value, num_buckets, key))
fc_utils.assert_key_is_string(key)
return IdentityCategoricalColumn(
key=key, number_buckets=num_buckets, default_value=default_value)
@tf_export('feature_column.indicator_column')
def indicator_column(categorical_column):
"""Represents multi-hot representation of given categorical column.
- For DNN model, `indicator_column` can be used to wrap any
    `categorical_column_*` (e.g., to feed to DNN). Consider using
    `embedding_column` if the number of buckets (unique values) is large.
- For Wide (aka linear) model, `indicator_column` is the internal
representation for categorical column when passing categorical column
directly (as any element in feature_columns) to `linear_model`. See
`linear_model` for details.
```python
  name = indicator_column(categorical_column_with_vocabulary_list(
      'name', ['bob', 'george', 'wanda']))
columns = [name, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
dense_tensor == [[1, 0, 0]] # If "name" bytes_list is ["bob"]
dense_tensor == [[1, 0, 1]] # If "name" bytes_list is ["bob", "wanda"]
dense_tensor == [[2, 0, 0]] # If "name" bytes_list is ["bob", "bob"]
```
Args:
categorical_column: A `CategoricalColumn` which is created by
`categorical_column_with_*` or `crossed_column` functions.
Returns:
An `IndicatorColumn`.
"""
return IndicatorColumn(categorical_column)
@tf_export('feature_column.weighted_categorical_column')
def weighted_categorical_column(categorical_column,
weight_feature_key,
dtype=dtypes.float32):
"""Applies weight values to a `CategoricalColumn`.
Use this when each of your sparse inputs has both an ID and a value. For
example, if you're representing text documents as a collection of word
frequencies, you can provide 2 parallel sparse input features ('terms' and
'frequencies' below).
Example:
Input `tf.Example` objects:
```proto
[
features {
feature {
key: "terms"
value {bytes_list {value: "very" value: "model"}}
}
feature {
key: "frequencies"
value {float_list {value: 0.3 value: 0.1}}
}
},
features {
feature {
key: "terms"
value {bytes_list {value: "when" value: "course" value: "human"}}
}
feature {
key: "frequencies"
value {float_list {value: 0.4 value: 0.1 value: 0.2}}
}
}
]
```
```python
categorical_column = categorical_column_with_hash_bucket(
column_name='terms', hash_bucket_size=1000)
weighted_column = weighted_categorical_column(
categorical_column=categorical_column, weight_feature_key='frequencies')
columns = [weighted_column, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction, _, _ = linear_model(features, columns)
```
This assumes the input dictionary contains a `SparseTensor` for key
'terms', and a `SparseTensor` for key 'frequencies'. These 2 tensors must have
the same indices and dense shape.
Args:
categorical_column: A `CategoricalColumn` created by
`categorical_column_with_*` functions.
weight_feature_key: String key for weight values.
dtype: Type of weights, such as `tf.float32`. Only float and integer weights
are supported.
Returns:
A `CategoricalColumn` composed of two sparse features: one represents id,
the other represents weight (value) of the id feature in that example.
Raises:
ValueError: if `dtype` is not convertible to float.
"""
if (dtype is None) or not (dtype.is_integer or dtype.is_floating):
raise ValueError('dtype {} is not convertible to float.'.format(dtype))
return WeightedCategoricalColumn(
categorical_column=categorical_column,
weight_feature_key=weight_feature_key,
dtype=dtype)
@tf_export('feature_column.crossed_column')
def crossed_column(keys, hash_bucket_size, hash_key=None):
"""Returns a column for performing crosses of categorical features.
Crossed features will be hashed according to `hash_bucket_size`. Conceptually,
the transformation can be thought of as:
Hash(cartesian product of features) % `hash_bucket_size`
For example, if the input features are:
* SparseTensor referred by first key:
```python
shape = [2, 2]
{
[0, 0]: "a"
[1, 0]: "b"
[1, 1]: "c"
}
```
* SparseTensor referred by second key:
```python
shape = [2, 1]
{
[0, 0]: "d"
[1, 0]: "e"
}
```
then crossed feature will look like:
```python
shape = [2, 2]
{
[0, 0]: Hash64("d", Hash64("a")) % hash_bucket_size
[1, 0]: Hash64("e", Hash64("b")) % hash_bucket_size
[1, 1]: Hash64("e", Hash64("c")) % hash_bucket_size
}
```
Here is an example to create a linear model with crosses of string features:
```python
  keywords_x_doc_terms = crossed_column(['keywords', 'doc_terms'], 50000)
columns = [keywords_x_doc_terms, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
You could also use vocabulary lookup before crossing:
```python
keywords = categorical_column_with_vocabulary_file(
      'keywords', '/path/to/vocabulary/file', vocabulary_size=1000)
  keywords_x_doc_terms = crossed_column([keywords, 'doc_terms'], 50000)
columns = [keywords_x_doc_terms, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
If an input feature is of numeric type, you can use
`categorical_column_with_identity`, or `bucketized_column`, as in the example:
```python
# vertical_id is an integer categorical feature.
  vertical_id = categorical_column_with_identity('vertical_id', 10000)
price = numeric_column('price')
# bucketized_column converts numerical feature to a categorical one.
bucketized_price = bucketized_column(price, boundaries=[...])
  vertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50000)
columns = [vertical_id_x_price, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
  To use a crossed column in a DNN model, you need to wrap it in an embedding
  column, as in this example:
```python
  vertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50000)
vertical_id_x_price_embedded = embedding_column(vertical_id_x_price, 10)
dense_tensor = input_layer(features, [vertical_id_x_price_embedded, ...])
```
Args:
keys: An iterable identifying the features to be crossed. Each element can
be either:
* string: Will use the corresponding feature which must be of string type.
* `CategoricalColumn`: Will use the transformed tensor produced by this
column. Does not support hashed categorical column.
hash_bucket_size: An int > 1. The number of buckets.
hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
function to combine the crosses fingerprints on SparseCrossOp (optional).
Returns:
A `CrossedColumn`.
Raises:
ValueError: If `len(keys) < 2`.
ValueError: If any of the keys is neither a string nor `CategoricalColumn`.
ValueError: If any of the keys is `HashedCategoricalColumn`.
ValueError: If `hash_bucket_size < 1`.
"""
if not hash_bucket_size or hash_bucket_size < 1:
raise ValueError('hash_bucket_size must be > 1. '
'hash_bucket_size: {}'.format(hash_bucket_size))
if not keys or len(keys) < 2:
raise ValueError(
'keys must be a list with length > 1. Given: {}'.format(keys))
for key in keys:
if (not isinstance(key, six.string_types) and
not isinstance(key, (CategoricalColumn, fc_old._CategoricalColumn))): # pylint: disable=protected-access
raise ValueError(
'Unsupported key type. All keys must be either string, or '
'categorical column except HashedCategoricalColumn. '
'Given: {}'.format(key))
if isinstance(key,
(HashedCategoricalColumn, fc_old._HashedCategoricalColumn)): # pylint: disable=protected-access
raise ValueError(
'categorical_column_with_hash_bucket is not supported for crossing. '
'Hashing before crossing will increase probability of collision. '
'Instead, use the feature name as a string. Given: {}'.format(key))
return CrossedColumn(
keys=tuple(keys), hash_bucket_size=hash_bucket_size, hash_key=hash_key)
@six.add_metaclass(abc.ABCMeta)
class FeatureColumn(object):
"""Represents a feature column abstraction.
WARNING: Do not subclass this layer unless you know what you are doing:
the API is subject to future changes.
To distinguish between the concept of a feature family and a specific binary
feature within a family, we refer to a feature family like "country" as a
feature column. For example, we can have a feature in a `tf.Example` format:
{key: "country", value: [ "US" ]}
  In this example the value of the feature is "US" and "country" refers to
  the feature column.
This class is an abstract class. Users should not create instances of this.
"""
@abc.abstractproperty
def name(self):
"""Returns string. Used for naming."""
pass
@property
def var_scope_name(self):
return self.name
@property
def embedding_name(self):
return self.name
def __lt__(self, other):
"""Allows feature columns to be sorted in Python 3 as they are in Python 2.
Feature columns need to occasionally be sortable, for example when used as
keys in a features dictionary passed to a layer.
In CPython, `__lt__` must be defined for all objects in the
sequence being sorted.
    If any objects in the sequence being sorted do not have an `__lt__` method
compatible with feature column objects (such as strings), then CPython will
fall back to using the `__gt__` method below.
https://docs.python.org/3/library/stdtypes.html#list.sort
Args:
other: The other object to compare to.
Returns:
True if the string representation of this object is lexicographically less
than the string representation of `other`. For FeatureColumn objects,
this looks like "<__main__.FeatureColumn object at 0xa>".
"""
return str(self) < str(other)
def __gt__(self, other):
"""Allows feature columns to be sorted in Python 3 as they are in Python 2.
Feature columns need to occasionally be sortable, for example when used as
keys in a features dictionary passed to a layer.
`__gt__` is called when the "other" object being compared during the sort
does not have `__lt__` defined.
Example: http://gpaste/4803354716798976
Args:
other: The other object to compare to.
Returns:
True if the string representation of this object is lexicographically
greater than the string representation of `other`. For FeatureColumn
objects, this looks like "<__main__.FeatureColumn object at 0xa>".
"""
return str(self) > str(other)
@abc.abstractmethod
def transform_feature(self, transformation_cache, state_manager):
"""Returns intermediate representation (usually a `Tensor`).
Uses `transformation_cache` to create an intermediate representation
(usually a `Tensor`) that other feature columns can use.
Example usage of `transformation_cache`:
Let's say a Feature column depends on raw feature ('raw') and another
`FeatureColumn` (input_fc). To access corresponding `Tensor`s,
transformation_cache will be used as follows:
```python
raw_tensor = transformation_cache.get('raw', state_manager)
fc_tensor = transformation_cache.get(input_fc, state_manager)
```
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
Returns:
Transformed feature `Tensor`.
"""
pass
@abc.abstractproperty
def parse_example_spec(self):
"""Returns a `tf.Example` parsing spec as dict.
    It is used as the parsing spec for `tf.io.parse_example`. The returned spec is
a dict from keys ('string') to `VarLenFeature`, `FixedLenFeature`, and other
supported objects. Please check documentation of `tf.io.parse_example` for
all supported spec objects.
Let's say a Feature column depends on raw feature ('raw') and another
`FeatureColumn` (input_fc). One possible implementation of
parse_example_spec is as follows:
```python
spec = {'raw': tf.io.FixedLenFeature(...)}
spec.update(input_fc.parse_example_spec)
return spec
```
"""
pass
def create_state(self, state_manager):
"""Uses the `state_manager` to create state for the FeatureColumn.
Args:
state_manager: A `StateManager` to create / access resources such as
lookup tables and variables.
"""
pass
@abc.abstractproperty
def _is_v2_column(self):
"""Returns whether this FeatureColumn is fully conformant to the new API.
This is needed for composition type cases where an EmbeddingColumn etc.
might take in old categorical columns as input and then we want to use the
old API.
"""
pass
@abc.abstractproperty
def parents(self):
"""Returns a list of immediate raw feature and FeatureColumn dependencies.
For example:
# For the following feature columns
a = numeric_column('f1')
c = crossed_column(a, 'f2')
# The expected parents are:
a.parents = ['f1']
c.parents = [a, 'f2']
"""
pass
@abc.abstractmethod
def _get_config(self):
"""Returns the config of the feature column.
A FeatureColumn config is a Python dictionary (serializable) containing the
configuration of a FeatureColumn. The same FeatureColumn can be
reinstantiated later from this configuration.
The config of a feature column does not include information about feature
columns depending on it nor the FeatureColumn class name.
Example with (de)serialization practices followed in this file:
```python
class SerializationExampleFeatureColumn(
FeatureColumn, collections.namedtuple(
'SerializationExampleFeatureColumn',
('dimension', 'parent', 'dtype', 'normalizer_fn'))):
def _get_config(self):
# Create a dict from the namedtuple.
# Python attribute literals can be directly copied from / to the config.
# For example 'dimension', assuming it is an integer literal.
config = dict(zip(self._fields, self))
# (De)serialization of parent FeatureColumns should use the provided
# (de)serialize_feature_column() methods that take care of de-duping.
config['parent'] = serialize_feature_column(self.parent)
# Many objects provide custom (de)serialization e.g: for tf.DType
# tf.DType.name, tf.as_dtype() can be used.
config['dtype'] = self.dtype.name
# Non-trivial dependencies should be Keras-(de)serializable.
config['normalizer_fn'] = generic_utils.serialize_keras_object(
self.normalizer_fn)
return config
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
# This should do the inverse transform from `_get_config` and construct
# the namedtuple.
kwargs = config.copy()
kwargs['parent'] = deserialize_feature_column(
config['parent'], custom_objects, columns_by_name)
kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
kwargs['normalizer_fn'] = generic_utils.deserialize_keras_object(
config['normalizer_fn'], custom_objects=custom_objects)
return cls(**kwargs)
```
Returns:
A serializable Dict that can be used to deserialize the object with
from_config.
"""
pass
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
"""Creates a FeatureColumn from its config.
This method should be the reverse of `_get_config`, capable of instantiating
the same FeatureColumn from the config dictionary. See `_get_config` for an
example of common (de)serialization practices followed in this file.
TODO(b/118939620): This is a private method until consensus is reached on
supporting object deserialization deduping within Keras.
Args:
config: A Dict config acquired with `_get_config`.
custom_objects: Optional dictionary mapping names (strings) to custom
classes or functions to be considered during deserialization.
columns_by_name: A Dict[String, FeatureColumn] of existing columns in
order to avoid duplication. Should be passed to any calls to
deserialize_feature_column().
Returns:
A FeatureColumn for the input config.
"""
pass
class DenseColumn(FeatureColumn):
"""Represents a column which can be represented as `Tensor`.
Some examples of this type are: numeric_column, embedding_column,
indicator_column.
"""
@abc.abstractproperty
def variable_shape(self):
"""`TensorShape` of `get_dense_tensor`, without batch dimension."""
pass
@abc.abstractmethod
def get_dense_tensor(self, transformation_cache, state_manager):
"""Returns a `Tensor`.
The output of this function will be used by model-builder-functions. For
example the pseudo code of `input_layer` will be like:
```python
def input_layer(features, feature_columns, ...):
outputs = [fc.get_dense_tensor(...) for fc in feature_columns]
return tf.concat(outputs)
```
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
Returns:
`Tensor` of shape [batch_size] + `variable_shape`.
"""
pass
def output_shape(self, inputs):
"""Tuple of column output shape"""
batch_size = array_ops.shape(inputs)[0]
num_elements = self.variable_shape.num_elements()
return (batch_size, num_elements)
def is_feature_column_v2(feature_columns):
"""Returns True if all feature columns are V2."""
for feature_column in feature_columns:
if not isinstance(feature_column, FeatureColumn):
return False
if not feature_column._is_v2_column: # pylint: disable=protected-access
return False
return True
def _create_weighted_sum(column, transformation_cache, state_manager,
sparse_combiner, weight_var):
"""Creates a weighted sum for a dense/categorical column for linear_model."""
if isinstance(column, CategoricalColumn):
return _create_categorical_column_weighted_sum(
column=column,
transformation_cache=transformation_cache,
state_manager=state_manager,
sparse_combiner=sparse_combiner,
weight_var=weight_var)
else:
return _create_dense_column_weighted_sum(
column=column,
transformation_cache=transformation_cache,
state_manager=state_manager,
weight_var=weight_var)
def _create_dense_column_weighted_sum(column, transformation_cache,
state_manager, weight_var):
"""Create a weighted sum of a dense column for linear_model."""
tensor = column.get_dense_tensor(transformation_cache, state_manager)
num_elements = column.variable_shape.num_elements()
batch_size = array_ops.shape(tensor)[0]
tensor = array_ops.reshape(tensor, shape=(batch_size, num_elements))
return math_ops.matmul(tensor, weight_var, name='weighted_sum')
class CategoricalColumn(FeatureColumn):
"""Represents a categorical feature.
  A categorical feature is typically handled with a `tf.SparseTensor` of IDs.
"""
IdWeightPair = collections.namedtuple( # pylint: disable=invalid-name
'IdWeightPair', ('id_tensor', 'weight_tensor'))
@abc.abstractproperty
def num_buckets(self):
"""Returns number of buckets in this sparse feature."""
pass
@abc.abstractmethod
def get_sparse_tensors(self, transformation_cache, state_manager):
"""Returns an IdWeightPair.
`IdWeightPair` is a pair of `SparseTensor`s which represents ids and
weights.
`IdWeightPair.id_tensor` is typically a `batch_size` x `num_buckets`
`SparseTensor` of `int64`. `IdWeightPair.weight_tensor` is either a
`SparseTensor` of `float` or `None` to indicate all weights should be
taken to be 1. If specified, `weight_tensor` must have exactly the same
    shape and indices as `id_tensor`. The expected `SparseTensor` is the same
    as the parsing output of a `VarLenFeature`, which is a ragged matrix.
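    For example (an illustrative sketch, not tied to a particular column):

    ```python
    IdWeightPair(
        id_tensor=tf.SparseTensor(
            indices=[[0, 0], [1, 0], [1, 1]],
            values=[3, 1, 4],      # int64 category ids
            dense_shape=[2, 2]),   # batch_size=2, up to 2 ids per example
        weight_tensor=None)        # None: all weights are taken to be 1
    ```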
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
"""
pass
def _create_categorical_column_weighted_sum(
column, transformation_cache, state_manager, sparse_combiner, weight_var):
# pylint: disable=g-doc-return-or-yield,g-doc-args
"""Create a weighted sum of a categorical column for linear_model.
Note to maintainer: As implementation details, the weighted sum is
implemented via embedding_lookup_sparse toward efficiency. Mathematically,
they are the same.
To be specific, conceptually, categorical column can be treated as multi-hot
vector. Say:
```python
x = [0 0 1] # categorical column input
w = [a b c] # weights
```
  The weighted sum is `c` in this case, which is the same as `w[2]`.
Another example is
```python
x = [0 1 1] # categorical column input
w = [a b c] # weights
```
  The weighted sum is `b + c` in this case, which is the same as `w[1] + w[2]`.
For both cases, we can implement weighted sum via embedding_lookup with
sparse_combiner = "sum".
"""
sparse_tensors = column.get_sparse_tensors(transformation_cache,
state_manager)
id_tensor = sparse_ops.sparse_reshape(sparse_tensors.id_tensor, [
array_ops.shape(sparse_tensors.id_tensor)[0], -1
])
weight_tensor = sparse_tensors.weight_tensor
if weight_tensor is not None:
weight_tensor = sparse_ops.sparse_reshape(
weight_tensor, [array_ops.shape(weight_tensor)[0], -1])
return embedding_ops.safe_embedding_lookup_sparse(
weight_var,
id_tensor,
sparse_weights=weight_tensor,
combiner=sparse_combiner,
name='weighted_sum')
class SequenceDenseColumn(FeatureColumn):
"""Represents dense sequence data."""
TensorSequenceLengthPair = collections.namedtuple( # pylint: disable=invalid-name
'TensorSequenceLengthPair', ('dense_tensor', 'sequence_length'))
@abc.abstractmethod
def get_sequence_dense_tensor(self, transformation_cache, state_manager):
"""Returns a `TensorSequenceLengthPair`.
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
"""
pass
class FeatureTransformationCache(object):
"""Handles caching of transformations while building the model.
`FeatureColumn` specifies how to digest an input column to the network. Some
feature columns require data transformations. This class caches those
transformations.
Some features may be used in more than one place. For example, one can use a
  bucketized feature by itself and also in a cross. In that case we
should create only one bucketization op instead of creating ops for each
feature column separately. To handle re-use of transformed columns,
`FeatureTransformationCache` caches all previously transformed columns.
Example:
We're trying to use the following `FeatureColumn`s:
```python
bucketized_age = fc.bucketized_column(fc.numeric_column("age"), ...)
  keywords = fc.categorical_column_with_hash_bucket("keywords", ...)
  age_X_keywords = fc.crossed_column([bucketized_age, "keywords"], ...)
  ... = linear_model(features,
                     [bucketized_age, keywords, age_X_keywords])
```
If we transform each column independently, then we'll get duplication of
bucketization (one for cross, one for bucketization itself).
The `FeatureTransformationCache` eliminates this duplication.
"""
def __init__(self, features):
"""Creates a `FeatureTransformationCache`.
Args:
features: A mapping from feature column to objects that are `Tensor` or
`SparseTensor`, or can be converted to same via
`sparse_tensor.convert_to_tensor_or_sparse_tensor`. A `string` key
signifies a base feature (not-transformed). A `FeatureColumn` key
means that this `Tensor` is the output of an existing `FeatureColumn`
which can be reused.
"""
self._features = features.copy()
self._feature_tensors = {}
def set(self, key, value):
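    """Sets `value` for an existing `key`.

    The key must already be present either in the transformed-feature cache
    or in the raw features dict; otherwise a `ValueError` is raised.

    Args:
      key: a `str` or a `FeatureColumn`.
      value: The `Tensor` or `SparseTensor` to store for `key`.
    """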
if key in self._feature_tensors:
self._feature_tensors[key] = value
elif key in self._features:
self._features[key] = value
else:
raise ValueError("LazyBUilder set error: Key name not appear "
"in Lazybuilder, key name: ", key)
def get_features(self):
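    """Returns the raw features dict merged with cached transformed tensors."""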
feature_result = self._features.copy()
feature_result.update(self._feature_tensors)
return feature_result
def get(self, key, state_manager):
"""Returns a `Tensor` for the given key.
A `str` key is used to access a base feature (not-transformed). When a
`FeatureColumn` is passed, the transformed feature is returned if it
already exists, otherwise the given `FeatureColumn` is asked to provide its
transformed output, which is then cached.
Args:
key: a `str` or a `FeatureColumn`.
state_manager: A StateManager object that holds the FeatureColumn state.
Returns:
The transformed `Tensor` corresponding to the `key`.
Raises:
ValueError: if key is not found or a transformed `Tensor` cannot be
computed.
"""
if key in self._feature_tensors:
# FeatureColumn is already transformed or converted.
return self._feature_tensors[key]
if key in self._features:
feature_tensor = self._get_raw_feature_as_tensor(key)
self._feature_tensors[key] = feature_tensor
return feature_tensor
if isinstance(key, six.string_types):
raise ValueError('Feature {} is not in features dictionary.'.format(key))
if not isinstance(key, FeatureColumn):
raise TypeError('"key" must be either a "str" or "FeatureColumn". '
'Provided: {}'.format(key))
column = key
logging.debug('Transforming feature_column %s.', column)
transformed = column.transform_feature(self, state_manager)
if transformed is None:
raise ValueError('Column {} is not supported.'.format(column.name))
self._feature_tensors[column] = transformed
return transformed
def _get_raw_feature_as_tensor(self, key):
"""Gets the raw_feature (keyed by `key`) as `tensor`.
    The raw feature is converted to a (sparse) tensor and, if necessary, its
    dimensions are expanded. For both `Tensor` and `SparseTensor`, the rank is
    expanded to 2 if it is 1; for example, a `Tensor` of shape `[batch_size]`
    becomes shape `[batch_size, 1]`. Dynamic rank is also supported. Rank-0
    raw features are not supported and will raise an error.
Args:
key: A `str` key to access the raw feature.
Returns:
A `Tensor` or `SparseTensor`.
Raises:
ValueError: if the raw feature has rank 0.
"""
raw_feature = self._features[key]
feature_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
raw_feature)
def expand_dims(input_tensor):
# Input_tensor must have rank 1.
if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
return sparse_ops.sparse_reshape(
input_tensor, [array_ops.shape(input_tensor)[0], 1])
else:
return array_ops.expand_dims(input_tensor, -1)
rank = feature_tensor.get_shape().ndims
if rank is not None:
if rank == 0:
raise ValueError(
'Feature (key: {}) cannot have rank 0. Given: {}'.format(
key, feature_tensor))
return feature_tensor if rank != 1 else expand_dims(feature_tensor)
# Handle dynamic rank.
with ops.control_dependencies([
check_ops.assert_positive(
array_ops.rank(feature_tensor),
message='Feature (key: {}) cannot have rank 0. Given: {}'.format(
key, feature_tensor))]):
return control_flow_ops.cond(
math_ops.equal(1, array_ops.rank(feature_tensor)),
lambda: expand_dims(feature_tensor),
lambda: feature_tensor)
# TODO(ptucker): Move to third_party/tensorflow/python/ops/sparse_ops.py
def _to_sparse_input_and_drop_ignore_values(input_tensor, ignore_value=None):
"""Converts a `Tensor` to a `SparseTensor`, dropping ignore_value cells.
If `input_tensor` is already a `SparseTensor`, just return it.
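  For example (an illustrative sketch):

  ```python
  dense = tf.constant([['a', ''], ['b', 'c']])
  sparse = _to_sparse_input_and_drop_ignore_values(dense)
  # sparse.indices == [[0, 0], [1, 0], [1, 1]]
  # sparse.values == [b'a', b'b', b'c']
  # sparse.dense_shape == [2, 2]
  ```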
Args:
input_tensor: A string or integer `Tensor`.
    ignore_value: Entries in `input_tensor` equal to this value will be
      absent from the resulting `SparseTensor`. If `None`, the default value
      of `input_tensor`'s dtype will be used ('' for `str`, -1 for `int`).
Returns:
A `SparseTensor` with the same shape as `input_tensor`.
Raises:
ValueError: when `input_tensor`'s rank is `None`.
"""
input_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
input_tensor)
if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
return input_tensor
with ops.name_scope(None, 'to_sparse_input', (input_tensor, ignore_value,)):
if ignore_value is None:
if input_tensor.dtype == dtypes.string:
        # Strings are a special case because TF converts them to numpy objects
        # by default.
ignore_value = ''
elif input_tensor.dtype.is_integer:
ignore_value = -1 # -1 has a special meaning of missing feature
else:
# NOTE: `as_numpy_dtype` is a property, so with the parentheses this is
# constructing a new numpy object of the given type, which yields the
# default value for that type.
ignore_value = input_tensor.dtype.as_numpy_dtype()
ignore_value = math_ops.cast(
ignore_value, input_tensor.dtype, name='ignore_value')
indices = array_ops.where_v2(
math_ops.not_equal(input_tensor, ignore_value), name='indices')
return sparse_tensor_lib.SparseTensor(
indices=indices,
values=array_ops.gather_nd(input_tensor, indices, name='values'),
dense_shape=array_ops.shape(
input_tensor, out_type=dtypes.int64, name='dense_shape'))
def _normalize_feature_columns(feature_columns):
"""Normalizes the `feature_columns` input.
  This method converts `feature_columns` to a list as best it can. In
  addition, it verifies the types and other properties of `feature_columns`
  that are required by downstream libraries.
Args:
feature_columns: The raw feature columns, usually passed by users.
Returns:
The normalized feature column list.
Raises:
ValueError: for any invalid inputs, such as empty, duplicated names, etc.
"""
if isinstance(feature_columns, FeatureColumn):
feature_columns = [feature_columns]
if isinstance(feature_columns, collections_abc.Iterator):
feature_columns = list(feature_columns)
if isinstance(feature_columns, dict):
raise ValueError('Expected feature_columns to be iterable, found dict.')
for column in feature_columns:
if not isinstance(column, FeatureColumn):
raise ValueError('Items of feature_columns must be a FeatureColumn. '
'Given (type {}): {}.'.format(type(column), column))
if not feature_columns:
raise ValueError('feature_columns must not be empty.')
name_to_column = {}
for column in feature_columns:
if column.name in name_to_column:
raise ValueError('Duplicate feature column name found for columns: {} '
'and {}. This usually means that these columns refer to '
'same base feature. Either one must be discarded or a '
'duplicated but renamed item must be inserted in '
'features dict.'.format(column,
name_to_column[column.name]))
name_to_column[column.name] = column
return sorted(feature_columns, key=lambda x: x.name)
class NumericColumn(
DenseColumn,
fc_old._DenseColumn, # pylint: disable=protected-access
collections.namedtuple(
'NumericColumn',
('key', 'shape', 'default_value', 'dtype', 'normalizer_fn'))):
"""see `numeric_column`."""
@property
def _is_v2_column(self):
return True
@property
def name(self):
"""See `FeatureColumn` base class."""
return self.key
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return {
self.key:
parsing_ops.FixedLenFeature(self.shape, self.dtype,
self.default_value)
}
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
return self.parse_example_spec
def _transform_input_tensor(self, input_tensor):
if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
raise ValueError(
'The corresponding Tensor of numerical column must be a Tensor. '
'SparseTensor is not supported. key: {}'.format(self.key))
if self.normalizer_fn is not None:
input_tensor = self.normalizer_fn(input_tensor)
return math_ops.cast(input_tensor, dtypes.float32)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
input_tensor = inputs.get(self.key)
return self._transform_input_tensor(input_tensor)
def transform_feature(self, transformation_cache, state_manager):
"""See `FeatureColumn` base class.
In this case, we apply the `normalizer_fn` to the input tensor.
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
Returns:
Normalized input tensor.
Raises:
ValueError: If a SparseTensor is passed in.
"""
input_tensor = transformation_cache.get(self.key, state_manager)
return self._transform_input_tensor(input_tensor)
@property
def variable_shape(self):
"""See `DenseColumn` base class."""
return tensor_shape.TensorShape(self.shape)
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _variable_shape(self):
return self.variable_shape
def get_dense_tensor(self, transformation_cache, state_manager):
"""Returns dense `Tensor` representing numeric feature.
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
Returns:
Dense `Tensor` created within `transform_feature`.
"""
# Feature has been already transformed. Return the intermediate
# representation created by _transform_feature.
return transformation_cache.get(self, state_manager)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
del weight_collections
del trainable
return inputs.get(self)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.key]
def _get_config(self):
"""See 'FeatureColumn` base class."""
config = dict(zip(self._fields, self))
config['normalizer_fn'] = generic_utils.serialize_keras_object(
self.normalizer_fn)
config['dtype'] = self.dtype.name
return config
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['normalizer_fn'] = generic_utils.deserialize_keras_object(
config['normalizer_fn'], custom_objects=custom_objects)
kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
return cls(**kwargs)
class SparseNumericColumn(
DenseColumn, CategoricalColumn,
fc_old._DenseColumn, # pylint: disable=protected-access
fc_old._CategoricalColumn, # pylint: disable=protected-access
collections.namedtuple(
'SparseNumericColumn',
('key', 'shape', 'dtype'))):
"""see `sparse_numeric_column`."""
@property
def _is_v2_column(self):
return True
@property
def name(self):
"""See `FeatureColumn` base class."""
return self.key
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return {
self.key:
parsing_ops.VarLenFeature(self.dtype)
}
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
return self.parse_example_spec
def _transform_input_tensor(self, input_tensor):
if not isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
raise ValueError(
'The corresponding Tensor of sparse numerical column must be a SparseTensor. '
'Tensor is not supported. key: {}'.format(self.key))
fc_utils.assert_float_or_int(
input_tensor.dtype,
prefix='column_name: {} input_tensor'.format(self.key))
if self.dtype.is_integer != input_tensor.dtype.is_integer:
raise ValueError(
'Column dtype and SparseTensors dtype must be compatible. '
'key: {}, column dtype: {}, tensor dtype: {}'.format(
self.key, self.dtype, input_tensor.dtype))
return input_tensor
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
input_tensor = inputs.get(self.key)
return self._transform_input_tensor(input_tensor)
def transform_feature(self, transformation_cache, state_manager):
"""See `FeatureColumn` base class.
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
Returns:
Input tensor.
Raises:
ValueError: If a Tensor is passed in.
"""
input_tensor = transformation_cache.get(self.key, state_manager)
return self._transform_input_tensor(input_tensor)
@property
def variable_shape(self):
"""See `DenseColumn` base class."""
if self.shape is not None:
return tensor_shape.TensorShape(self.shape)
else:
      # Shape was not defined.
return None
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _variable_shape(self):
return self.variable_shape
def output_shape(self, inputs):
"""See `DenseColumn` base class."""
if self.shape is not None:
batch_size = array_ops.shape(inputs)[0]
num_elements = self.variable_shape.num_elements()
return (batch_size, num_elements)
else:
      # Keep the original sparse tensor's dense shape.
return array_ops.shape(inputs)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _output_shape(self, inputs):
return self.output_shape(inputs)
def get_dense_tensor(self, transformation_cache, state_manager):
"""Returns dense `Tensor` representing sparse numeric feature.
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
Returns:
Dense `Tensor` created within `transform_feature`.
"""
# Feature has been already transformed. Return the intermediate
# representation created by _transform_feature.
input_tensor = transformation_cache.get(self, state_manager)
return sparse_ops.sparse_tensor_to_dense(input_tensor, default_value=0)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
del weight_collections
del trainable
input_tensor = inputs.get(self)
return sparse_ops.sparse_tensor_to_dense(input_tensor, default_value=0)
@property
def num_buckets(self):
"""See `CategoricalColumn` base class."""
raise ValueError("sparse_numeric_column does not has attr `_num_buckets`"
" if you want to look up embedding with embedding column, "
"please use hashtable feature column.")
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _num_buckets(self):
return self.num_buckets
def get_sparse_tensors(self, transformation_cache, state_manager):
"""Returns `SparseTensor` representing sparse numeric feature."""
input_tensor = transformation_cache.get(self, state_manager)
return CategoricalColumn.IdWeightPair(input_tensor, None)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
"""Returns `SparseTensor` representing sparse numeric feature."""
del weight_collections
del trainable
return CategoricalColumn.IdWeightPair(inputs.get(self), None)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.key]
def _get_config(self):
"""See 'FeatureColumn` base class."""
config = dict(zip(self._fields, self))
config['dtype'] = self.dtype.name
return config
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
return cls(**kwargs)
class BucketizedColumn(
DenseColumn,
CategoricalColumn,
fc_old._DenseColumn, # pylint: disable=protected-access
fc_old._CategoricalColumn, # pylint: disable=protected-access
collections.namedtuple('BucketizedColumn',
('source_column', 'boundaries'))):
"""See `bucketized_column`."""
@property
def _is_v2_column(self):
return (isinstance(self.source_column, FeatureColumn) and
self.source_column._is_v2_column) # pylint: disable=protected-access
@property
def name(self):
"""See `FeatureColumn` base class."""
return '{}_bucketized'.format(self.source_column.name)
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return self.source_column.parse_example_spec
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
return self.source_column._parse_example_spec # pylint: disable=protected-access
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
"""Returns bucketized categorical `source_column` tensor."""
source_tensor = inputs.get(self.source_column)
return math_ops._bucketize( # pylint: disable=protected-access
source_tensor,
boundaries=self.boundaries)
def transform_feature(self, transformation_cache, state_manager):
"""Returns bucketized categorical `source_column` tensor."""
source_tensor = transformation_cache.get(self.source_column, state_manager)
return math_ops._bucketize( # pylint: disable=protected-access
source_tensor,
boundaries=self.boundaries)
@property
def variable_shape(self):
"""See `DenseColumn` base class."""
return tensor_shape.TensorShape(
tuple(self.source_column.shape) + (len(self.boundaries) + 1,))
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _variable_shape(self):
return self.variable_shape
def _get_dense_tensor_for_input_tensor(self, input_tensor):
return array_ops.one_hot(
indices=math_ops.cast(input_tensor, dtypes.int64),
depth=len(self.boundaries) + 1,
on_value=1.,
off_value=0.)
def get_dense_tensor(self, transformation_cache, state_manager):
"""Returns one hot encoded dense `Tensor`."""
input_tensor = transformation_cache.get(self, state_manager)
return self._get_dense_tensor_for_input_tensor(input_tensor)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
del weight_collections
del trainable
input_tensor = inputs.get(self)
return self._get_dense_tensor_for_input_tensor(input_tensor)
@property
def num_buckets(self):
"""See `CategoricalColumn` base class."""
# By construction, source_column is always one-dimensional.
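    # Illustrative example: boundaries=[0., 10.] gives 3 buckets per element,
    # so a source column of shape (2,) yields 3 * 2 = 6 buckets in total.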
return (len(self.boundaries) + 1) * self.source_column.shape[0]
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _num_buckets(self):
return self.num_buckets
def _get_sparse_tensors_for_input_tensor(self, input_tensor):
batch_size = array_ops.shape(input_tensor)[0]
# By construction, source_column is always one-dimensional.
source_dimension = self.source_column.shape[0]
i1 = array_ops.reshape(
array_ops.tile(
array_ops.expand_dims(math_ops.range(0, batch_size), 1),
[1, source_dimension]),
(-1,))
i2 = array_ops.tile(math_ops.range(0, source_dimension), [batch_size])
# Flatten the bucket indices and unique them across dimensions
# E.g. 2nd dimension indices will range from k to 2*k-1 with k buckets
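    # Worked example (illustrative): with 3 buckets per dimension
    # (boundaries=[0., 10.]) and bucketized input [[1, 2], [0, 2]], the
    # flattened indices are [1, 2, 0, 2] and i2 is [0, 1, 0, 1], so
    # bucket_indices becomes [1, 5, 0, 5].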
bucket_indices = (
array_ops.reshape(input_tensor, (-1,)) +
(len(self.boundaries) + 1) * i2)
indices = math_ops.cast(
array_ops.transpose(array_ops.stack((i1, i2))), dtypes.int64)
dense_shape = math_ops.cast(
array_ops.stack([batch_size, source_dimension]), dtypes.int64)
sparse_tensor = sparse_tensor_lib.SparseTensor(
indices=indices,
values=bucket_indices,
dense_shape=dense_shape)
return CategoricalColumn.IdWeightPair(sparse_tensor, None)
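  # Editor's note: a small worked example of the index offsetting above
  # (hedged sketch, not part of the original source). With boundaries=[0., 10.]
  # there are k = 3 buckets; for a source column of shape [2], dimension j of
  # each example has j * k added to its bucket id so ids stay unique across
  # dimensions:
  #   raw buckets per example:   [[1, 2], [0, 1]]
  #   after offsetting (+ j*3):  [[1, 5], [0, 4]]
  #   sparse indices (row, dim): [[0, 0], [0, 1], [1, 0], [1, 1]]
  # so num_buckets == k * source_dimension == 6, matching `num_buckets` above.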
def get_sparse_tensors(self, transformation_cache, state_manager):
"""Converts dense inputs to SparseTensor so downstream code can use it."""
input_tensor = transformation_cache.get(self, state_manager)
return self._get_sparse_tensors_for_input_tensor(input_tensor)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
"""Converts dense inputs to SparseTensor so downstream code can use it."""
del weight_collections
del trainable
input_tensor = inputs.get(self)
return self._get_sparse_tensors_for_input_tensor(input_tensor)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.source_column]
def _get_config(self):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import serialize_feature_column # pylint: disable=g-import-not-at-top
config = dict(zip(self._fields, self))
config['source_column'] = serialize_feature_column(self.source_column)
return config
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import deserialize_feature_column # pylint: disable=g-import-not-at-top
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['source_column'] = deserialize_feature_column(
config['source_column'], custom_objects, columns_by_name)
return cls(**kwargs)
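# Editor's note (hedged usage sketch, not part of the original source): the
# public wrapper expected to build `BucketizedColumn` is
# `tf.feature_column.bucketized_column`; a minimal use might look like:
#   price = tf.feature_column.numeric_column('price')
#   price_buckets = tf.feature_column.bucketized_column(
#       price, boundaries=[0., 10., 100.])
# Used as a dense column it one-hot encodes into len(boundaries) + 1 = 4
# buckets per element; used as a categorical column it feeds linear models or
# embedding_column via get_sparse_tensors().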
class SparseBucketizedColumn(
DenseColumn,
CategoricalColumn,
fc_old._DenseColumn, # pylint: disable=protected-access
fc_old._CategoricalColumn, # pylint: disable=protected-access
collections.namedtuple('SparseBucketizedColumn',
('source_column', 'boundaries'))):
"""See `sparse_bucketized_column`."""
@property
def _is_v2_column(self):
return (isinstance(self.source_column, FeatureColumn) and
self.source_column._is_v2_column) # pylint: disable=protected-access
@property
def name(self):
"""See `FeatureColumn` base class."""
return '{}_sparse_bucketized'.format(self.source_column.name)
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return self.source_column.parse_example_spec
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
return self.source_column._parse_example_spec # pylint: disable=protected-access
  def _transform_input_tensor(self, input_tensor):
input_tensor = _to_sparse_input_and_drop_ignore_values(input_tensor)
if not isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
      raise ValueError('SparseBucketizedColumn input must be a SparseTensor.')
fc_utils.assert_float_or_int(
input_tensor.dtype,
prefix='column_name: {} input_tensor'.format(self.name))
return sparse_tensor_lib.SparseTensor(
input_tensor.indices,
math_ops._bucketize( # pylint: disable=protected-access
input_tensor.values,
boundaries=self.boundaries),
input_tensor.dense_shape)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
"""Returns bucketized categorical `source_column` tensor."""
source_tensor = inputs.get(self.source_column)
    return self._transform_input_tensor(source_tensor)
def transform_feature(self, transformation_cache, state_manager):
"""Returns bucketized categorical `source_column` tensor."""
source_tensor = transformation_cache.get(self.source_column, state_manager)
    return self._transform_input_tensor(source_tensor)
@property
def variable_shape(self):
"""See `DenseColumn` base class."""
if self.source_column.shape is not None:
return tensor_shape.TensorShape(
tuple(self.source_column.shape) + (len(self.boundaries) + 1,))
else:
return None
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _variable_shape(self):
return self.variable_shape
def output_shape(self, inputs):
"""See `DenseColumn` base class."""
if self.source_column.shape is not None:
batch_size = array_ops.shape(inputs)[0]
num_elements = self.variable_shape.num_elements()
return (batch_size, num_elements)
else:
      # Keep the original sparse tensor's dense shape.
batch_size = array_ops.shape(inputs)[0]
num_elements = math_ops.reduce_prod(array_ops.shape(inputs)[1:])
return (batch_size, num_elements)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _output_shape(self, inputs):
return self.output_shape(inputs)
def _get_dense_tensor_for_input_tensor(self, input_tensor):
dense_tensor = sparse_ops.sparse_tensor_to_dense(input_tensor, default_value=0)
return array_ops.one_hot(
indices=math_ops.cast(dense_tensor, dtypes.int64),
depth=len(self.boundaries) + 1,
on_value=1.,
off_value=0.)
def get_dense_tensor(self, transformation_cache, state_manager):
"""Returns one hot encoded dense `Tensor`."""
input_tensor = transformation_cache.get(self, state_manager)
return self._get_dense_tensor_for_input_tensor(input_tensor)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
del weight_collections
del trainable
input_tensor = inputs.get(self)
return self._get_dense_tensor_for_input_tensor(input_tensor)
@property
def num_buckets(self):
"""See `CategoricalColumn` base class."""
# By construction, source_column is always one-dimensional.
return len(self.boundaries) + 1
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _num_buckets(self):
return self.num_buckets
def get_sparse_tensors(self, transformation_cache, state_manager):
"""Converts dense inputs to SparseTensor so downstream code can use it."""
input_tensor = transformation_cache.get(self, state_manager)
return CategoricalColumn.IdWeightPair(input_tensor, None)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
"""Converts dense inputs to SparseTensor so downstream code can use it."""
del weight_collections
del trainable
input_tensor = inputs.get(self)
return CategoricalColumn.IdWeightPair(input_tensor, None)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.source_column]
def _get_config(self):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import serialize_feature_column # pylint: disable=g-import-not-at-top
config = dict(zip(self._fields, self))
config['source_column'] = serialize_feature_column(self.source_column)
return config
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import deserialize_feature_column # pylint: disable=g-import-not-at-top
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['source_column'] = deserialize_feature_column(
config['source_column'], custom_objects, columns_by_name)
return cls(**kwargs)
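# Editor's note (hedged sketch, not part of the original source): unlike
# `BucketizedColumn`, this column expects an already-sparse numeric input and
# bucketizes only the `values` of the SparseTensor, leaving indices and
# dense_shape untouched. For example, with boundaries=[0., 10.]:
#   sp = tf.sparse.SparseTensor(indices=[[0, 0], [1, 2]],
#                               values=[-3.0, 42.0], dense_shape=[2, 4])
# transform_feature() would map the values to bucket ids [0, 2], and
# num_buckets == len(boundaries) + 1 == 3 (not multiplied by the source
# dimension, in contrast to BucketizedColumn). Whether a public
# `sparse_bucketized_column` wrapper is exported by this fork is assumed, not
# verified here.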
class CutoffCategoricalColumn(
CategoricalColumn,
fc_old._CategoricalColumn, # pylint: disable=protected-access
collections.namedtuple('CutoffCategoricalColumn',
('categorical_column', 'cutoff_length',
'cutoff_side', 'cutoff_axis', 'reverse'))):
"""See `cutoff_categorical_column`."""
@property
def _is_v2_column(self):
return (isinstance(self.categorical_column, FeatureColumn) and
self.categorical_column._is_v2_column) # pylint: disable=protected-access
@property
def name(self):
"""See `FeatureColumn` base class."""
return '{}_cutoff'.format(self.categorical_column.name)
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return self.categorical_column.parse_example_spec
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
return self.categorical_column._parse_example_spec # pylint: disable=protected-access
  def _transform_input_tensor(self, input_tensor):
if isinstance(input_tensor, (tuple, list)):
id_tensor = input_tensor[0]
weight_tensor = input_tensor[1]
else:
id_tensor = input_tensor
weight_tensor = None
id_tensor = sparse_ops.sparse_valid_cutoff(
id_tensor,
self.cutoff_axis,
self.cutoff_length,
self.cutoff_side,
self.reverse)
if weight_tensor is not None:
weight_tensor = sparse_ops.sparse_valid_cutoff(
weight_tensor,
self.cutoff_axis,
self.cutoff_length,
self.cutoff_side,
self.reverse)
return (id_tensor, weight_tensor)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
"""Returns tensor after cutoff."""
source_tensors = inputs.get(self.categorical_column)
    return self._transform_input_tensor(source_tensors)
def transform_feature(self, transformation_cache, state_manager):
"""Returns tensor after cutoff."""
source_tensors = transformation_cache.get(self.categorical_column, state_manager)
    return self._transform_input_tensor(source_tensors)
@property
def num_buckets(self):
"""See `CategoricalColumn` base class."""
return self.categorical_column.num_buckets
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _num_buckets(self):
return self.num_buckets
def get_sparse_tensors(self, transformation_cache, state_manager):
"""Converts dense inputs to SparseTensor so downstream code can use it."""
input_tensors = transformation_cache.get(self, state_manager)
return CategoricalColumn.IdWeightPair(input_tensors[0], input_tensors[1])
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
"""Converts dense inputs to SparseTensor so downstream code can use it."""
del weight_collections
del trainable
input_tensors = inputs.get(self)
return CategoricalColumn.IdWeightPair(input_tensors[0], input_tensors[1])
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.categorical_column]
def _get_config(self):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import serialize_feature_column # pylint: disable=g-import-not-at-top
config = dict(zip(self._fields, self))
config['categorical_column'] = serialize_feature_column(self.categorical_column)
config['cutoff_length'] = self.cutoff_length
config['cutoff_side'] = self.cutoff_side
config['cutoff_axis'] = self.cutoff_axis
config['reverse'] = self.reverse
return config
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import deserialize_feature_column # pylint: disable=g-import-not-at-top
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['categorical_column'] = deserialize_feature_column(
config['categorical_column'], custom_objects, columns_by_name)
    kwargs['cutoff_length'] = config['cutoff_length']
kwargs['cutoff_side'] = config['cutoff_side']
kwargs['cutoff_axis'] = config['cutoff_axis']
kwargs['reverse'] = config['reverse']
return cls(**kwargs)
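# Editor's note (hedged sketch, not part of the original source): conceptually
# this column truncates each example's id list (and matching weight list) to
# at most `cutoff_length` entries along `cutoff_axis`, keeping them from
# `cutoff_side` and optionally reversing the kept order; the work is done by
# the custom wrapper `sparse_ops.sparse_valid_cutoff`, whose exact semantics
# are inferred from its arguments here. A plausible construction, with an
# assumed public wrapper name and an assumed 'right' value for cutoff_side:
#   clicks = categorical_column_with_hash_bucket('clicks', 1000)
#   recent_clicks = cutoff_categorical_column(
#       clicks, cutoff_length=20, cutoff_side='right', cutoff_axis=1,
#       reverse=False)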
class EmbeddingColumn(
DenseColumn,
SequenceDenseColumn,
fc_old._DenseColumn, # pylint: disable=protected-access
fc_old._SequenceDenseColumn, # pylint: disable=protected-access
collections.namedtuple(
'EmbeddingColumn',
('categorical_column', 'dimension', 'combiner', 'initializer',
'ckpt_to_load_from', 'tensor_name_in_ckpt', 'max_norm', 'trainable',
'coalesced_scope'))):
"""See `embedding_column`."""
@property
def _is_v2_column(self):
return (isinstance(self.categorical_column, FeatureColumn) and
self.categorical_column._is_v2_column) # pylint: disable=protected-access
@property
def name(self):
"""See `FeatureColumn` base class."""
return '{}_embedding'.format(self.categorical_column.name)
@property
def var_scope_name(self):
if self.coalesced_scope:
return self.coalesced_scope.get_coalesced_name_by_column(self)
else:
return self.name
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return self.categorical_column.parse_example_spec
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
return self.categorical_column._parse_example_spec # pylint: disable=protected-access
def transform_feature(self, transformation_cache, state_manager):
"""Transforms underlying `categorical_column`."""
return transformation_cache.get(self.categorical_column, state_manager)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
return inputs.get(self.categorical_column)
@property
def variable_shape(self):
"""See `DenseColumn` base class."""
return tensor_shape.TensorShape([self.dimension])
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _variable_shape(self):
return self.variable_shape
def create_state(self, state_manager):
"""Creates the embedding lookup variable."""
if self.coalesced_scope:
self.coalesced_scope.create_state_by_column(
self)
else:
num_buckets = getattr(self.categorical_column, 'num_buckets',
self.categorical_column._num_buckets) # pylint: disable=protected-access
embedding_shape = (num_buckets, self.dimension)
state_manager.create_variable(
self,
name='embedding_weights',
shape=embedding_shape,
dtype=dtypes.float32,
trainable=self.trainable,
use_resource=True,
initializer=self.initializer)
def _get_dense_tensor_internal_helper(self, sparse_tensors,
embedding_weights):
sparse_ids = sparse_tensors.id_tensor
sparse_weights = sparse_tensors.weight_tensor
if self.ckpt_to_load_from is not None:
to_restore = embedding_weights
if isinstance(to_restore, variables.PartitionedVariable):
to_restore = to_restore._get_variable_list() # pylint: disable=protected-access
checkpoint_utils.init_from_checkpoint(self.ckpt_to_load_from, {
self.tensor_name_in_ckpt: to_restore
})
# Return embedding lookup result.
return embedding_ops.safe_embedding_lookup_sparse(
embedding_weights=embedding_weights,
sparse_ids=sparse_ids,
sparse_weights=sparse_weights,
combiner=self.combiner,
name='%s_weights' % self.name,
max_norm=self.max_norm)
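  # Editor's note (hedged): `safe_embedding_lookup_sparse` is used instead of
  # a plain lookup because, per its upstream contract, it prunes invalid
  # (negative) ids and zero-weight entries and fills otherwise-empty rows, so
  # every example produces a vector. Roughly:
  #   sparse_ids:  SparseTensor [batch, max_len] of int64 bucket ids
  #   result:      Tensor [batch, dimension], rows combined per `combiner`
  #                ('mean', 'sum' or 'sqrtn')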
def _get_dense_tensor_internal_adaptive_helper(self, sparse_tensors,
hash_embeddings, ev_embeddings):
sparse_ids = sparse_tensors.id_tensor
sparse_weights = sparse_tensors.weight_tensor
if self.ckpt_to_load_from is not None:
for to_restore in [hash_embeddings, ev_embeddings]:
if isinstance(to_restore, variables.PartitionedVariable):
to_restore = to_restore._get_variable_list() # pylint: disable=protected-access
checkpoint_utils.init_from_checkpoint(self.ckpt_to_load_from, {
self.tensor_name_in_ckpt: to_restore
})
# Return embedding lookup result.
return embedding_ops.safe_adaptive_embedding_lookup_sparse(
hash_embedding_weights=hash_embeddings,
ev_embedding_weights=ev_embeddings,
sparse_ids=sparse_ids,
hash_ev_ids=self.categorical_column.hash_ev_ids,
sparse_weights=sparse_weights,
combiner=self.combiner,
name='%s_weights' % self.name,
max_norm=self.max_norm,
adaptive_mask_tensor=self.categorical_column.adaptive_mask_tensor)
def _get_dense_tensor_internal(self, sparse_tensors, state_manager):
"""Private method that follows the signature of get_dense_tensor."""
embedding_weights = state_manager.get_variable(
self, name='embedding_weights')
return self._get_dense_tensor_internal_helper(sparse_tensors,
embedding_weights)
def _old_get_dense_tensor_internal(self, sparse_tensors, weight_collections,
trainable):
"""Private method that follows the signature of _get_dense_tensor."""
embedding_shape = (self.categorical_column._num_buckets, self.dimension) # pylint: disable=protected-access
if (weight_collections and
ops.GraphKeys.GLOBAL_VARIABLES not in weight_collections):
weight_collections.append(ops.GraphKeys.GLOBAL_VARIABLES)
    if isinstance(self.categorical_column,
                  (AdaptiveEmbeddingCategoricalColumn,
                   EmbeddingCategoricalColumn)):
if self.categorical_column.partition_num is None:
partitioner = None
else:
partitioner = partitioned_variables.fixed_size_partitioner(self.categorical_column.partition_num)
if isinstance(self.categorical_column, AdaptiveEmbeddingCategoricalColumn):
ev_embeddings = variable_scope.get_embedding_variable_internal(
name="ev_weights",
embedding_dim=self.dimension,
initializer=self.initializer,
trainable=(trainable and self.trainable),
collections=weight_collections,
partitioner=partitioner,
ev_option=self.categorical_column.ev_option)
hash_embeddings = variable_scope.get_variable(
name="hash_weights",
shape=embedding_shape,
dtype=dtypes.float32,
initializer=self.initializer,
trainable=(trainable and self.trainable),
collections=weight_collections)
return self._get_dense_tensor_internal_adaptive_helper(sparse_tensors,
hash_embeddings, ev_embeddings)
elif isinstance(self.categorical_column, EmbeddingCategoricalColumn):
embedding_weights = variable_scope.get_embedding_variable_internal(
name='embedding_weights',
embedding_dim=self.dimension,
initializer=self.initializer,
trainable=self.trainable and trainable,
collections=weight_collections,
partitioner=partitioner,
ev_option=self.categorical_column.ev_option
)
return self._get_dense_tensor_internal_helper(sparse_tensors,
embedding_weights)
else:
embedding_weights = variable_scope.get_variable(
name='embedding_weights',
shape=embedding_shape,
dtype=dtypes.float32,
initializer=self.initializer,
trainable=self.trainable and trainable,
collections=weight_collections)
return self._get_dense_tensor_internal_helper(sparse_tensors,
embedding_weights)
def get_dense_tensor(self, transformation_cache, state_manager):
"""Returns tensor after doing the embedding lookup.
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
Returns:
Embedding lookup tensor.
Raises:
ValueError: `categorical_column` is SequenceCategoricalColumn.
"""
if isinstance(self.categorical_column, SequenceCategoricalColumn):
raise ValueError(
'In embedding_column: {}. '
'categorical_column must not be of type SequenceCategoricalColumn. '
'Suggested fix A: If you wish to use DenseFeatures, use a '
'non-sequence categorical_column_with_*. '
'Suggested fix B: If you wish to create sequence input, use '
'SequenceFeatures instead of DenseFeatures. '
'Given (type {}): {}'.format(self.name, type(self.categorical_column),
self.categorical_column))
# Get sparse IDs and weights.
if self.coalesced_scope:
return self.coalesced_scope.get_dense_tensor_by_column_v2(
self, transformation_cache, state_manager)
sparse_tensors = self.categorical_column.get_sparse_tensors(
transformation_cache, state_manager)
return self._get_dense_tensor_internal(sparse_tensors, state_manager)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
if isinstance(
self.categorical_column,
(SequenceCategoricalColumn, fc_old._SequenceCategoricalColumn)): # pylint: disable=protected-access
raise ValueError(
'In embedding_column: {}. '
'categorical_column must not be of type _SequenceCategoricalColumn. '
'Suggested fix A: If you wish to use DenseFeatures, use a '
'non-sequence categorical_column_with_*. '
'Suggested fix B: If you wish to create sequence input, use '
'SequenceFeatures instead of DenseFeatures. '
'Given (type {}): {}'.format(self.name, type(self.categorical_column),
self.categorical_column))
if self.coalesced_scope:
return self.coalesced_scope.get_dense_tensor_by_column( # pylint: disable=protected-access
self, inputs, weight_collections, trainable)
sparse_tensors = self.categorical_column._get_sparse_tensors( # pylint: disable=protected-access
inputs, weight_collections, trainable)
return self._old_get_dense_tensor_internal(sparse_tensors,
weight_collections, trainable)
def get_sequence_dense_tensor(self, transformation_cache, state_manager):
"""See `SequenceDenseColumn` base class."""
if not isinstance(self.categorical_column, SequenceCategoricalColumn):
raise ValueError(
'In embedding_column: {}. '
'categorical_column must be of type SequenceCategoricalColumn '
'to use SequenceFeatures. '
'Suggested fix: Use one of sequence_categorical_column_with_*. '
'Given (type {}): {}'.format(self.name, type(self.categorical_column),
self.categorical_column))
    if self.coalesced_scope:
      raise NotImplementedError(
          'get_sequence_dense_tensor is not implemented in coalesced mode')
sparse_tensors = self.categorical_column.get_sparse_tensors(
transformation_cache, state_manager)
dense_tensor = self._get_dense_tensor_internal(sparse_tensors,
state_manager)
sequence_length = fc_utils.sequence_length_from_sparse_tensor(
sparse_tensors.id_tensor)
return SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=dense_tensor, sequence_length=sequence_length)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sequence_dense_tensor(self,
inputs,
weight_collections=None,
trainable=None):
if not isinstance(
self.categorical_column,
(SequenceCategoricalColumn, fc_old._SequenceCategoricalColumn)): # pylint: disable=protected-access
raise ValueError(
'In embedding_column: {}. '
'categorical_column must be of type SequenceCategoricalColumn '
'to use SequenceFeatures. '
'Suggested fix: Use one of sequence_categorical_column_with_*. '
'Given (type {}): {}'.format(self.name, type(self.categorical_column),
self.categorical_column))
sparse_tensors = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access
dense_tensor = self._old_get_dense_tensor_internal(
sparse_tensors,
weight_collections=weight_collections,
trainable=trainable)
sequence_length = fc_utils.sequence_length_from_sparse_tensor(
sparse_tensors.id_tensor)
return SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=dense_tensor, sequence_length=sequence_length)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.categorical_column]
def _get_config(self):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import serialize_feature_column # pylint: disable=g-import-not-at-top
config = dict(zip(self._fields, self))
config['categorical_column'] = serialize_feature_column(
self.categorical_column)
config['initializer'] = initializers.serialize(self.initializer)
return config
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import deserialize_feature_column # pylint: disable=g-import-not-at-top
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['categorical_column'] = deserialize_feature_column(
config['categorical_column'], custom_objects, columns_by_name)
kwargs['initializer'] = initializers.deserialize(
config['initializer'], custom_objects=custom_objects)
return cls(**kwargs)
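# Editor's note (hedged usage sketch, not part of the original source): a
# typical construction via the public wrapper assumed to build this class:
#   cat = tf.feature_column.categorical_column_with_hash_bucket(
#       'user_id', hash_bucket_size=100000)
#   emb = tf.feature_column.embedding_column(cat, dimension=32,
#                                            combiner='mean')
# DenseFeatures / input_layer then call get_dense_tensor(), which looks up a
# [num_buckets, 32] 'embedding_weights' variable created by create_state()
# (or by the active coalesced scope when one is set on the column).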
class SharedEmbeddingColumnV2(
DenseColumn,
SequenceDenseColumn,
fc_old._DenseColumn, # pylint: disable=protected-access
fc_old._SequenceDenseColumn, # pylint: disable=protected-access
collections.namedtuple(
'SharedEmbeddingColumnV2',
('categorical_column', 'dimension', 'shared_name','combiner',
'initializer', 'ckpt_to_load_from', 'tensor_name_in_ckpt',
'max_norm', 'trainable', 'coalesced_scope'))):
"""See `shared_embedding_column`."""
@property
def _is_v2_column(self):
return (isinstance(self.categorical_column, FeatureColumn) and
self.categorical_column._is_v2_column) # pylint: disable=protected-access
@property
def name(self):
"""See `FeatureColumn` base class."""
return '{}_shared_embedding_v2'.format(self.categorical_column.name)
@property
def var_scope_name(self):
if self.coalesced_scope:
return self.coalesced_scope.get_coalesced_name_by_column(self)
else:
return self.shared_name
@property
def embedding_name(self):
return self.shared_name
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return self.categorical_column.parse_example_spec
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
return self.categorical_column._parse_example_spec # pylint: disable=protected-access
def transform_feature(self, transformation_cache, state_manager):
"""Transforms underlying `categorical_column`."""
return transformation_cache.get(self.categorical_column, state_manager)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
return inputs.get(self.categorical_column)
@property
def variable_shape(self):
"""See `DenseColumn` base class."""
return tensor_shape.TensorShape([self.dimension])
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _variable_shape(self):
return self.variable_shape
def get_embedding(self):
shared_embedding_collection = ops.get_collection(self.embedding_name)
if shared_embedding_collection:
if len(shared_embedding_collection) > 1:
raise ValueError(
'Collection {} can only contain one variable. '
'Suggested fix A: Choose a unique name for this collection. '
'Suggested fix B: Do not add any variables to this collection. '
'The feature_column library already adds a variable under the '
            'hood.'.format(self.embedding_name))
embedding_weights = shared_embedding_collection[0]
return embedding_weights
else:
raise ValueError("Embedding not created yet.")
def create_state(self, state_manager):
"""Creates the embedding lookup variable."""
if self.coalesced_scope:
self.coalesced_scope.create_state_by_column(
self)
else:
shared_embedding_collection = ops.get_collection(self.embedding_name)
if shared_embedding_collection:
if len(shared_embedding_collection) > 1:
raise ValueError(
'Collection {} can only contain one variable. '
'Suggested fix A: Choose a unique name for this collection. '
'Suggested fix B: Do not add any variables to this collection. '
'The feature_column library already adds a variable under the '
              'hood.'.format(self.embedding_name))
else:
num_buckets = getattr(self.categorical_column, 'num_buckets',
self.categorical_column._num_buckets) # pylint: disable=protected-access
embedding_shape = (num_buckets, self.dimension)
state_manager.create_variable(
self,
name='embedding_weights',
shape=embedding_shape,
dtype=dtypes.float32,
trainable=self.trainable,
use_resource=True,
initializer=self.initializer)
embedding_weights = state_manager.get_variable(
self, name='embedding_weights')
ops.add_to_collection(self.embedding_name,
embedding_weights)
def _get_dense_tensor_internal_helper(self, sparse_tensors,
embedding_weights):
sparse_ids = sparse_tensors.id_tensor
sparse_weights = sparse_tensors.weight_tensor
if self.ckpt_to_load_from is not None:
to_restore = embedding_weights
if isinstance(to_restore, variables.PartitionedVariable):
to_restore = to_restore._get_variable_list() # pylint: disable=protected-access
checkpoint_utils.init_from_checkpoint(self.ckpt_to_load_from, {
self.tensor_name_in_ckpt: to_restore
})
# Return embedding lookup result.
return embedding_ops.safe_embedding_lookup_sparse(
embedding_weights=embedding_weights,
sparse_ids=sparse_ids,
sparse_weights=sparse_weights,
combiner=self.combiner,
name='%s_weights' % self.name,
max_norm=self.max_norm)
def _get_dense_tensor_internal(self, sparse_tensors, state_manager):
"""Private method that follows the signature of get_dense_tensor."""
embedding_weights = self.get_embedding()
return self._get_dense_tensor_internal_helper(sparse_tensors,
embedding_weights)
def _old_get_dense_tensor_internal(self, sparse_tensors, weight_collections,
trainable):
"""Private method that follows the signature of _get_dense_tensor."""
embedding_shape = (self.categorical_column._num_buckets, self.dimension) # pylint: disable=protected-access
if (weight_collections and
ops.GraphKeys.GLOBAL_VARIABLES not in weight_collections):
weight_collections.append(ops.GraphKeys.GLOBAL_VARIABLES)
embedding_weights = variable_scope.get_variable(
name='embedding_weights',
shape=embedding_shape,
dtype=dtypes.float32,
initializer=self.initializer,
trainable=self.trainable and trainable,
collections=weight_collections)
return self._get_dense_tensor_internal_helper(sparse_tensors,
embedding_weights)
def get_dense_tensor(self, transformation_cache, state_manager):
"""Returns tensor after doing the embedding lookup.
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
Returns:
Embedding lookup tensor.
Raises:
ValueError: `categorical_column` is SequenceCategoricalColumn.
"""
if isinstance(self.categorical_column, SequenceCategoricalColumn):
raise ValueError(
'In embedding_column: {}. '
'categorical_column must not be of type SequenceCategoricalColumn. '
'Suggested fix A: If you wish to use DenseFeatures, use a '
'non-sequence categorical_column_with_*. '
'Suggested fix B: If you wish to create sequence input, use '
'SequenceFeatures instead of DenseFeatures. '
'Given (type {}): {}'.format(self.name, type(self.categorical_column),
self.categorical_column))
# Get sparse IDs and weights.
if self.coalesced_scope:
return self.coalesced_scope.get_dense_tensor_by_column_v2(
self, transformation_cache, state_manager)
sparse_tensors = self.categorical_column.get_sparse_tensors(
transformation_cache, state_manager)
return self._get_dense_tensor_internal(sparse_tensors, state_manager)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
if isinstance(
self.categorical_column,
(SequenceCategoricalColumn, fc_old._SequenceCategoricalColumn)): # pylint: disable=protected-access
raise ValueError(
'In embedding_column: {}. '
'categorical_column must not be of type _SequenceCategoricalColumn. '
'Suggested fix A: If you wish to use DenseFeatures, use a '
'non-sequence categorical_column_with_*. '
'Suggested fix B: If you wish to create sequence input, use '
'SequenceFeatures instead of DenseFeatures. '
'Given (type {}): {}'.format(self.name, type(self.categorical_column),
self.categorical_column))
if self.coalesced_scope:
return self.coalesced_scope.get_dense_tensor_by_column( # pylint: disable=protected-access
self, inputs, weight_collections, trainable)
sparse_tensors = self.categorical_column._get_sparse_tensors( # pylint: disable=protected-access
inputs, weight_collections, trainable)
return self._old_get_dense_tensor_internal(sparse_tensors,
weight_collections, trainable)
def get_sequence_dense_tensor(self, transformation_cache, state_manager):
"""See `SequenceDenseColumn` base class."""
if not isinstance(self.categorical_column, SequenceCategoricalColumn):
raise ValueError(
'In embedding_column: {}. '
'categorical_column must be of type SequenceCategoricalColumn '
'to use SequenceFeatures. '
'Suggested fix: Use one of sequence_categorical_column_with_*. '
'Given (type {}): {}'.format(self.name, type(self.categorical_column),
self.categorical_column))
    if self.coalesced_scope:
      raise NotImplementedError(
          'get_sequence_dense_tensor is not implemented in coalesced mode')
sparse_tensors = self.categorical_column.get_sparse_tensors(
transformation_cache, state_manager)
dense_tensor = self._get_dense_tensor_internal(sparse_tensors,
state_manager)
sequence_length = fc_utils.sequence_length_from_sparse_tensor(
sparse_tensors.id_tensor)
return SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=dense_tensor, sequence_length=sequence_length)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sequence_dense_tensor(self,
inputs,
weight_collections=None,
trainable=None):
if not isinstance(
self.categorical_column,
(SequenceCategoricalColumn, fc_old._SequenceCategoricalColumn)): # pylint: disable=protected-access
raise ValueError(
'In embedding_column: {}. '
'categorical_column must be of type SequenceCategoricalColumn '
'to use SequenceFeatures. '
'Suggested fix: Use one of sequence_categorical_column_with_*. '
'Given (type {}): {}'.format(self.name, type(self.categorical_column),
self.categorical_column))
sparse_tensors = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access
dense_tensor = self._old_get_dense_tensor_internal(
sparse_tensors,
weight_collections=weight_collections,
trainable=trainable)
sequence_length = fc_utils.sequence_length_from_sparse_tensor(
sparse_tensors.id_tensor)
return SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=dense_tensor, sequence_length=sequence_length)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.categorical_column]
def _get_config(self):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import serialize_feature_column # pylint: disable=g-import-not-at-top
config = dict(zip(self._fields, self))
config['categorical_column'] = serialize_feature_column(
self.categorical_column)
config['initializer'] = initializers.serialize(self.initializer)
return config
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import deserialize_feature_column # pylint: disable=g-import-not-at-top
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['categorical_column'] = deserialize_feature_column(
config['categorical_column'], custom_objects, columns_by_name)
kwargs['initializer'] = initializers.deserialize(
config['initializer'], custom_objects=custom_objects)
return cls(**kwargs)
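# Editor's note (hedged sketch, not part of the original source): sharing in
# SharedEmbeddingColumnV2 is keyed on `shared_name`: the first column to run
# create_state() creates 'embedding_weights' and adds it to the graph
# collection named `shared_name`; later columns with the same `shared_name`
# pick it up through get_embedding(). Roughly, with an assumed wrapper name:
#   col_a = shared_embedding_column_v2(cat_a, dimension=16, shared_name='uid')
#   col_b = shared_embedding_column_v2(cat_b, dimension=16, shared_name='uid')
# Both columns then resolve the same [num_buckets, 16] variable at lookup
# time.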
class SequenceEmbeddingColumn(
DenseColumn,
fc_old._DenseColumn, # pylint: disable=protected-access
collections.namedtuple(
'SequenceEmbeddingColumn',
('dense_column', 'sequence_length'))):
"""See `sequence_embedding_column`."""
@property
def _is_v2_column(self):
return (isinstance(self.dense_column, FeatureColumn) and
self.dense_column._is_v2_column) # pylint: disable=protected-access
@property
def name(self):
"""See `FeatureColumn` base class."""
return '{}_sequence'.format(self.dense_column.name)
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
out_spec = {}
dense_spec = self.dense_column.parse_example_spec
for key, parsing in dense_spec.items():
if isinstance(parsing, parsing_ops.FixedLenFeature):
out_spec[key] = parsing_ops.FixedLenSequenceFeature(parsing.shape,
parsing.dtype,
True,
parsing.default_value)
else:
out_spec[key] = parsing
return out_spec
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
out_spec = {}
dense_spec = self.dense_column._parse_example_spec
for key, parsing in dense_spec.items():
if isinstance(parsing, parsing_ops.FixedLenFeature):
out_spec[key] = parsing_ops.FixedLenSequenceFeature(parsing.shape,
parsing.dtype,
True,
parsing.default_value)
else:
out_spec[key] = parsing
return out_spec
def create_state(self, state_manager):
return self.dense_column.create_state(state_manager)
def transform_feature(self, transformation_cache, state_manager):
"""Transforms underlying `categorical_column`."""
return transformation_cache.get(self.dense_column, state_manager)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
return inputs.get(self.dense_column)
@property
def variable_shape(self):
"""See `DenseColumn` base class."""
return self.dense_column.variable_shape
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _variable_shape(self):
return self.dense_column._variable_shape
def output_shape(self, inputs):
"""See `DenseColumn` base class."""
    if self.variable_shape is not None:
num_elements = self.variable_shape.num_elements()
else:
num_elements = array_ops.shape(inputs)[-1]
return (-1, self.sequence_length, num_elements)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _output_shape(self, inputs):
if self._variable_shape is not None:
num_elements = self._variable_shape.num_elements()
else:
num_elements = array_ops.shape(inputs)[-1]
return (-1, self.sequence_length, num_elements)
def get_dense_tensor(self, transformation_cache, state_manager):
"""Returns tensor after doing the embedding lookup.
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
Returns:
Embedding lookup tensor.
"""
return self.dense_column.get_dense_tensor(
transformation_cache,
state_manager)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
return self.dense_column._get_dense_tensor( # pylint: disable=protected-access
inputs=inputs,
weight_collections=weight_collections,
trainable=trainable)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.dense_column]
def _get_config(self):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import serialize_feature_column # pylint: disable=g-import-not-at-top
config = dict(zip(self._fields, self))
config['dense_column'] = serialize_feature_column(
self.dense_column)
return config
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import deserialize_feature_column # pylint: disable=g-import-not-at-top
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['dense_column'] = deserialize_feature_column(
config['dense_column'], custom_objects, columns_by_name)
return cls(**kwargs)
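# Editor's note (hedged sketch, not part of the original source): the
# parse_example_spec conversion above turns fixed-length specs into sequence
# specs with allow_missing=True, mirroring what the code does, e.g.:
#   parsing_ops.FixedLenFeature([1], dtypes.int64, default_value=0)
#   # becomes
#   parsing_ops.FixedLenSequenceFeature([1], dtypes.int64, True, 0)
# while sparse/var-len specs pass through unchanged. The resulting dense
# output is then viewed as [-1, sequence_length, num_elements] via
# output_shape().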
def _raise_shared_embedding_column_error():
raise ValueError('SharedEmbeddingColumns are not supported in '
'`linear_model` or `input_layer`. Please use '
'`DenseFeatures` or `LinearModel` instead.')
class SequenceMultiHashEmbeddingColumn(
DenseColumn,
fc_old._DenseColumn, # pylint: disable=protected-access
collections.namedtuple(
'SequenceMultiHashEmbeddingColumn',
('dense_column', 'sequence_length'))):
"""See `sequence_embedding_column`."""
@property
def _is_v2_column(self):
return (isinstance(self.dense_column, FeatureColumn) and
self.dense_column._is_v2_column) # pylint: disable=protected-access
@property
def name(self):
"""See `FeatureColumn` base class."""
return '{}_sequence'.format(self.dense_column.name)
@property
def categorical_column(self):
return self.dense_column.categorical_column
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
out_spec = {}
dense_spec = self.dense_column.parse_example_spec
for key, parsing in dense_spec.items():
if isinstance(parsing, parsing_ops.FixedLenFeature):
out_spec[key] = parsing_ops.FixedLenSequenceFeature(parsing.shape,
parsing.dtype,
True,
parsing.default_value)
else:
out_spec[key] = parsing
return out_spec
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
out_spec = {}
dense_spec = self.dense_column._parse_example_spec
for key, parsing in dense_spec.items():
if isinstance(parsing, parsing_ops.FixedLenFeature):
out_spec[key] = parsing_ops.FixedLenSequenceFeature(parsing.shape,
parsing.dtype,
True,
parsing.default_value)
else:
out_spec[key] = parsing
return out_spec
def transform_feature(self, transformation_cache, state_manager):
"""Transforms underlying `categorical_column`."""
return transformation_cache.get(self.dense_column, state_manager)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
return inputs.get(self.dense_column)
@property
def variable_shape(self):
"""See `DenseColumn` base class."""
return self.dense_column.variable_shape
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _variable_shape(self):
return self.dense_column._variable_shape
def output_shape(self, inputs):
"""See `DenseColumn` base class."""
    if self.variable_shape is not None:
num_elements = self.variable_shape.num_elements()
else:
num_elements = array_ops.shape(inputs)[-1]
return (-1, self.sequence_length, num_elements)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _output_shape(self, inputs):
if self._variable_shape is not None:
num_elements = self._variable_shape.num_elements()
else:
num_elements = array_ops.shape(inputs)[-1]
return (-1, self.sequence_length, num_elements)
def get_dense_tensor(self, transformation_cache, state_manager):
"""Returns tensor after doing the embedding lookup.
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
Returns:
Embedding lookup tensor.
"""
return self.dense_column.get_dense_tensor(
transformation_cache,
state_manager)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
return self.dense_column._get_dense_tensor( # pylint: disable=protected-access
inputs=inputs,
weight_collections=weight_collections,
trainable=trainable)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.dense_column]
def _get_config(self):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import serialize_feature_column # pylint: disable=g-import-not-at-top
config = dict(zip(self._fields, self))
config['dense_column'] = serialize_feature_column(
self.dense_column)
return config
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import deserialize_feature_column # pylint: disable=g-import-not-at-top
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['dense_column'] = deserialize_feature_column(
config['dense_column'], custom_objects, columns_by_name)
return cls(**kwargs)
class SharedEmbeddingColumnCreator(tracking.AutoTrackable):
def __init__(self,
dimension,
initializer,
ckpt_to_load_from,
tensor_name_in_ckpt,
num_buckets,
trainable,
name='shared_embedding_column_creator'):
self._dimension = dimension
self._initializer = initializer
self._ckpt_to_load_from = ckpt_to_load_from
self._tensor_name_in_ckpt = tensor_name_in_ckpt
self._num_buckets = num_buckets
self._trainable = trainable
self._name = name
# Map from graph keys to embedding_weight variables.
self._embedding_weights = {}
def __call__(self,
categorical_column,
combiner,
max_norm,
coalesced_scope=None):
return SharedEmbeddingColumn(categorical_column,
self,
combiner,
max_norm,
coalesced_scope)
@property
def embedding_weights(self):
key = ops.get_default_graph()._graph_key # pylint: disable=protected-access
if key not in self._embedding_weights:
embedding_shape = (self._num_buckets, self._dimension)
var = variable_scope.get_variable(
name=self._name,
shape=embedding_shape,
dtype=dtypes.float32,
initializer=self._initializer,
trainable=self._trainable)
if self._ckpt_to_load_from is not None:
to_restore = var
if isinstance(to_restore, variables.PartitionedVariable):
to_restore = to_restore._get_variable_list() # pylint: disable=protected-access
checkpoint_utils.init_from_checkpoint(
self._ckpt_to_load_from, {self._tensor_name_in_ckpt: to_restore})
self._embedding_weights[key] = var
return self._embedding_weights[key]
@property
def dimension(self):
return self._dimension
@property
def name(self):
return self._name
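# Editor's note (hedged usage sketch, not part of the original source): the
# creator is the unit of sharing for `SharedEmbeddingColumn`: one creator owns
# one [num_buckets, dimension] 'embedding_weights' variable per graph, and
# every column produced by calling it reuses that variable:
#   creator = SharedEmbeddingColumnCreator(
#       dimension=16, initializer=init_ops.zeros_initializer(),  # any
#       ckpt_to_load_from=None, tensor_name_in_ckpt=None,        # initializer
#       num_buckets=100000, trainable=True, name='item_id_shared')
#   col_a = creator(cat_a, combiner='mean', max_norm=None)
#   col_b = creator(cat_b, combiner='mean', max_norm=None)
# `init_ops` is assumed to be imported at the top of this module; col_a and
# col_b then share creator.embedding_weights within the current graph.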
class SharedEmbeddingColumn(
DenseColumn,
SequenceDenseColumn,
fc_old._DenseColumn, # pylint: disable=protected-access
fc_old._SequenceDenseColumn, # pylint: disable=protected-access
collections.namedtuple(
'SharedEmbeddingColumn',
('categorical_column', 'shared_embedding_column_creator', 'combiner',
'max_norm', 'coalesced_scope'))):
"""See `embedding_column`."""
@property
def _is_v2_column(self):
return True
@property
def name(self):
"""See `FeatureColumn` base class."""
return '{}_shared_embedding'.format(self.categorical_column.name)
@property
def var_scope_name(self):
if self.coalesced_scope:
return self.coalesced_scope.get_coalesced_name_by_column(self)
else:
return self.name
@property
def embedding_name(self):
return self.shared_embedding_column_creator.name
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return self.categorical_column.parse_example_spec
@property
def _parse_example_spec(self):
return _raise_shared_embedding_column_error()
def transform_feature(self, transformation_cache, state_manager):
"""See `FeatureColumn` base class."""
return transformation_cache.get(self.categorical_column, state_manager)
def _transform_feature(self, inputs):
return _raise_shared_embedding_column_error()
@property
def variable_shape(self):
"""See `DenseColumn` base class."""
return tensor_shape.TensorShape(
[self.shared_embedding_column_creator.dimension])
@property
def _variable_shape(self):
return _raise_shared_embedding_column_error()
def create_state(self, state_manager):
"""Creates the embedding lookup variable."""
if self.coalesced_scope:
self.coalesced_scope.create_state_by_column(
self)
else:
super(SharedEmbeddingColumn, self).create_state(state_manager)
def _get_dense_tensor_internal(self, transformation_cache, state_manager):
"""Private method that follows the signature of _get_dense_tensor."""
# This method is called from a variable_scope with name _var_scope_name,
# which is shared among all shared embeddings. Open a name_scope here, so
# that the ops for different columns have distinct names.
with ops.name_scope(None, default_name=self.name):
# Get sparse IDs and weights.
sparse_tensors = self.categorical_column.get_sparse_tensors(
transformation_cache, state_manager)
sparse_ids = sparse_tensors.id_tensor
sparse_weights = sparse_tensors.weight_tensor
embedding_weights = self.shared_embedding_column_creator.embedding_weights
# Return embedding lookup result.
return embedding_ops.safe_embedding_lookup_sparse(
embedding_weights=embedding_weights,
sparse_ids=sparse_ids,
sparse_weights=sparse_weights,
combiner=self.combiner,
name='%s_weights' % self.name,
max_norm=self.max_norm)
def get_dense_tensor(self, transformation_cache, state_manager):
"""Returns the embedding lookup result."""
if isinstance(self.categorical_column, SequenceCategoricalColumn):
raise ValueError(
'In embedding_column: {}. '
'categorical_column must not be of type SequenceCategoricalColumn. '
'Suggested fix A: If you wish to use DenseFeatures, use a '
'non-sequence categorical_column_with_*. '
'Suggested fix B: If you wish to create sequence input, use '
'SequenceFeatures instead of DenseFeatures. '
'Given (type {}): {}'.format(self.name, type(self.categorical_column),
self.categorical_column))
if self.coalesced_scope:
return self.coalesced_scope.get_dense_tensor_by_column_v2(
self, transformation_cache, state_manager)
return self._get_dense_tensor_internal(transformation_cache, state_manager)
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
return _raise_shared_embedding_column_error()
def get_sequence_dense_tensor(self, transformation_cache, state_manager):
"""See `SequenceDenseColumn` base class."""
if not isinstance(self.categorical_column, SequenceCategoricalColumn):
raise ValueError(
'In embedding_column: {}. '
'categorical_column must be of type SequenceCategoricalColumn '
'to use SequenceFeatures. '
'Suggested fix: Use one of sequence_categorical_column_with_*. '
'Given (type {}): {}'.format(self.name, type(self.categorical_column),
self.categorical_column))
if self.coalesced_scope:
raise NotImplementedError(
          'get_sequence_dense_tensor is not implemented in coalesced mode')
dense_tensor = self._get_dense_tensor_internal(transformation_cache,
state_manager)
sparse_tensors = self.categorical_column.get_sparse_tensors(
transformation_cache, state_manager)
sequence_length = fc_utils.sequence_length_from_sparse_tensor(
sparse_tensors.id_tensor)
return SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=dense_tensor, sequence_length=sequence_length)
def _get_sequence_dense_tensor(self,
inputs,
weight_collections=None,
trainable=None):
return _raise_shared_embedding_column_error()
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.categorical_column]
def _get_config(self):
"""See 'FeatureColumn` base class."""
raise NotImplementedError()
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
raise NotImplementedError()
def _check_shape(shape, key):
"""Returns shape if it's valid, raises error otherwise."""
assert shape is not None
if not nest.is_sequence(shape):
shape = [shape]
shape = tuple(shape)
for dimension in shape:
if not isinstance(dimension, int):
raise TypeError('shape dimensions must be integer. '
'shape: {}, key: {}'.format(shape, key))
if dimension < 1:
raise ValueError('shape dimensions must be greater than 0. '
'shape: {}, key: {}'.format(shape, key))
return shape
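# Editor's note (hedged examples, not part of the original source):
#   _check_shape(4, 'price')       -> (4,)
#   _check_shape((2, 3), 'image')  -> (2, 3)
#   _check_shape((0,), 'bad')      -> ValueError (dimensions must be > 0)
#   _check_shape((2.0,), 'bad')    -> TypeError (dimensions must be integers)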
class SharedMultiHashEmbeddingColumn(
DenseColumn,
SequenceDenseColumn,
fc_old._DenseColumn, # pylint: disable=protected-access
fc_old._SequenceDenseColumn, # pylint: disable=protected-access
collections.namedtuple(
        'SharedMultiHashEmbeddingColumn',
('categorical_column', 'dimension', 'shared_name','hash_combiner',
'combiner', 'initializer', 'ckpt_to_load_from', 'tensor_name_in_ckpt',
'max_norm', 'trainable', 'coalesced_scope'))):
"""See `embedding_column`."""
@property
def _is_v2_column(self):
return True
@property
def name(self):
"""See `FeatureColumn` base class."""
return '{}_shared_multi_hash_embedding'.format(self.categorical_column.name)
@property
def var_scope_name(self):
return self.shared_name
@property
def embedding_name(self):
return self.shared_name
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return self.categorical_column.parse_example_spec
@property
def _parse_example_spec(self):
return _raise_shared_embedding_column_error()
def transform_feature(self, transformation_cache, state_manager):
"""See `FeatureColumn` base class."""
return transformation_cache.get(self.categorical_column, state_manager)
def _transform_feature(self, inputs):
return _raise_shared_embedding_column_error()
@property
def variable_shape(self):
"""See `DenseColumn` base class."""
return tensor_shape.TensorShape([self.dimension])
@property
def _variable_shape(self):
return _raise_shared_embedding_column_error()
def get_embedding(self):
shared_embedding_collection = ops.get_collection(self.embedding_name)
if shared_embedding_collection:
if len(shared_embedding_collection) > 1:
raise ValueError(
'Collection {} can only contain one variable. '
'Suggested fix A: Choose a unique name for this collection. '
'Suggested fix B: Do not add any variables to this collection. '
'The feature_column library already adds a variable under the '
            'hood.'.format(self.embedding_name))
embedding_weights = shared_embedding_collection[0]
return embedding_weights
else:
raise ValueError("Embedding not created yet.")
def create_state(self, state_manager):
"""Creates the embedding lookup variable."""
shared_embedding_collection = ops.get_collection(self.embedding_name)
if shared_embedding_collection:
if len(shared_embedding_collection) > 1:
raise ValueError(
'Collection {} can only contain one variable. '
'Suggested fix A: Choose a unique name for this collection. '
'Suggested fix B: Do not add any variables to this collection. '
'The feature_column library already adds a variable under the '
            'hood.'.format(self.embedding_name))
else:
num_buckets = getattr(self.categorical_column, 'num_buckets',
self.categorical_column._num_buckets) # pylint: disable=protected-access
embedding_shape = (num_buckets, self.dimension)
state_manager.create_variable(
self,
name='embedding_weights',
shape=embedding_shape,
dtype=dtypes.float32,
trainable=self.trainable,
use_resource=True,
initializer=self.initializer)
embedding_weights = state_manager.get_variable(
self, name='embedding_weights')
ops.add_to_collection(self.embedding_name,
embedding_weights)
def _get_dense_tensor_internal_helper(self, sparse_tensors,
embedding_weights):
sparse_ids = sparse_tensors.id_tensor
sparse_weights = sparse_tensors.weight_tensor
if self.ckpt_to_load_from is not None:
to_restore = embedding_weights
if isinstance(to_restore, variables.PartitionedVariable):
to_restore = to_restore._get_variable_list() # pylint: disable=protected-access
checkpoint_utils.init_from_checkpoint(self.ckpt_to_load_from, {
self.tensor_name_in_ckpt: to_restore
})
# Return embedding lookup result.
return embedding_ops.safe_embedding_lookup_multi_dim(
embedding_weights=embedding_weights,
sparse_ids=sparse_ids,
sparse_weights=sparse_weights,
combiners=[self.combiner, self.hash_combiner],
name='%s_weights' % self.name,
max_norm=self.max_norm,
weight_axis=-2)
def _get_dense_tensor_internal(self, sparse_tensors, state_manager):
"""Private method that follows the signature of get_dense_tensor."""
embedding_weights = self.get_embedding()
return self._get_dense_tensor_internal_helper(sparse_tensors,
embedding_weights)
def get_dense_tensor(self, transformation_cache, state_manager):
"""Returns the embedding lookup result."""
if isinstance(self.categorical_column, SequenceCategoricalColumn):
raise ValueError(
'In embedding_column: {}. '
'categorical_column must not be of type SequenceCategoricalColumn. '
'Suggested fix A: If you wish to use DenseFeatures, use a '
'non-sequence categorical_column_with_*. '
'Suggested fix B: If you wish to create sequence input, use '
'SequenceFeatures instead of DenseFeatures. '
'Given (type {}): {}'.format(self.name, type(self.categorical_column),
self.categorical_column))
if self.coalesced_scope:
return self.coalesced_scope.get_dense_tensor_by_column_v2(
self, transformation_cache, state_manager)
sparse_tensors = self.categorical_column.get_sparse_tensors(
transformation_cache, state_manager)
return self._get_dense_tensor_internal(sparse_tensors, state_manager)
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
return _raise_shared_embedding_column_error()
def get_sequence_dense_tensor(self, transformation_cache, state_manager):
"""See `SequenceDenseColumn` base class."""
if not isinstance(self.categorical_column, SequenceCategoricalColumn):
raise ValueError(
'In embedding_column: {}. '
'categorical_column must be of type SequenceCategoricalColumn '
'to use SequenceFeatures. '
'Suggested fix: Use one of sequence_categorical_column_with_*. '
'Given (type {}): {}'.format(self.name, type(self.categorical_column),
self.categorical_column))
dense_tensor = self._get_dense_tensor_internal(transformation_cache,
state_manager)
sparse_tensors = self.categorical_column.get_sparse_tensors(
transformation_cache, state_manager)
sequence_length = fc_utils.sequence_length_from_sparse_tensor(
sparse_tensors.id_tensor)
return SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=dense_tensor, sequence_length=sequence_length)
def _get_sequence_dense_tensor(self,
inputs,
weight_collections=None,
trainable=None):
return _raise_shared_embedding_column_error()
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.categorical_column]
def _get_config(self):
"""See 'FeatureColumn` base class."""
raise NotImplementedError()
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
raise NotImplementedError()
_global_coalesced_scopes = []
@tf_export('feature_column.coalesced_embedding_scope')
@contextlib.contextmanager
def coalesced_embedding_scope(name=None, num_partitions=None):
global _global_coalesced_scopes
scope = CoalescedEmbeddingScope(name, num_partitions)
_global_coalesced_scopes.append(scope)
  try:
    yield scope
  finally:
    scope = _global_coalesced_scopes.pop()
  scope.build()
def current_coalesced_scope():
  return _global_coalesced_scopes[-1] if _global_coalesced_scopes else None
@tf_export('feature_column.CoalescedEmbeddingScope')
class CoalescedEmbeddingScope(coalesced_utils.CoalescedScopeBase):
def __init__(self, name=None, num_partitions=None):
if name is None:
name = 'CoalescedEmbedding'
self._num_partitions = num_partitions
super(CoalescedEmbeddingScope, self).__init__(name)
def allowed_column_types(self):
return CoalescedEmbeddingColumn._COALESCING_TYPES
def build(self):
if self._built:
return
cluster = collections.defaultdict(list)
for name, column in self._columns.items():
h = coalesced_utils.make_cluster_signature(column)
cluster[h].append((name, column))
for h, names_and_columns in cluster.items():
names_and_columns.sort(key=lambda x: x[1].name)
for h, names_and_columns in cluster.items():
names, columns = zip(*names_and_columns)
coalesced_name = self.get_name()
coalesced_column = CoalescedEmbeddingColumn(
columns, coalesced_name, self._num_partitions)
for name in names:
self._coalesced_map[name] = coalesced_column
self._built = True
class CoalescedEmbeddingColumn(object):
"""Coalescing _EmbeddingColumns into one according to signature.
Args:
Raises:
"""
_COALESCING_TYPES = (
EmbeddingColumn,
SharedEmbeddingColumn,
SharedEmbeddingColumnV2,
SharedMultiHashEmbeddingColumn,
)
def __init__(self, columns, name, num_partitions=None):
for i, c in enumerate(columns):
if not isinstance(c, CoalescedEmbeddingColumn._COALESCING_TYPES):
        raise ValueError('columns must be a list of EmbeddingColumns, '
                         'Given {} at index {}'.format(c, i))
if len(columns) == 0:
raise ValueError('columns cannot be empty')
coalesced_utils.check_coalesced_columns_compatible(columns)
for c in columns:
if c not in coalesced_utils.get_embedding_signature():
raise ValueError('signature not found for column: {}'.format(c))
if num_partitions is not None:
self._partitioner = \
partitioned_variables.fixed_size_partitioner(num_partitions)
else:
self._partitioner = variable_scope.get_variable_scope().partitioner
if self._partitioner is None:
      logging.log(logging.WARN, 'No partitioner found in outer variable'
                  ' scopes, using default: fixed_size_partitioner(1)')
self._partitioner = partitioned_variables.fixed_size_partitioner(1)
self._columns = columns
self._runtime_columns = collections.defaultdict(list)
self._runtime_col_multi_hash = {}
self.build_runtime_columns(columns)
self._name = name
self._default_attr = coalesced_utils.get_embedding_signature()[columns[0]]
self._unique_columns, self._indices_map = \
coalesced_utils.deduplicate_shared_embedding(self._columns)
save_slice_infos, tensor_slices, total_size = \
coalesced_utils.build_slice_info(self._unique_columns, self._partitioner)
self._save_slice_infos = save_slice_infos
self._tensor_slices = tensor_slices
self._local_offsets = []
for save_slice_list in save_slice_infos:
offset = 0
offset_list = []
for save_slice in save_slice_list:
offset_list.append(offset)
offset += save_slice.var_shape[0]
self._local_offsets.append(offset_list)
self._global_offsets = [[0 for j in range(len(tensor_slices))]
for i in range(len(save_slice_infos))]
offset = 0
for i in range(len(tensor_slices)):
for j in range(len(save_slice_infos)):
self._global_offsets[j][i] = offset
ts = tensor_slices[i][j]
offset += ts[0].stop - ts[0].start
self._has_replace_var_name = False
self._var_name_replace_maps = []
self._total_bucket_size = total_size
def _get_attr_from_signature(self, sig, attr):
sig = json.loads(sig)
return sig.get(attr)
def build_runtime_columns(self, columns):
for i, column in enumerate(columns):
h = coalesced_utils._make_runtime_signature(column)
self._runtime_columns[h].append((i, column))
self._runtime_col_multi_hash[h] = self._get_attr_from_signature(h, 'hash_combiner')
@property
def name(self):
return self._name
@property
def columns(self):
return self._columns
def _get_unique_index(self, column):
if hasattr(column, 'embedding_name'):
name = column.embedding_name
else:
name = column.name
if name not in self._indices_map:
raise ValueError('column {} not coalesced'.format(name))
return self._indices_map[name]
def encode(self, data, index):
if not isinstance(data, sparse_tensor_lib.SparseTensor):
raise ValueError('data should be a SparseTensor, Given {}'.format(data))
values = gen_feature_column_ops.coalesced_bucketized_embedding_encode(
math_ops.cast(data.values, dtype=dtypes.int64),
self._local_offsets[index],
self._global_offsets[index])
return sparse_tensor_lib.SparseTensor(indices=data.indices,
values=values,
dense_shape=data.dense_shape)
def make_sparse_inputs(self, transformation_cache, state_manager):
result_list = []
for h, runtime_columns in self._runtime_columns.items():
ids_list = []
weights_list = []
weight_type = None
for c in runtime_columns:
sparse_tensors = c[1].categorical_column.get_sparse_tensors(
transformation_cache, state_manager)
ids, weights = fc_utils.parse_sparse_data(sparse_tensors)
if ids is None:
raise ValueError('sparse ids cannot be None')
index = self._get_unique_index(c[1])
ids_list.append(self.encode(ids, index))
weights_list.append(weights)
if weights is not None:
if weight_type is None:
weight_type = weights.dtype
elif weight_type != weights.dtype:
raise ValueError('all weights should have same dtype, but got '
'{} and {}'.format(weight_type, weights.dtype))
if weight_type is None:
weight_type = dtypes.float32
format_rank = 3 if self._runtime_col_multi_hash[h] else 2
result = coalesced_utils.coalesce_sparse_data(ids_list, weights_list, weight_type, format_rank=format_rank)
result_list.append(result)
return result_list
def _check_weight_slice_compatible(self, weight):
if isinstance(weight, variables.PartitionedVariable):
parts = list(weight)
if len(parts) != len(self._tensor_slices):
raise ValueError('Variable parts num not equal to tensor slices num')
weight_slice_offset = []
offset = 0
for part in parts:
weight_slice_offset.append(offset)
offset += int(part.shape[0])
for i in range(len(parts)):
if weight_slice_offset[i] != self._global_offsets[0][i]:
raise ValueError('Variable parts not equal to tensor slices')
else:
if len(self._tensor_slices) != 1:
raise ValueError('Coalesced weights is not partitioned, but '
'tensor slices has {} parts'.format(
len(self._tensor_slices)))
def _get_embedding_name(self):
if not hasattr(self, '_embedding_weights'):
      raise ValueError('_get_embedding_name should be called after the '
                       'embedding weights have been created.')
if isinstance(self._embedding_weights, variables.PartitionedVariable):
name = list(self._embedding_weights)[0].name
offset = len(name.split('/')[-1]) + 1
else:
name = self._embedding_weights.name
offset = len(name.split(':')[-1]) + 1
name = name[:-offset]
return name
def _generate_shared_column_replace_map(self):
for i, column in enumerate(self._unique_columns):
if isinstance(column, SharedEmbeddingColumn):
var_scope_name = variable_scope.get_variable_scope().name
var_name = column.embedding_name
shared_emb_name = var_scope_name + "/" + var_name if len(var_scope_name) > 0 else var_name
self._var_name_replace_maps.append(
(self._get_embedding_name(), shared_emb_name))
else:
self._var_name_replace_maps.append((self._name, column.embedding_name))
def _replace_slot_name(self, embedding_weights):
if self._has_replace_var_name:
return
self._generate_shared_column_replace_map()
if isinstance(embedding_weights, variables.PartitionedVariable):
# set save_slice_info
parts = list(embedding_weights)
for i, part in enumerate(parts):
tensor_slices = self._tensor_slices[i]
save_slices = [slice_list[i] for slice_list in self._save_slice_infos]
raw_info = part._save_slice_info
for j, save_slice in enumerate(save_slices):
save_slice.full_name = raw_info.full_name.replace(
self._var_name_replace_maps[j][0], self._var_name_replace_maps[j][1])
save_info = coalesced_utils.CoalescedSaveSliceInfo(
raw_info.full_name, raw_info.full_shape, raw_info.var_offset,
raw_info.var_shape, raw_info.var_full_name, save_slices,
tensor_slices)
part._set_save_slice_info(save_info)
else:
# TODO
pass
self._has_replace_var_name = True
def get_or_create_embedding_weights(self):
if not hasattr(self, '_embedding_weights'):
dimension = self._default_attr.dimension
dtype = self._default_attr.dtype
initializer = self._default_attr.initializer
trainable = self._default_attr.trainable
embedding_weights = variable_scope.get_variable(
"embedding_weights",
shape=[self._total_bucket_size, dimension],
dtype=dtype,
initializer=initializer,
trainable=trainable,
partitioner=self._partitioner)
# check slice info match with global offsets
self._check_weight_slice_compatible(embedding_weights)
self._embedding_weights = embedding_weights
return self._embedding_weights
def _embedding_lookup_sparse(self, embedding_weights, ids, weights, combiner, hash_combiner=''):
if not hash_combiner:
return embedding_ops.safe_embedding_lookup_sparse(
embedding_weights=embedding_weights,
sparse_ids=ids,
sparse_weights=weights,
combiner=combiner)
else:
return embedding_ops.safe_embedding_lookup_multi_dim(
embedding_weights=embedding_weights,
sparse_ids=ids,
sparse_weights=weights,
combiners=[combiner, hash_combiner],
weight_axis=-2)
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
return _raise_shared_embedding_column_error()
def get_dense_tensor(self, transformation_cache, state_manager):
embedding_weights = self.get_or_create_embedding_weights()
lookup_input_list = self.make_sparse_inputs(
transformation_cache, state_manager)
self._replace_slot_name(embedding_weights)
# Return embedding lookup result.
embedding_outputs = []
    for lookup_input, runtime_pair in zip(lookup_input_list,
                                          self._runtime_columns.items()):
hash_combiner = self._runtime_col_multi_hash[runtime_pair[0]]
cids_and_columns = runtime_pair[1]
cids, columns = zip(*cids_and_columns)
embeddings = self._embedding_lookup_sparse(
embedding_weights,
lookup_input[0],
lookup_input[1],
coalesced_utils.get_signature_attributes(columns[0]).combiner,
hash_combiner)
values = array_ops.split(embeddings, lookup_input[2])
results = []
for value, origin_shape, col in zip(values, lookup_input[3], columns):
origin_rank = array_ops.size(origin_shape)
if coalesced_utils.get_signature_attributes(col).combiner == 'tile':
real_dim = array_ops.gather(origin_shape, origin_rank - 1) * self._default_attr.dimension
value = array_ops.slice(value,
[0, 0],
[-1, real_dim])
value = array_ops.reshape(
value,
array_ops.concat([
array_ops.slice(origin_shape, [0], [origin_rank - 1]),
array_ops.slice(array_ops.shape(value), [1], [-1])
], 0))
results.append(value)
embedding_outputs.extend(zip(cids, results))
return list(zip(*sorted(embedding_outputs)))[1]
def _check_shape(shape, key):
"""Returns shape if it's valid, raises error otherwise."""
assert shape is not None
if not nest.is_sequence(shape):
shape = [shape]
shape = tuple(shape)
for dimension in shape:
if not isinstance(dimension, int):
raise TypeError('shape dimensions must be integer. '
'shape: {}, key: {}'.format(shape, key))
if dimension < 1:
raise ValueError('shape dimensions must be greater than 0. '
'shape: {}, key: {}'.format(shape, key))
return shape
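# Worked example of the normalization above: `_check_shape(10, 'price')`
# returns `(10,)`, `_check_shape([3, 2], 'image')` returns `(3, 2)`, and a
# non-integer or non-positive dimension raises TypeError or ValueError.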
class HashedCategoricalColumn(
CategoricalColumn,
fc_old._CategoricalColumn, # pylint: disable=protected-access
collections.namedtuple('HashedCategoricalColumn',
('key', 'hash_bucket_size', 'dtype'))):
"""see `categorical_column_with_hash_bucket`."""
@property
def _is_v2_column(self):
return True
@property
def name(self):
"""See `FeatureColumn` base class."""
return self.key
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return {self.key: parsing_ops.VarLenFeature(self.dtype)}
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
return self.parse_example_spec
def _transform_input_tensor(self, input_tensor):
"""Hashes the values in the feature_column."""
if not isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
raise ValueError('SparseColumn input must be a SparseTensor.')
fc_utils.assert_string_or_int(
input_tensor.dtype,
prefix='column_name: {} input_tensor'.format(self.key))
if self.dtype.is_integer != input_tensor.dtype.is_integer:
raise ValueError(
'Column dtype and SparseTensors dtype must be compatible. '
'key: {}, column dtype: {}, tensor dtype: {}'.format(
self.key, self.dtype, input_tensor.dtype))
if self.dtype == dtypes.string:
sparse_values = input_tensor.values
else:
sparse_values = string_ops.as_string(input_tensor.values)
sparse_id_values = string_ops.string_to_hash_bucket_fast(
sparse_values, self.hash_bucket_size, name='lookup')
return sparse_tensor_lib.SparseTensor(
input_tensor.indices, sparse_id_values, input_tensor.dense_shape)
def transform_feature(self, transformation_cache, state_manager):
"""Hashes the values in the feature_column."""
input_tensor = _to_sparse_input_and_drop_ignore_values(
transformation_cache.get(self.key, state_manager))
return self._transform_input_tensor(input_tensor)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))
return self._transform_input_tensor(input_tensor)
@property
def num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return self.hash_bucket_size
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _num_buckets(self):
return self.num_buckets
def get_sparse_tensors(self, transformation_cache, state_manager):
"""See `CategoricalColumn` base class."""
return CategoricalColumn.IdWeightPair(
transformation_cache.get(self, state_manager), None)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
del weight_collections
del trainable
return CategoricalColumn.IdWeightPair(inputs.get(self), None)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.key]
def _get_config(self):
"""See 'FeatureColumn` base class."""
config = dict(zip(self._fields, self))
config['dtype'] = self.dtype.name
return config
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
return cls(**kwargs)
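# Usage sketch for the column above, assuming the public
# `tf.feature_column.categorical_column_with_hash_bucket` constructor; kept as
# comments so the module has no import-time side effects.
#
#   terms = tf.feature_column.categorical_column_with_hash_bucket(
#       'terms', hash_bucket_size=100000)
#   terms_emb = tf.feature_column.embedding_column(terms, dimension=16)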
class EmbeddingCategoricalColumn(
CategoricalColumn,
fc_old._CategoricalColumn, # pylint: disable=protected-access
    collections.namedtuple('EmbeddingCategoricalColumn',
('key', 'dtype', 'partition_num', 'ev_option'))):
@property
def _is_v2_column(self):
return True
@property
def name(self):
"""See `FeatureColumn` base class."""
return self.key
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return {self.key: parsing_ops.VarLenFeature(self.dtype)}
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
return self.parse_example_spec
def _transform_input_tensor(self, input_tensor):
"""Hashes the values in the feature_column."""
if not isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
raise ValueError('SparseColumn input must be a SparseTensor.')
fc_utils.assert_string_or_int(
input_tensor.dtype,
prefix='column_name: {} input_tensor'.format(self.key))
if self.dtype.is_integer != input_tensor.dtype.is_integer:
raise ValueError(
'Column dtype and SparseTensors dtype must be compatible. '
'key: {}, column dtype: {}, tensor dtype: {}'.format(
self.key, self.dtype, input_tensor.dtype))
if self.dtype == dtypes.string:
max_value = np.iinfo(dtypes.int64.as_numpy_dtype).max
sparse_id_values = string_ops.string_to_hash_bucket_fast(
input_tensor.values, max_value)
else:
sparse_id_values = input_tensor.values
return sparse_tensor_lib.SparseTensor(
input_tensor.indices, sparse_id_values, input_tensor.dense_shape)
def transform_feature(self, transformation_cache, state_manager):
"""Hashes the values in the feature_column."""
input_tensor = _to_sparse_input_and_drop_ignore_values(
transformation_cache.get(self.key, state_manager))
return self._transform_input_tensor(input_tensor)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))
return self._transform_input_tensor(input_tensor)
@property
def num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return 1
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _num_buckets(self):
return 1
def get_sparse_tensors(self, transformation_cache, state_manager):
"""See `CategoricalColumn` base class."""
return CategoricalColumn.IdWeightPair(
transformation_cache.get(self, state_manager), None)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
del weight_collections
del trainable
return CategoricalColumn.IdWeightPair(inputs.get(self), None)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.key]
def _get_config(self):
"""See 'FeatureColumn` base class."""
config = dict(zip(self._fields, self))
config['dtype'] = self.dtype.name
return config
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
return cls(**kwargs)
class AdaptiveEmbeddingCategoricalColumn(
CategoricalColumn,
fc_old._CategoricalColumn, # pylint: disable=protected-access
collections.namedtuple('AdaptiveEmbeddingCategoricalColumn',
('key', 'hash_bucket_size', 'dtype',
                            'partition_num', 'ev_option'))):
@property
def _is_v2_column(self):
return True
@property
def name(self):
"""See `FeatureColumn` base class."""
return self.key
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return {self.key: parsing_ops.VarLenFeature(self.dtype)}
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
return self.parse_example_spec
def set_adaptive_mask_tensor(self, adaptive_mask_tensor):
self.adaptive_mask_tensor = adaptive_mask_tensor
def _transform_input_tensor(self, input_tensor):
flat_ids = array_ops.reshape(input_tensor.values, [-1])
original_indices = math_ops.range(array_ops.size(flat_ids))
parts = data_flow_ops.dynamic_partition(original_indices, self.adaptive_mask_tensor, 2)
spids_part = data_flow_ops.dynamic_partition(flat_ids, self.adaptive_mask_tensor, 2)
if self.dtype == dtypes.string:
hash_ids = string_ops.string_to_hash_bucket_fast(
spids_part[0], self.hash_bucket_size, name="lookup_hash")
ev_ids = string_ops.string_to_hash_bucket_fast(
spids_part[1], np.iinfo(dtypes.int64.as_numpy_dtype).max)
self.hash_ev_ids = string_ops.string_to_hash_bucket_fast(
spids_part[1], self.hash_bucket_size, name="lookup_hash_ev")
sparse_id_values = data_flow_ops.dynamic_stitch(parts, [hash_ids, ev_ids])
else:
hash_ids = string_ops.string_to_hash_bucket_fast(
string_ops.as_string(spids_part[0]), self.hash_bucket_size, name="lookup_hash")
self.hash_ev_ids = string_ops.string_to_hash_bucket_fast(
string_ops.as_string(spids_part[1]), self.hash_bucket_size, name="lookup_hash_ev")
ev_ids = spids_part[1]
sparse_id_values = data_flow_ops.dynamic_stitch(parts, [hash_ids, ev_ids])
return sparse_tensor_lib.SparseTensor(input_tensor.indices, sparse_id_values,
input_tensor.dense_shape)
def transform_feature(self, transformation_cache, state_manager):
"""Hashes the values in the feature_column."""
input_tensor = _to_sparse_input_and_drop_ignore_values(
transformation_cache.get(self.key, state_manager))
return self._transform_input_tensor(input_tensor)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))
return self._transform_input_tensor(input_tensor)
@property
def num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return self.hash_bucket_size
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _num_buckets(self):
return self.num_buckets
def get_sparse_tensors(self, transformation_cache, state_manager):
"""See `CategoricalColumn` base class."""
return CategoricalColumn.IdWeightPair(
transformation_cache.get(self, state_manager), None)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
del weight_collections
del trainable
return CategoricalColumn.IdWeightPair(inputs.get(self), None)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.key]
def _get_config(self):
"""See 'FeatureColumn` base class."""
config = dict(zip(self._fields, self))
config['dtype'] = self.dtype.name
return config
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
return cls(**kwargs)
class HashOnlyCategoricalColumn(
CategoricalColumn,
fc_old._CategoricalColumn, # pylint: disable=protected-access
collections.namedtuple('HashOnlyCategoricalColumn',
('key', 'hash_type', 'allow_neg', 'dtype'))):
"""see `categorical_column_with_hash`."""
@property
def _is_v2_column(self):
return True
@property
def name(self):
"""See `FeatureColumn` base class."""
return self.key
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return {self.key: parsing_ops.VarLenFeature(self.dtype)}
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
return self.parse_example_spec
def _transform_input_tensor(self, input_tensor):
"""Hashes the values in the feature_column."""
if not isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
raise ValueError('SparseColumn input must be a SparseTensor.')
fc_utils.assert_string_or_int(
input_tensor.dtype,
prefix='column_name: {} input_tensor'.format(self.key))
if self.dtype.is_integer != input_tensor.dtype.is_integer:
raise ValueError(
'Column dtype and SparseTensors dtype must be compatible. '
'key: {}, column dtype: {}, tensor dtype: {}'.format(
self.key, self.dtype, input_tensor.dtype))
if self.dtype == dtypes.string:
sparse_values = input_tensor.values
else:
sparse_values = string_ops.as_string(input_tensor.values)
sparse_id_values = string_ops.string_to_hash(sparse_values,
hash_type=self.hash_type,
allow_neg=self.allow_neg)
return sparse_tensor_lib.SparseTensor(
input_tensor.indices, sparse_id_values, input_tensor.dense_shape)
def transform_feature(self, transformation_cache, state_manager):
"""Hashes the values in the feature_column."""
input_tensor = _to_sparse_input_and_drop_ignore_values(
transformation_cache.get(self.key, state_manager))
return self._transform_input_tensor(input_tensor)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))
return self._transform_input_tensor(input_tensor)
@property
def num_buckets(self):
"""Returns number of buckets in this sparse feature."""
raise ValueError("categorical_column_with_hash does not has attr `_num_buckets`"
" if you want to look up embedding with embedding column, "
"please use hashtable feature column.")
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _num_buckets(self):
return self.num_buckets
def get_sparse_tensors(self, transformation_cache, state_manager):
"""See `CategoricalColumn` base class."""
return CategoricalColumn.IdWeightPair(
transformation_cache.get(self, state_manager), None)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
del weight_collections
del trainable
return CategoricalColumn.IdWeightPair(inputs.get(self), None)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.key]
def _get_config(self):
"""See 'FeatureColumn` base class."""
config = dict(zip(self._fields, self))
config['dtype'] = self.dtype.name
return config
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
return cls(**kwargs)
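# Usage sketch, assuming a `categorical_column_with_hash(key, hash_type,
# allow_neg, dtype)` constructor mirroring this column's fields (the exact
# public signature is not shown in this file). Note that `num_buckets` is
# deliberately unavailable, so the result is intended for hashtable-backed
# embedding lookup rather than a fixed-size embedding variable.
#
#   query = categorical_column_with_hash('query', hash_type='farm',
#                                        allow_neg=False)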
class MultiHashedCategoricalColumn(
CategoricalColumn,
fc_old._CategoricalColumn, # pylint: disable=protected-access
collections.namedtuple('MultiHashedCategoricalColumn',
('key', 'hash_bucket_size', 'hash_types', 'dtype'))):
"""see `categorical_column_with_multi_hash_bucket`."""
@property
def _is_v2_column(self):
return True
@property
def name(self):
"""See `FeatureColumn` base class."""
return self.key
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return {self.key: parsing_ops.VarLenFeature(self.dtype)}
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
return self.parse_example_spec
def _transform_input_tensor(self, input_tensor):
"""Hashes the values in the feature_column."""
if not isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
raise ValueError('SparseColumn input must be a SparseTensor.')
fc_utils.assert_string_or_int(
input_tensor.dtype,
prefix='column_name: {} input_tensor'.format(self.key))
if self.dtype.is_integer != input_tensor.dtype.is_integer:
raise ValueError(
'Column dtype and SparseTensors dtype must be compatible. '
'key: {}, column dtype: {}, tensor dtype: {}'.format(
self.key, self.dtype, input_tensor.dtype))
if self.dtype == dtypes.string:
sparse_values = input_tensor.values
else:
sparse_values = string_ops.as_string(input_tensor.values)
sparse_values = string_ops.string_to_hash(sparse_values,
hash_type='farm',
allow_neg=False,
num_buckets=self.hash_bucket_size)
sparse_values = string_ops.as_string(sparse_values)
hash_sparse_tensors = []
new_dense_shape = array_ops.concat([input_tensor.dense_shape, [1]], axis=0)
for i, hash_type in enumerate(self.hash_types):
sparse_bucket_values = string_ops.string_to_hash(sparse_values,
hash_type=hash_type,
allow_neg=False,
num_buckets=self.hash_bucket_size)
new_sp = sparse_tensor_lib.SparseTensor(input_tensor.indices, sparse_bucket_values, input_tensor.dense_shape)
hash_sparse_tensors.append(sparse_ops.sparse_reshape(new_sp, shape=new_dense_shape))
output_tensor = sparse_ops.sparse_concat(sp_inputs=hash_sparse_tensors, axis=-1, name="multihash_concat")
return output_tensor
def transform_feature(self, transformation_cache, state_manager):
"""Hashes the values in the feature_column."""
input_tensor = _to_sparse_input_and_drop_ignore_values(
transformation_cache.get(self.key, state_manager))
return self._transform_input_tensor(input_tensor)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))
return self._transform_input_tensor(input_tensor)
@property
def num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return self.hash_bucket_size
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _num_buckets(self):
return self.num_buckets
def get_sparse_tensors(self, transformation_cache, state_manager):
"""See `CategoricalColumn` base class."""
return CategoricalColumn.IdWeightPair(
transformation_cache.get(self, state_manager), None)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
del weight_collections
del trainable
return CategoricalColumn.IdWeightPair(inputs.get(self), None)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.key]
def _get_config(self):
"""See 'FeatureColumn` base class."""
config = dict(zip(self._fields, self))
config['dtype'] = self.dtype.name
return config
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
return cls(**kwargs)
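# Usage sketch, assuming a `categorical_column_with_multi_hash_bucket(key,
# hash_bucket_size, hash_types, dtype)` constructor mirroring this column's
# fields (the exact public signature and hash type names here are
# illustrative, not guaranteed). Each input id is hashed once per entry in
# `hash_types` and the resulting buckets are concatenated along a new trailing
# sparse dimension, which pairs with the multi-hash embedding combiners above.
#
#   tokens = categorical_column_with_multi_hash_bucket(
#       'tokens', hash_bucket_size=100000, hash_types=['farm', 'murmur'])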
class VocabularyFileCategoricalColumn(
CategoricalColumn,
fc_old._CategoricalColumn, # pylint: disable=protected-access
collections.namedtuple('VocabularyFileCategoricalColumn',
('key', 'vocabulary_file', 'vocabulary_size',
'num_oov_buckets', 'dtype', 'default_value'))):
"""See `categorical_column_with_vocabulary_file`."""
@property
def _is_v2_column(self):
return True
@property
def name(self):
"""See `FeatureColumn` base class."""
return self.key
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return {self.key: parsing_ops.VarLenFeature(self.dtype)}
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
return self.parse_example_spec
def _transform_input_tensor(self, input_tensor, state_manager=None):
"""Creates a lookup table for the vocabulary."""
if self.dtype.is_integer != input_tensor.dtype.is_integer:
raise ValueError(
'Column dtype and SparseTensors dtype must be compatible. '
'key: {}, column dtype: {}, tensor dtype: {}'.format(
self.key, self.dtype, input_tensor.dtype))
fc_utils.assert_string_or_int(
input_tensor.dtype,
prefix='column_name: {} input_tensor'.format(self.key))
key_dtype = self.dtype
if input_tensor.dtype.is_integer:
# `index_table_from_file` requires 64-bit integer keys.
key_dtype = dtypes.int64
input_tensor = math_ops.cast(input_tensor, dtypes.int64)
name = '{}_lookup'.format(self.key)
table = lookup_ops.index_table_from_file(
vocabulary_file=self.vocabulary_file,
num_oov_buckets=self.num_oov_buckets,
vocab_size=self.vocabulary_size,
default_value=self.default_value,
key_dtype=key_dtype,
name=name)
if state_manager is not None:
state_manager.add_resource(self, name, table)
return table.lookup(input_tensor)
def transform_feature(self, transformation_cache, state_manager):
"""Creates a lookup table for the vocabulary."""
input_tensor = _to_sparse_input_and_drop_ignore_values(
transformation_cache.get(self.key, state_manager))
return self._transform_input_tensor(input_tensor, state_manager)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))
return self._transform_input_tensor(input_tensor)
@property
def num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return self.vocabulary_size + self.num_oov_buckets
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _num_buckets(self):
return self.num_buckets
def get_sparse_tensors(self, transformation_cache, state_manager):
"""See `CategoricalColumn` base class."""
return CategoricalColumn.IdWeightPair(
transformation_cache.get(self, state_manager), None)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
del weight_collections
del trainable
return CategoricalColumn.IdWeightPair(inputs.get(self), None)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.key]
def _get_config(self):
"""See 'FeatureColumn` base class."""
config = dict(zip(self._fields, self))
config['dtype'] = self.dtype.name
return config
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
return cls(**kwargs)
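# Usage sketch, assuming the public
# `tf.feature_column.categorical_column_with_vocabulary_file` constructor and
# a placeholder vocabulary path; kept as comments so the module has no
# import-time side effects.
#
#   states = tf.feature_column.categorical_column_with_vocabulary_file(
#       key='state', vocabulary_file='/path/to/states.txt',
#       vocabulary_size=50, num_oov_buckets=5)
#   states_onehot = tf.feature_column.indicator_column(states)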
class VocabularyListCategoricalColumn(
CategoricalColumn,
fc_old._CategoricalColumn, # pylint: disable=protected-access
collections.namedtuple(
'VocabularyListCategoricalColumn',
('key', 'vocabulary_list', 'dtype', 'default_value', 'num_oov_buckets'))
):
"""See `categorical_column_with_vocabulary_list`."""
@property
def _is_v2_column(self):
return True
@property
def name(self):
"""See `FeatureColumn` base class."""
return self.key
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return {self.key: parsing_ops.VarLenFeature(self.dtype)}
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
return self.parse_example_spec
def _transform_input_tensor(self, input_tensor, state_manager=None):
"""Creates a lookup table for the vocabulary list."""
if self.dtype.is_integer != input_tensor.dtype.is_integer:
raise ValueError(
'Column dtype and SparseTensors dtype must be compatible. '
'key: {}, column dtype: {}, tensor dtype: {}'.format(
self.key, self.dtype, input_tensor.dtype))
fc_utils.assert_string_or_int(
input_tensor.dtype,
prefix='column_name: {} input_tensor'.format(self.key))
key_dtype = self.dtype
if input_tensor.dtype.is_integer:
# `index_table_from_tensor` requires 64-bit integer keys.
key_dtype = dtypes.int64
input_tensor = math_ops.cast(input_tensor, dtypes.int64)
name = '{}_lookup'.format(self.key)
table = lookup_ops.index_table_from_tensor(
vocabulary_list=tuple(self.vocabulary_list),
default_value=self.default_value,
num_oov_buckets=self.num_oov_buckets,
dtype=key_dtype,
name=name)
if state_manager is not None:
state_manager.add_resource(self, name, table)
return table.lookup(input_tensor)
def transform_feature(self, transformation_cache, state_manager):
"""Creates a lookup table for the vocabulary list."""
input_tensor = _to_sparse_input_and_drop_ignore_values(
transformation_cache.get(self.key, state_manager))
return self._transform_input_tensor(input_tensor, state_manager)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))
return self._transform_input_tensor(input_tensor)
@property
def num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return len(self.vocabulary_list) + self.num_oov_buckets
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _num_buckets(self):
return self.num_buckets
def get_sparse_tensors(self, transformation_cache, state_manager):
"""See `CategoricalColumn` base class."""
return CategoricalColumn.IdWeightPair(
transformation_cache.get(self, state_manager), None)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
del weight_collections
del trainable
return CategoricalColumn.IdWeightPair(inputs.get(self), None)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.key]
def _get_config(self):
"""See 'FeatureColumn` base class."""
config = dict(zip(self._fields, self))
config['dtype'] = self.dtype.name
return config
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
return cls(**kwargs)
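# Usage sketch, assuming the public
# `tf.feature_column.categorical_column_with_vocabulary_list` constructor;
# kept as comments so the module has no import-time side effects.
#
#   colors = tf.feature_column.categorical_column_with_vocabulary_list(
#       key='colors', vocabulary_list=('R', 'G', 'B'), num_oov_buckets=2)
#   colors_onehot = tf.feature_column.indicator_column(colors)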
class IdentityCategoricalColumn(
CategoricalColumn,
fc_old._CategoricalColumn, # pylint: disable=protected-access
collections.namedtuple('IdentityCategoricalColumn',
('key', 'number_buckets', 'default_value'))):
"""See `categorical_column_with_identity`."""
@property
def _is_v2_column(self):
return True
@property
def name(self):
"""See `FeatureColumn` base class."""
return self.key
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return {self.key: parsing_ops.VarLenFeature(dtypes.int64)}
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
return self.parse_example_spec
def _transform_input_tensor(self, input_tensor):
"""Returns a SparseTensor with identity values."""
if not input_tensor.dtype.is_integer:
raise ValueError(
'Invalid input, not integer. key: {} dtype: {}'.format(
self.key, input_tensor.dtype))
values = math_ops.cast(input_tensor.values, dtypes.int64, name='values')
num_buckets = math_ops.cast(
self.num_buckets, dtypes.int64, name='num_buckets')
zero = math_ops.cast(0, dtypes.int64, name='zero')
if self.default_value is None:
# Fail if values are out-of-range.
assert_less = check_ops.assert_less(
values,
num_buckets,
data=(values, num_buckets),
message='Bucket index for categorical column '
'"{}" exceeds number of buckets'.format(self.name),
name='assert_less_than_num_buckets')
assert_greater = check_ops.assert_greater_equal(
values,
zero,
data=(values,),
message='Negative bucket index for categorical column "{}"'.format(
self.name),
name='assert_greater_or_equal_0')
with ops.control_dependencies((assert_less, assert_greater)):
values = array_ops.identity(values)
else:
# Assign default for out-of-range values.
values = array_ops.where_v2(
math_ops.logical_or(
values < zero, values >= num_buckets, name='out_of_range'),
array_ops.fill(
dims=array_ops.shape(values),
value=math_ops.cast(self.default_value, dtypes.int64),
name='default_values'), values)
return sparse_tensor_lib.SparseTensor(
indices=input_tensor.indices,
values=values,
dense_shape=input_tensor.dense_shape)
def transform_feature(self, transformation_cache, state_manager):
"""Returns a SparseTensor with identity values."""
input_tensor = _to_sparse_input_and_drop_ignore_values(
transformation_cache.get(self.key, state_manager))
return self._transform_input_tensor(input_tensor)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))
return self._transform_input_tensor(input_tensor)
@property
def num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return self.number_buckets
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _num_buckets(self):
return self.num_buckets
def get_sparse_tensors(self, transformation_cache, state_manager):
"""See `CategoricalColumn` base class."""
return CategoricalColumn.IdWeightPair(
transformation_cache.get(self, state_manager), None)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
del weight_collections
del trainable
return CategoricalColumn.IdWeightPair(inputs.get(self), None)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.key]
def _get_config(self):
"""See 'FeatureColumn` base class."""
return dict(zip(self._fields, self))
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
return cls(**kwargs)
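# Usage sketch, assuming the public
# `tf.feature_column.categorical_column_with_identity` constructor; ids
# outside `[0, num_buckets)` map to `default_value` when it is set, and raise
# at runtime otherwise.
#
#   video_id = tf.feature_column.categorical_column_with_identity(
#       key='video_id', num_buckets=1000000, default_value=0)
#   video_emb = tf.feature_column.embedding_column(video_id, dimension=32)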
class WeightedCategoricalColumn(
CategoricalColumn,
fc_old._CategoricalColumn, # pylint: disable=protected-access
collections.namedtuple(
'WeightedCategoricalColumn',
('categorical_column', 'weight_feature_key', 'dtype'))):
"""See `weighted_categorical_column`."""
@property
def _is_v2_column(self):
return (isinstance(self.categorical_column, FeatureColumn) and
self.categorical_column._is_v2_column) # pylint: disable=protected-access
@property
def name(self):
"""See `FeatureColumn` base class."""
return '{}_weighted_by_{}'.format(
self.categorical_column.name, self.weight_feature_key)
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
config = self.categorical_column.parse_example_spec
if self.weight_feature_key in config:
raise ValueError('Parse config {} already exists for {}.'.format(
config[self.weight_feature_key], self.weight_feature_key))
config[self.weight_feature_key] = parsing_ops.VarLenFeature(self.dtype)
return config
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
config = self.categorical_column._parse_example_spec # pylint: disable=protected-access
if self.weight_feature_key in config:
raise ValueError('Parse config {} already exists for {}.'.format(
config[self.weight_feature_key], self.weight_feature_key))
config[self.weight_feature_key] = parsing_ops.VarLenFeature(self.dtype)
return config
@property
def num_buckets(self):
"""See `DenseColumn` base class."""
return self.categorical_column.num_buckets
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _num_buckets(self):
return self.categorical_column._num_buckets # pylint: disable=protected-access
def _transform_weight_tensor(self, weight_tensor):
if weight_tensor is None:
raise ValueError('Missing weights {}.'.format(self.weight_feature_key))
weight_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
weight_tensor)
if self.dtype != weight_tensor.dtype.base_dtype:
raise ValueError('Bad dtype, expected {}, but got {}.'.format(
self.dtype, weight_tensor.dtype))
if not isinstance(weight_tensor, sparse_tensor_lib.SparseTensor):
# The weight tensor can be a regular Tensor. In this case, sparsify it.
weight_tensor = _to_sparse_input_and_drop_ignore_values(
weight_tensor, ignore_value=0.0)
if not weight_tensor.dtype.is_floating:
weight_tensor = math_ops.cast(weight_tensor, dtypes.float32)
return weight_tensor
def transform_feature(self, transformation_cache, state_manager):
"""Applies weights to tensor generated from `categorical_column`'."""
weight_tensor = transformation_cache.get(self.weight_feature_key,
state_manager)
weight_tensor = self._transform_weight_tensor(weight_tensor)
return (transformation_cache.get(self.categorical_column, state_manager),
weight_tensor)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
"""Applies weights to tensor generated from `categorical_column`'."""
weight_tensor = inputs.get(self.weight_feature_key)
weight_tensor = self._transform_weight_tensor(weight_tensor)
return (inputs.get(self.categorical_column), weight_tensor)
def get_sparse_tensors(self, transformation_cache, state_manager):
"""See `CategoricalColumn` base class."""
tensors = transformation_cache.get(self, state_manager)
return CategoricalColumn.IdWeightPair(tensors[0], tensors[1])
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
del weight_collections
del trainable
tensors = inputs.get(self)
return CategoricalColumn.IdWeightPair(tensors[0], tensors[1])
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.categorical_column, self.weight_feature_key]
def _get_config(self):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import serialize_feature_column # pylint: disable=g-import-not-at-top
config = dict(zip(self._fields, self))
config['categorical_column'] = serialize_feature_column(
self.categorical_column)
config['dtype'] = self.dtype.name
return config
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import deserialize_feature_column # pylint: disable=g-import-not-at-top
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['categorical_column'] = deserialize_feature_column(
config['categorical_column'], custom_objects, columns_by_name)
kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
return cls(**kwargs)
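# Usage sketch, assuming the public
# `tf.feature_column.weighted_categorical_column` constructor; the weight
# feature supplies one float weight per sparse id (e.g. term frequencies).
#
#   terms = tf.feature_column.categorical_column_with_hash_bucket(
#       'terms', hash_bucket_size=100000)
#   weighted_terms = tf.feature_column.weighted_categorical_column(
#       terms, weight_feature_key='term_frequencies')
#   weighted_onehot = tf.feature_column.indicator_column(weighted_terms)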
class WeightedMultiHashedCategoricalColumn(
CategoricalColumn,
fc_old._CategoricalColumn, # pylint: disable=protected-access
collections.namedtuple(
'WeightedMultiHashedCategoricalColumn',
('categorical_column', 'weight_feature_key', 'dtype'))):
"""See `weighted_multi_hash_categorical_column`."""
@property
def _is_v2_column(self):
return (isinstance(self.categorical_column, FeatureColumn) and
self.categorical_column._is_v2_column) # pylint: disable=protected-access
@property
def name(self):
"""See `FeatureColumn` base class."""
return '{}_weighted_by_{}'.format(
self.categorical_column.name, self.weight_feature_key)
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
config = self.categorical_column.parse_example_spec
if self.weight_feature_key in config:
raise ValueError('Parse config {} already exists for {}.'.format(
config[self.weight_feature_key], self.weight_feature_key))
config[self.weight_feature_key] = parsing_ops.VarLenFeature(self.dtype)
config[self.weight_feature_key].weighted_key = True
return config
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
config = self.categorical_column._parse_example_spec # pylint: disable=protected-access
if self.weight_feature_key in config:
raise ValueError('Parse config {} already exists for {}.'.format(
config[self.weight_feature_key], self.weight_feature_key))
config[self.weight_feature_key] = parsing_ops.VarLenFeature(self.dtype)
config[self.weight_feature_key].weighted_key = True
return config
@property
def num_buckets(self):
"""See `DenseColumn` base class."""
return self.categorical_column.num_buckets
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _num_buckets(self):
return self.categorical_column._num_buckets # pylint: disable=protected-access
def _transform_weight_tensor(self, weight_tensor):
if weight_tensor is None:
raise ValueError('Missing weights {}.'.format(self.weight_feature_key))
weight_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
weight_tensor)
if self.dtype != weight_tensor.dtype.base_dtype:
raise ValueError('Bad dtype, expected {}, but got {}.'.format(
self.dtype, weight_tensor.dtype))
if not isinstance(weight_tensor, sparse_tensor_lib.SparseTensor):
# The weight tensor can be a regular Tensor. In this case, sparsify it.
weight_tensor = _to_sparse_input_and_drop_ignore_values(
weight_tensor, ignore_value=0.0)
if not weight_tensor.dtype.is_floating:
      weight_tensor = math_ops.cast(weight_tensor, dtypes.float32)
new_dense_shape = array_ops.concat([weight_tensor.dense_shape, [1]], axis=0)
weight_tensor = sparse_ops.sparse_reshape(weight_tensor, shape=new_dense_shape)
weight_tensors = [weight_tensor] * len(self.categorical_column.hash_types)
weight_tensor = sparse_ops.sparse_concat(sp_inputs=weight_tensors, axis=-1, name="multihash_weighted_concat")
return weight_tensor
def transform_feature(self, transformation_cache, state_manager):
"""Applies weights to tensor generated from `categorical_column`'."""
weight_tensor = transformation_cache.get(self.weight_feature_key,
state_manager)
weight_tensor = self._transform_weight_tensor(weight_tensor)
return (transformation_cache.get(self.categorical_column, state_manager),
weight_tensor)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
"""Applies weights to tensor generated from `categorical_column`'."""
weight_tensor = inputs.get(self.weight_feature_key)
weight_tensor = self._transform_weight_tensor(weight_tensor)
return (inputs.get(self.categorical_column), weight_tensor)
def get_sparse_tensors(self, transformation_cache, state_manager):
"""See `CategoricalColumn` base class."""
tensors = transformation_cache.get(self, state_manager)
return CategoricalColumn.IdWeightPair(tensors[0], tensors[1])
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
del weight_collections
del trainable
tensors = inputs.get(self)
return CategoricalColumn.IdWeightPair(tensors[0], tensors[1])
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.categorical_column, self.weight_feature_key]
def _get_config(self):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import serialize_feature_column # pylint: disable=g-import-not-at-top
config = dict(zip(self._fields, self))
config['categorical_column'] = serialize_feature_column(
self.categorical_column)
config['dtype'] = self.dtype.name
return config
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import deserialize_feature_column # pylint: disable=g-import-not-at-top
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['categorical_column'] = deserialize_feature_column(
config['categorical_column'], custom_objects, columns_by_name)
kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
return cls(**kwargs)
class CrossedColumn(
CategoricalColumn,
fc_old._CategoricalColumn, # pylint: disable=protected-access
collections.namedtuple('CrossedColumn',
('keys', 'hash_bucket_size', 'hash_key'))):
"""See `crossed_column`."""
@property
def _is_v2_column(self):
for key in _collect_leaf_level_keys(self):
if isinstance(key, six.string_types):
continue
if not isinstance(key, FeatureColumn):
return False
if not key._is_v2_column: # pylint: disable=protected-access
return False
return True
@property
def name(self):
"""See `FeatureColumn` base class."""
feature_names = []
for key in _collect_leaf_level_keys(self):
if isinstance(key, (FeatureColumn, fc_old._FeatureColumn)): # pylint: disable=protected-access
feature_names.append(key.name)
else: # key must be a string
feature_names.append(key)
return '_X_'.join(sorted(feature_names))
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
config = {}
for key in self.keys:
if isinstance(key, FeatureColumn):
config.update(key.parse_example_spec)
elif isinstance(key, fc_old._FeatureColumn): # pylint: disable=protected-access
config.update(key._parse_example_spec) # pylint: disable=protected-access
else: # key must be a string
config.update({key: parsing_ops.VarLenFeature(dtypes.string)})
return config
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
return self.parse_example_spec
def transform_feature(self, transformation_cache, state_manager):
"""Generates a hashed sparse cross from the input tensors."""
feature_tensors = []
for key in _collect_leaf_level_keys(self):
if isinstance(key, six.string_types):
feature_tensors.append(transformation_cache.get(key, state_manager))
elif isinstance(key, (fc_old._CategoricalColumn, CategoricalColumn)): # pylint: disable=protected-access
ids_and_weights = key.get_sparse_tensors(transformation_cache,
state_manager)
if ids_and_weights.weight_tensor is not None:
raise ValueError(
'crossed_column does not support weight_tensor, but the given '
'column populates weight_tensor. '
'Given column: {}'.format(key.name))
feature_tensors.append(ids_and_weights.id_tensor)
else:
raise ValueError('Unsupported column type. Given: {}'.format(key))
return sparse_ops.sparse_cross_hashed(
inputs=feature_tensors,
num_buckets=self.hash_bucket_size,
hash_key=self.hash_key)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
"""Generates a hashed sparse cross from the input tensors."""
feature_tensors = []
for key in _collect_leaf_level_keys(self):
if isinstance(key, six.string_types):
feature_tensors.append(inputs.get(key))
elif isinstance(key, (CategoricalColumn, fc_old._CategoricalColumn)): # pylint: disable=protected-access
ids_and_weights = key._get_sparse_tensors(inputs) # pylint: disable=protected-access
if ids_and_weights.weight_tensor is not None:
raise ValueError(
'crossed_column does not support weight_tensor, but the given '
'column populates weight_tensor. '
'Given column: {}'.format(key.name))
feature_tensors.append(ids_and_weights.id_tensor)
else:
raise ValueError('Unsupported column type. Given: {}'.format(key))
return sparse_ops.sparse_cross_hashed(
inputs=feature_tensors,
num_buckets=self.hash_bucket_size,
hash_key=self.hash_key)
@property
def num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return self.hash_bucket_size
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _num_buckets(self):
return self.num_buckets
def get_sparse_tensors(self, transformation_cache, state_manager):
"""See `CategoricalColumn` base class."""
return CategoricalColumn.IdWeightPair(
transformation_cache.get(self, state_manager), None)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
"""See `CategoricalColumn` base class."""
del weight_collections
del trainable
return CategoricalColumn.IdWeightPair(inputs.get(self), None)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return list(self.keys)
def _get_config(self):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import serialize_feature_column # pylint: disable=g-import-not-at-top
config = dict(zip(self._fields, self))
config['keys'] = tuple([serialize_feature_column(fc) for fc in self.keys])
return config
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import deserialize_feature_column # pylint: disable=g-import-not-at-top
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['keys'] = tuple([
deserialize_feature_column(c, custom_objects, columns_by_name)
for c in config['keys']
])
return cls(**kwargs)
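# Usage sketch, assuming the public `tf.feature_column.crossed_column`
# constructor; string keys refer to raw input features, and crosses may be
# nested inside other crosses.
#
#   lat_x_lon = tf.feature_column.crossed_column(
#       ['lat_bucket', 'lon_bucket'], hash_bucket_size=10000)
#   lat_x_lon_emb = tf.feature_column.embedding_column(lat_x_lon, dimension=8)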
def _collect_leaf_level_keys(cross):
"""Collects base keys by expanding all nested crosses.
Args:
cross: A `CrossedColumn`.
Returns:
A list of strings or `CategoricalColumn` instances.
"""
leaf_level_keys = []
for k in cross.keys:
if isinstance(k, CrossedColumn):
leaf_level_keys.extend(_collect_leaf_level_keys(k))
else:
leaf_level_keys.append(k)
return leaf_level_keys
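# Worked example of the expansion above: for
# crossed_column([crossed_column(['a', 'b'], 16), 'c'], 32), the collected
# leaf-level keys are ['a', 'b', 'c'].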
def _prune_invalid_ids(sparse_ids, sparse_weights):
"""Prune invalid IDs (< 0) from the input ids and weights."""
is_id_valid = math_ops.greater_equal(sparse_ids.values, 0)
if sparse_weights is not None:
is_id_valid = math_ops.logical_and(
is_id_valid,
array_ops.ones_like(sparse_weights.values, dtype=dtypes.bool))
sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_id_valid)
if sparse_weights is not None:
sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_id_valid)
return sparse_ids, sparse_weights
def _prune_invalid_weights(sparse_ids, sparse_weights):
"""Prune invalid weights (< 0) from the input ids and weights."""
if sparse_weights is not None:
is_weights_valid = math_ops.greater(sparse_weights.values, 0)
sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_weights_valid)
sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_weights_valid)
return sparse_ids, sparse_weights
class IndicatorColumn(
DenseColumn,
SequenceDenseColumn,
fc_old._DenseColumn, # pylint: disable=protected-access
fc_old._SequenceDenseColumn, # pylint: disable=protected-access
    collections.namedtuple('IndicatorColumn', ('categorical_column',))):
"""Represents a one-hot column for use in deep networks.
Args:
categorical_column: A `CategoricalColumn` which is created by
`categorical_column_with_*` function.
"""
@property
def _is_v2_column(self):
return (isinstance(self.categorical_column, FeatureColumn) and
self.categorical_column._is_v2_column) # pylint: disable=protected-access
@property
def name(self):
"""See `FeatureColumn` base class."""
return '{}_indicator'.format(self.categorical_column.name)
def _transform_id_weight_pair(self, id_weight_pair):
id_tensor = id_weight_pair.id_tensor
weight_tensor = id_weight_pair.weight_tensor
# If the underlying column is weighted, return the input as a dense tensor.
if weight_tensor is not None:
weighted_column = sparse_ops.sparse_merge(
sp_ids=id_tensor,
sp_values=weight_tensor,
vocab_size=int(self._variable_shape[-1]))
# Remove (?, -1) index.
weighted_column = sparse_ops.sparse_slice(weighted_column, [0, 0],
weighted_column.dense_shape)
      # Use scatter_nd to merge duplicated indices if they exist,
# instead of sparse_tensor_to_dense.
return array_ops.scatter_nd(weighted_column.indices,
weighted_column.values,
weighted_column.dense_shape)
dense_id_tensor = sparse_ops.sparse_tensor_to_dense(
id_tensor, default_value=-1)
# One hot must be float for tf.concat reasons since all other inputs to
# input_layer are float32.
one_hot_id_tensor = array_ops.one_hot(
dense_id_tensor,
depth=self._variable_shape[-1],
on_value=1.0,
off_value=0.0)
# Reduce to get a multi-hot per example.
return math_ops.reduce_sum(one_hot_id_tensor, axis=[-2])
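  # Example for _transform_id_weight_pair above: for a row whose sparse ids are
  # [0, 2] with num_buckets 3 and no weights, the one-hot tensors are
  # [1., 0., 0.] and [0., 0., 1.], so the reduce_sum produces the multi-hot
  # row [1., 0., 1.].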
def transform_feature(self, transformation_cache, state_manager):
"""Returns dense `Tensor` representing feature.
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
Returns:
Transformed feature `Tensor`.
Raises:
ValueError: if input rank is not known at graph building time.
"""
id_weight_pair = self.categorical_column.get_sparse_tensors(
transformation_cache, state_manager)
return self._transform_id_weight_pair(id_weight_pair)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
id_weight_pair = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access
return self._transform_id_weight_pair(id_weight_pair)
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return self.categorical_column.parse_example_spec
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
return self.categorical_column._parse_example_spec # pylint: disable=protected-access
@property
def variable_shape(self):
"""Returns a `TensorShape` representing the shape of the dense `Tensor`."""
if isinstance(self.categorical_column, FeatureColumn):
return tensor_shape.TensorShape([1, self.categorical_column.num_buckets])
else:
return tensor_shape.TensorShape([1, self.categorical_column._num_buckets]) # pylint: disable=protected-access
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _variable_shape(self):
return tensor_shape.TensorShape([1, self.categorical_column._num_buckets]) # pylint: disable=protected-access
def get_dense_tensor(self, transformation_cache, state_manager):
"""Returns dense `Tensor` representing feature.
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
Returns:
Dense `Tensor` created within `transform_feature`.
Raises:
ValueError: If `categorical_column` is a `SequenceCategoricalColumn`.
"""
if isinstance(self.categorical_column, SequenceCategoricalColumn):
raise ValueError(
'In indicator_column: {}. '
'categorical_column must not be of type SequenceCategoricalColumn. '
'Suggested fix A: If you wish to use DenseFeatures, use a '
'non-sequence categorical_column_with_*. '
'Suggested fix B: If you wish to create sequence input, use '
'SequenceFeatures instead of DenseFeatures. '
'Given (type {}): {}'.format(self.name, type(self.categorical_column),
self.categorical_column))
# Feature has been already transformed. Return the intermediate
# representation created by transform_feature.
return transformation_cache.get(self, state_manager)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
del weight_collections
del trainable
if isinstance(
self.categorical_column,
(SequenceCategoricalColumn, fc_old._SequenceCategoricalColumn)): # pylint: disable=protected-access
raise ValueError(
'In indicator_column: {}. '
'categorical_column must not be of type _SequenceCategoricalColumn. '
'Suggested fix A: If you wish to use DenseFeatures, use a '
'non-sequence categorical_column_with_*. '
'Suggested fix B: If you wish to create sequence input, use '
'SequenceFeatures instead of DenseFeatures. '
'Given (type {}): {}'.format(self.name, type(self.categorical_column),
self.categorical_column))
# Feature has been already transformed. Return the intermediate
# representation created by transform_feature.
return inputs.get(self)
def get_sequence_dense_tensor(self, transformation_cache, state_manager):
"""See `SequenceDenseColumn` base class."""
if not isinstance(self.categorical_column, SequenceCategoricalColumn):
raise ValueError(
'In indicator_column: {}. '
'categorical_column must be of type SequenceCategoricalColumn '
'to use SequenceFeatures. '
'Suggested fix: Use one of sequence_categorical_column_with_*. '
'Given (type {}): {}'.format(self.name, type(self.categorical_column),
self.categorical_column))
# Feature has been already transformed. Return the intermediate
# representation created by transform_feature.
dense_tensor = transformation_cache.get(self, state_manager)
sparse_tensors = self.categorical_column.get_sparse_tensors(
transformation_cache, state_manager)
sequence_length = fc_utils.sequence_length_from_sparse_tensor(
sparse_tensors.id_tensor)
return SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=dense_tensor, sequence_length=sequence_length)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sequence_dense_tensor(self,
inputs,
weight_collections=None,
trainable=None):
# Do nothing with weight_collections and trainable since no variables are
# created in this function.
del weight_collections
del trainable
if not isinstance(
self.categorical_column,
(SequenceCategoricalColumn, fc_old._SequenceCategoricalColumn)): # pylint: disable=protected-access
raise ValueError(
'In indicator_column: {}. '
'categorical_column must be of type _SequenceCategoricalColumn '
'to use SequenceFeatures. '
'Suggested fix: Use one of sequence_categorical_column_with_*. '
'Given (type {}): {}'.format(self.name, type(self.categorical_column),
self.categorical_column))
# Feature has been already transformed. Return the intermediate
# representation created by _transform_feature.
dense_tensor = inputs.get(self)
sparse_tensors = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access
sequence_length = fc_utils.sequence_length_from_sparse_tensor(
sparse_tensors.id_tensor)
return SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=dense_tensor, sequence_length=sequence_length)
@property
  def parents(self):
    """See `FeatureColumn` base class."""
return [self.categorical_column]
  def _get_config(self):
    """See `FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import serialize_feature_column # pylint: disable=g-import-not-at-top
config = dict(zip(self._fields, self))
config['categorical_column'] = serialize_feature_column(
self.categorical_column)
return config
@classmethod
  def _from_config(cls, config, custom_objects=None, columns_by_name=None):
    """See `FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import deserialize_feature_column # pylint: disable=g-import-not-at-top
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['categorical_column'] = deserialize_feature_column(
config['categorical_column'], custom_objects, columns_by_name)
return cls(**kwargs)
def _verify_static_batch_size_equality(tensors, columns):
"""Verify equality between static batch sizes.
Args:
tensors: iterable of input tensors.
columns: Corresponding feature columns.
Raises:
ValueError: in case of mismatched batch sizes.
"""
  # batch_size is a Dimension object.
expected_batch_size = None
for i in range(0, len(tensors)):
batch_size = tensor_shape.Dimension(tensor_shape.dimension_value(
tensors[i].shape[0]))
if batch_size.value is not None:
if expected_batch_size is None:
        batch_size_column_index = i
expected_batch_size = batch_size
elif not expected_batch_size.is_compatible_with(batch_size):
raise ValueError(
            'Batch size (first dimension) of each feature must be the same. '
'Batch size of columns ({}, {}): ({}, {})'.format(
                columns[batch_size_column_index].name, columns[i].name,
expected_batch_size, batch_size))
class SequenceCategoricalColumn(
CategoricalColumn,
fc_old._SequenceCategoricalColumn, # pylint: disable=protected-access
collections.namedtuple('SequenceCategoricalColumn',
('categorical_column'))):
"""Represents sequences of categorical data."""
@property
def _is_v2_column(self):
return (isinstance(self.categorical_column, FeatureColumn) and
self.categorical_column._is_v2_column) # pylint: disable=protected-access
@property
def name(self):
"""See `FeatureColumn` base class."""
return self.categorical_column.name
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return self.categorical_column.parse_example_spec
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
return self.categorical_column._parse_example_spec # pylint: disable=protected-access
def transform_feature(self, transformation_cache, state_manager):
"""See `FeatureColumn` base class."""
return self.categorical_column.transform_feature(transformation_cache,
state_manager)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
return self.categorical_column._transform_feature(inputs) # pylint: disable=protected-access
@property
def num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return self.categorical_column.num_buckets
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _num_buckets(self):
return self.categorical_column._num_buckets # pylint: disable=protected-access
def _get_sparse_tensors_helper(self, sparse_tensors):
id_tensor = sparse_tensors.id_tensor
weight_tensor = sparse_tensors.weight_tensor
# Expands third dimension, if necessary so that embeddings are not
# combined during embedding lookup. If the tensor is already 3D, leave
# as-is.
shape = array_ops.shape(id_tensor)
# Compute the third dimension explicitly instead of setting it to -1, as
# that doesn't work for dynamically shaped tensors with 0-length at runtime.
# This happens for empty sequences.
target_shape = [shape[0], shape[1], math_ops.reduce_prod(shape[2:])]
id_tensor = sparse_ops.sparse_reshape(id_tensor, target_shape)
if weight_tensor is not None:
weight_tensor = sparse_ops.sparse_reshape(weight_tensor, target_shape)
return CategoricalColumn.IdWeightPair(id_tensor, weight_tensor)
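  # Example for _get_sparse_tensors_helper above: a
  # [batch_size, max_sequence_length] SparseTensor of ids is reshaped to
  # [batch_size, max_sequence_length, 1], so each sequence step keeps its own
  # id(s) rather than being combined during the embedding lookup.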
def get_sparse_tensors(self, transformation_cache, state_manager):
"""Returns an IdWeightPair.
`IdWeightPair` is a pair of `SparseTensor`s which represents ids and
weights.
`IdWeightPair.id_tensor` is typically a `batch_size` x `num_buckets`
`SparseTensor` of `int64`. `IdWeightPair.weight_tensor` is either a
`SparseTensor` of `float` or `None` to indicate all weights should be
taken to be 1. If specified, `weight_tensor` must have exactly the same
shape and indices as `sp_ids`. Expected `SparseTensor` is same as parsing
output of a `VarLenFeature` which is a ragged matrix.
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
"""
sparse_tensors = self.categorical_column.get_sparse_tensors(
transformation_cache, state_manager)
return self._get_sparse_tensors_helper(sparse_tensors)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
sparse_tensors = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access
return self._get_sparse_tensors_helper(sparse_tensors)
@property
  def parents(self):
    """See `FeatureColumn` base class."""
return [self.categorical_column]
  def _get_config(self):
    """See `FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import serialize_feature_column # pylint: disable=g-import-not-at-top
config = dict(zip(self._fields, self))
config['categorical_column'] = serialize_feature_column(
self.categorical_column)
return config
@classmethod
  def _from_config(cls, config, custom_objects=None, columns_by_name=None):
    """See `FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import deserialize_feature_column # pylint: disable=g-import-not-at-top
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['categorical_column'] = deserialize_feature_column(
config['categorical_column'], custom_objects, columns_by_name)
return cls(**kwargs)
def _check_config_keys(config, expected_keys):
"""Checks that a config has all expected_keys."""
if set(config.keys()) != set(expected_keys):
raise ValueError('Invalid config: {}, expected keys: {}'.format(
config, expected_keys))
def _standardize_and_copy_config(config):
"""Returns a shallow copy of config with lists turned to tuples.
Keras serialization uses nest to listify everything.
This causes problems with the NumericColumn shape, which becomes
unhashable. We could try to solve this on the Keras side, but that
would require lots of tracking to avoid changing existing behavior.
Instead, we ensure here that we revive correctly.
Args:
config: dict that will be used to revive a Feature Column
Returns:
Shallow copy of config with lists turned to tuples.
"""
kwargs = config.copy()
for k, v in kwargs.items():
if isinstance(v, list):
kwargs[k] = tuple(v)
return kwargs
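# Example for _standardize_and_copy_config above:
# {'shape': [1, 2], 'key': 'price'} becomes {'shape': (1, 2), 'key': 'price'},
# which keeps the revived NumericColumn shape hashable.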
|
py | 1a538b2e4603d6c9ece887e3b0c9559f7ab56407 | import os
class Config:
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://vicky:aderazi@localhost/pitches'
SECRET_KEY = os.environ.get('SECRET_KEY')
SQLALCHEMY_TRACK_MODIFICATIONS = False
    UPLOADED_PHOTOS_DEST = 'app/static/photos'
# email configurations
MAIL_SERVER = 'smtp.googlemail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = os.environ.get("MAIL_USERNAME")
MAIL_PASSWORD = os.environ.get("MAIL_PASSWORD")
SIMPLEMDE_JS_IIFE = True
SIMPLEMDE_USE_CDN = True
class ProdConfig(Config):
    SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL")
class DevConfig(Config):
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://vicky:aderazi@localhost/pitches'
DEBUG = True
class TestConfig(Config):
pass
config_options = {
'development':DevConfig,
'production':ProdConfig,
'test':TestConfig
} |
py | 1a538b77e95dae78e137a176ea945bb8742cc258 | #!/usr/bin/env python
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is a complete rewrite of a file licensed as follows:
#
# Copyright (c) 2003, Frank Warmerdam <[email protected]>
# Copyright (c) 2008-2014, Even Rouault <even dot rouault at mines-paris . org>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
"""Test OGR handling of ESRI Shapefiles.
This is a rewrite of:
http://trac.osgeo.org/gdal/browser/trunk/autotest/ogr/ogr_shape.py.
"""
import os
from osgeo import ogr
import unittest
from autotest2.gcore import gcore_util
from autotest2.ogr import ogr_util
DRIVER = ogr_util.SHAPEFILE_DRIVER
EXT = '.shp'
def setUpModule():
ogr_util.SetupTestEnv()
def HaveGeos():
point1 = ogr.CreateGeometryFromWkt('POINT(10 20)')
point2 = ogr.CreateGeometryFromWkt('POINT(30 20)')
return point1.Union(point2) is not None
@ogr_util.SkipIfDriverMissing(DRIVER)
class OgrShapefileTest(ogr_util.DriverTestCase):
def setUp(self):
super(OgrShapefileTest, self).setUp(DRIVER, EXT)
def testReadPoint(self):
filepath = ogr_util.GetTestFilePath('shape/point/point.shp')
self.CheckOpen(filepath)
self.assertEqual(self.src.GetLayerCount(), 1)
layer = self.src.GetLayer()
self.assertEqual(layer.GetName(), 'point')
self.assertEqual(layer.GetFeatureCount(), 1)
self.assertEqual(layer.GetExtent(), (1.0, 1.0, 2.0, 2.0))
layer_defn = layer.GetLayerDefn()
self.assertEqual(layer_defn.GetFieldCount(), 1)
self.assertEqual(layer_defn.GetGeomType(), ogr.wkbPoint)
field_defn = layer_defn.GetFieldDefn(0)
self.assertEqual(field_defn.GetName(), 'FID')
self.assertEqual(field_defn.GetTypeName(), 'Integer64')
feature = layer.GetNextFeature()
self.assertEqual(feature.GetField(0), 0.0)
self.assertEqual(feature.GetFieldCount(), 1)
self.assertEqual(feature.GetGeomFieldCount(), 1)
self.assertEqual(feature.GetGeometryRef().ExportToWkt(), 'POINT (1 2)')
geometry_ref = feature.GetGeometryRef()
self.assertEqual(geometry_ref.GetPoint(), (1.0, 2.0, 0.0))
self.assertEqual(geometry_ref.GetPoint_2D(), (1.0, 2.0))
self.assertIsNone(geometry_ref.GetSpatialReference())
# These initial tests are overly complicated.
# TODO(schwehr): Test 01.
# TODO(schwehr): Test 02.
# TODO(schwehr): Test 03.
# TODO(schwehr): Test 04.
# TODO(schwehr): Test 05.
# TODO(schwehr): Test 06.
# TODO(schwehr): Test 07.
# TODO(schwehr): Test 08.
def test09SearchInsidePolyReturnNone(self):
filepath = ogr_util.GetTestFilePath('shape/simplepoly/poly.shp')
self.CheckOpen(filepath)
layer = self.src.GetLayer()
layer.SetSpatialFilterRect(-10, -130, 10, -110)
if HaveGeos():
self.assertEqual(layer.GetFeatureCount(), 0)
else:
self.assertEqual(layer.GetFeatureCount(), 1)
def test10SelectSomePolygonsByRegion(self):
filepath = ogr_util.GetTestFilePath('shape/simplepoly/poly.shp')
self.CheckOpen(filepath)
layer = self.src.GetLayer()
layer.SetSpatialFilterRect(-400, 22, -120, 400)
index = layer.GetLayerDefn().GetFieldIndex('FID')
fids = [feature.GetField(index) for feature in ogr_util.Features(layer)]
self.assertEqual(fids, [0, 4, 8])
def test11SelectAreaAndFidReturnNone(self):
filepath = ogr_util.GetTestFilePath('shape/simplepoly/poly.shp')
self.CheckOpen(filepath)
layer = self.src.GetLayer()
layer.SetAttributeFilter('FID = 5')
layer.SetSpatialFilterRect(-400, 22, -120, 400)
index = layer.GetLayerDefn().GetFieldIndex('FID')
fids = [feature.GetField(index) for feature in ogr_util.Features(layer)]
self.assertFalse(fids)
def test11SelectAreaAndFidReturnsOne(self):
filepath = ogr_util.GetTestFilePath('shape/simplepoly/poly.shp')
self.CheckOpen(filepath)
layer = self.src.GetLayer()
layer.SetAttributeFilter('FID = 4')
layer.SetSpatialFilterRect(-400, 22, -120, 400)
index = layer.GetLayerDefn().GetFieldIndex('FID')
fids = [feature.GetField(index) for feature in ogr_util.Features(layer)]
self.assertEqual(fids, [4])
def test12Multipolygon(self):
filepath = ogr_util.GetTestFilePath('shape/multipolygon/american-samoa.shp')
self.CheckOpen(filepath)
layer = self.src.GetLayer()
feature = layer.GetNextFeature()
geometry = feature.GetGeometryRef()
self.assertEqual(geometry.GetCoordinateDimension(), 2)
self.assertEqual(geometry.GetGeometryName(), 'MULTIPOLYGON')
self.assertEqual(geometry.GetGeometryCount(), 5)
point_counts = [15, 11, 17, 20, 9]
for geom_index in range(5):
poly = geometry.GetGeometryRef(geom_index)
self.assertEqual(poly.GetGeometryName(), 'POLYGON')
self.assertEqual(poly.GetGeometryCount(), 1)
self.assertEqual(poly.GetGeometryRef(0).GetPointCount(),
point_counts[geom_index])
def test13SetFeature(self):
with gcore_util.TestTemporaryDirectory(prefix='shape_setfeature') as tmpdir:
field_settings = (
('real_field', ogr.OFTReal, '1.23', '7.8', 7.8),
('int_field', ogr.OFTInteger, '2', '3', 3),
('str_field', ogr.OFTString, 'original', 'new', 'new')
)
for field_name, field_type, original, new, result in field_settings:
filepath = os.path.join(tmpdir, field_name + 'tmp.shp')
dst = self.driver.CreateDataSource(filepath)
layer = dst.CreateLayer('test_layer')
layer.CreateField(ogr.FieldDefn(field_name, field_type))
feature = ogr.Feature(layer.GetLayerDefn())
feature.SetField(field_name, original)
feature.SetGeometry(ogr.CreateGeometryFromWkt('POINT(4 5)'))
layer.CreateFeature(feature)
dst = None
dst = ogr.Open(filepath, update=True)
layer = dst.GetLayer()
feature = layer.GetFeature(0)
feature.SetField(field_name, new)
new_geom_str = 'POINT (9 0)'
feature.SetGeometry(ogr.CreateGeometryFromWkt(new_geom_str))
self.assertEqual(layer.SetFeature(feature), 0)
dst = None
self.CheckOpen(filepath)
layer = self.src.GetLayer()
feature = layer.GetFeature(0)
self.assertEqual(feature.GetField(0), result)
self.assertEqual(feature.GetGeometryRef().ExportToWkt(), new_geom_str)
if __name__ == '__main__':
unittest.main()
|
py | 1a538b95f7f7b7102b5bb17fbdccacc363010019 | #!/usr/bin/env python
"""Create a generator gen_seq() that creates the infinite geometric series:
1, 1/2, 1/4, 1/8...
Write a function first_N(num) that sums the first num values
Write a function until_small(epsilon) that sums the sequence until the
additional term is less than some small value epsilon.
Created on Sep 21, 2011
@author: paulross
"""
__author__ = 'Paul Ross'
__date__ = '2011-08-03'
__version__ = '0.1.0'
__rights__ = 'Copyright (c) 2011 Paul Ross. Copyright (c) 2015 AHL.'
import sys
import pytest
def gen_seq():
"""Generates the infinite geometric series 1, 1/2, 1/4, 1/8, ..."""
    value = 1.0
    while True:
        yield value
        value /= 2.0
def first_N(num):
"""Use gen_seq() to generate the sum of the first num values of the series."""
    gen = gen_seq()
    return sum(next(gen) for _ in range(num))
def until_small(epsilon):
"""Use gen_seq() to generate the sum of the series until a value smaller
than epsilon is encountered."""
    total = 0.0
    for value in gen_seq():
        if value < epsilon:
            return total
        total += value
def test_first_N():
assert 0.0 == first_N(0)
assert 1.0 == first_N(1)
assert 1.5 == first_N(2)
assert 1.75 == first_N(3)
assert 1.875 == first_N(4)
def test_until_small():
assert 1.5 == until_small(1.0 / 2)
assert 1.875 == until_small(1.0 / 8)
def main():
return pytest.main(__file__)
if __name__ == '__main__':
sys.exit(main())
|
py | 1a538ca1f102d1e86bc0935d24ff529f24072439 | """
WSGI config for ta_assistant_django project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ta_assistant_django.settings')
application = get_wsgi_application()
|
py | 1a538ccb1ca7b0e0a3f981f60a4cbc4aad22b64f | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-05-31 08:15
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('logger', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='logentry',
options={'ordering': ('-created',), 'permissions': (('see_all', "Can see every user's log entries"),)},
),
]
|
py | 1a538d6fe6823938a048fa4189eafcbc361ab3ac | import setuptools
with open("README.md", "r") as f:
long_description = f.read()
with open("requirements.txt") as f:
requires = f.read().splitlines()
setuptools.setup(
name="ondewo-client-utils",
version="0.1.0",
    author="Ondewo GmbH",
author_email="[email protected]",
description="This library contains utilities and base classes for gRPC clients.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/ondewo/ondewo-client-utils-python",
packages=[
np for np in filter(lambda n: n.startswith("ondewo.") or n == "ondewo", setuptools.find_packages())
],
package_data={"ondewo.utils": ["py.typed"]},
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
"Development Status :: 3 - Alpha",
"Topic :: Software Development :: Libraries",
],
python_requires=">=2.6,!=3.0.*",
install_requires=requires,
)
|
py | 1a538f51b6597ea5a78be69882acb4148ec3397b | """
TODO:
Implement a test that proves file configs override rather than overwrite
the defaults. Unfortunately this functionality will have to be implemented
first.
"""
import os
from unittest import mock
import pytest
import requests
from pianodb.pianodb import (number_of_workers, gen_dummy_cmd, get_config,
get_track_features)
class MockPage:
def __init__(self, status_code=200, content=''):
self.status_code = status_code
self.content = content if content else """
<!-- https://www.pandora.com/great-jazz-trio/s-wonderful/take-5 -->
<div class="artist_name" title="The Great Jazz Trio">
<span>by</span>
<span itemprop="byArtist">
<a href="/great-jazz-trio" class="artist_link hash">The Great Jazz Trio</a>
</span>
</div>
<div class="album_title" title="'S Wonderful">
<span>on</span>
<a href="/great-jazz-trio/s-wonderful" itemprop="inAlbum" class="album_link hash">'S Wonderful</a>
</div>
<div class="song_features clearfix">
<h2>Features of This Track</h2>
a piano solo<br>
an acoustic bass solo<br>
a groove oriented approach<br>
vamping harmony<br>
<div style="display: none;">
unusual rhythms<br>
</div>
<p>These are just a few of the hundreds of attributes cataloged for this track by the Music Genome Project.</p>
<a href="#" class="show_more">show more</a>
</div>
"""
@mock.patch('pianodb.pianodb.multiprocessing')
def test_pianodb_number_of_workers_is_double_cpu_count_plus_one(mp):
"""
Test that ``pianodb`` determines the number of workers to be double the CPU
count plus one.
Note:
This test patches the multiprocessing.cpu_count function to return a
constant that does not depend on the actual CPU count.
"""
mp.cpu_count.return_value = 6
assert number_of_workers() == 13
def test_pianodb_can_generate_dummy_click_commands():
"""
Test that ``pianodb`` can generate dummy instances of ``Click.Command`` that
have the correct ``name``, ``help``, and ``short_help``.
"""
cmd = gen_dummy_cmd('dummy')
assert cmd.name == 'dummy'
assert cmd.help == ("This is an unimplimented pianobar eventcmd handler. "
"Calling this subcommand will do absolutely nothing.")
assert cmd.short_help == 'unimplimented pianobar eventcmd'
@mock.patch.dict(os.environ, {'HOME': '/home/cleesej'})
@mock.patch('builtins.open', create=True)
@mock.patch('tempfile.gettempdir')
@mock.patch('pianodb.pianodb.multiprocessing')
def test_pianodb_has_config_defaults(mp, tmpdir, mock_open):
"""
Test that ``pianodb`` has config defaults that are used when getting its
configuration. In the absence of an option defined in a config file the
``pianodb`` config should contain these defaults.
"""
database = '/home/cleesej/.config/pianobar/piano.db'
server_database = '/faketmp/piano.db'
# Pretend we have a CPU count of 4.
mp.cpu_count.return_value = 4
# Pretend we have a fake temp dir.
tmpdir.return_value = '/faketmp'
# Pretend open will read a file with nothing in it.
mock_open.side_effect = [
mock.mock_open(read_data="").return_value,
]
# This is probably a good rationale for having a global default config dict.
expected_config = {
'client': {
'remote': None,
'threshold': 10,
'token': None,
'database': database,
},
'server': {
'interface': 'localhost',
'port': 8000,
'workers': 9,
'database': server_database,
}
}
# overrides: os.environ, os.path, open, multiprocessing.cpu_count
config = get_config()
assert config == expected_config
@mock.patch.dict(os.environ, {'HOME': '/home/cleesej'})
@mock.patch('builtins.open', create=True)
@mock.patch('tempfile.gettempdir')
@mock.patch('pianodb.pianodb.multiprocessing')
def test_pianodb_can_load_configs_from_optional_path(mp, tmpdir, mock_open):
"""
Test that ``pianodb`` can load a config file from a path other than
its own internal default by using the optional ``path`` argument.
"""
# Pretend we have a CPU count of 8.
mp.cpu_count.return_value = 8
# Pretend we have a fake temp dir.
    tmpdir.return_value = '/faketmp'
# Pretend open will read a file with nothing in it.
mock_open.side_effect = [
mock.mock_open(read_data="").return_value,
]
config = get_config(path='/spam/and/eggs')
mock_open.assert_called_once_with('/spam/and/eggs', 'r')
@mock.patch.dict(os.environ, {'HOME': '/home/cleesej'})
def test_pianodb_exits_fatally_without_a_config_file():
"""
Test that ``pianodb`` raises a ``SystemExit`` error with the appropriate
error message when attempting to load a nonexistent config.
"""
with pytest.raises(SystemExit) as err:
config = get_config(path='nonexistent')
assert str(err.value) == 'could not load config'
def test_pianodb_can_get_track_features(monkeypatch):
"""
Test that ``pianodb`` can extract track features from a specially formatted
web page.
"""
def _mock_page(url):
return MockPage()
monkeypatch.setattr(requests, 'get', _mock_page)
expected = [
'a piano solo',
'an acoustic bass solo',
'a groove oriented approach',
'vamping harmony',
'unusual rhythms',
]
assert get_track_features('https://fake-url.tld') == expected
def test_pianodb_track_features_empty_if_status_code_is_not_200(monkeypatch):
"""
Test that ``pianodb`` track features are empty when ``requests`` returns
a ``status_code`` that is not ``200``.
"""
def _mock_page(url):
return MockPage(status_code=418, content='teapot')
monkeypatch.setattr(requests, 'get', _mock_page)
assert get_track_features('https://fake-url.tld') == []
def test_pianodb_track_features_empty_if_requests_connection_error(monkeypatch):
"""
Test that ``pianodb`` track features are empty when ``requests`` raises a
``ConnectionError``.
"""
def _raise_connection_error(url):
raise requests.ConnectionError()
monkeypatch.setattr(requests, 'get', _raise_connection_error)
assert get_track_features('https://fake-url.tld') == []
|
py | 1a5390304451ef618005c5a41c63ce0bd4c4b7e1 | from cms.api import create_page
from djangocms_helper.base_test import BaseTestCase
from djangocms_reversion2.models import PageVersion
from djangocms_reversion2.utils import revert_page
from . import testutils
class PageRevisionCreateTestCase(BaseTestCase):
def test_a_revise_page(self):
language = 'en'
page = create_page(title='test_a_revise_page', template='page.html', language=language)
testutils.add_text(page, language, content=u"initial")
page_version = PageVersion.create_version(page.get_draft_object(), language,
version_parent=None, comment='', title='')
self.assertIsNotNone(page_version, msg='PageVersion creation failed')
def test_b_revise_page(self):
language = 'en'
draft = create_page(title='next', template='page.html', language=language).get_draft_object()
# create initial version
pv = PageVersion.create_version(draft, language, version_parent=None, comment='next', title='')
# we have a revised page containing the text 'initial' and add the text 'next'
testutils.add_text(draft, language, content=u"next")
html = testutils.get_html(request=self.get_page_request(draft, self.user))
self.assertIn('next', html, msg='could not add content')
        # we check that the revision does not contain 'next'
draft.refresh_from_db()
html = testutils.get_html(self.get_page_request(draft.page_versions.last().hidden_page, self.user))
self.assertNotIn('next', html, msg='content should not be added to an old revision')
try:
# now we create a new version
pv = PageVersion.create_version(draft, language, version_parent=None, comment='next', title='')
except AssertionError:
self.fail('Expected the page to be dirty, but it\'s clean')
# this version should contain the new text
draft.refresh_from_db()
html = testutils.get_html(request=self.get_page_request(pv.hidden_page, self.user))
self.assertIn('next', html, msg='new content is not in the latest version')
        # now we revert to the old version
revert_page(draft.page_versions.first(), language)
html = testutils.get_html(request=self.get_page_request(draft, self.user))
self.assertNotIn('next', html, msg='new content is still in the page')
# def test_b_revise_page_fields(self):
# LANGUAGE = 'en'
# pr = PageRevision.objects.get(page_id=self.page.id, language=LANGUAGE)
# self.assertEqual(pr.revision.comment, self.COMMENT)
# self.assertEqual(pr.revision.user, self.user)
# self.assertEqual(pr.language, self.LANGUAGE)
#
# def test_c_revise_page_page_is_revised(self):
# self.assertTrue(is_revised(self.page, self.LANGUAGE))
# self.assertTrue(PageMarker.objects.filter(language=self.LANGUAGE, page=self.page).exists())
# self.assertEqual(PageMarker.objects.get(language=self.LANGUAGE, page=self.page).page_revision, self.page_revision)
#
# def test_d_revise_page_revise_again_unsuccessful(self):
# new_revision = revise_page(self.page, language=self.LANGUAGE)
# self.assertEqual(new_revision, None)
# self.assertEqual(1, self.page.pagerevision_set.count())
#
#
# class PageRevisionUnmarkPageTestCase(DR2BaseTestCase, TestCase):
# def setUp(self):
# super(PageRevisionUnmarkPageTestCase, self).setUp()
# self.added_plugins = self.add_text(self.page, n=1)
#
# def test_a_page_unmarked(self):
# self.assertFalse(is_revised(self.page, self.LANGUAGE))
# self.assertFalse(PageMarker.objects.filter(language=self.LANGUAGE, page=self.page).exists())
#
#
# class PageRevisionRevertTestCase(DR2BaseTestCase, TestCase):
# def setUp(self):
# super(PageRevisionRevertTestCase, self).setUp()
# self.added_plugins = self.add_text(self.page, n=1)
# self.page_marker = revert_page(self.page_revision, self.request)
# self.initial_html = {
# ph.slot: self.get_current_html(ph) for ph in self.page.placeholders.all()
# }
#
# def test_a_revert_deletion(self):
# print Text.objects.all()
# for pl in self.added_plugins:
# try:
# pl.refresh_from_db()
# self.fail()
# except ObjectDoesNotExist:
# pass
#
# def test_b_revert_auto_revision(self):
# self.assertEqual(2, self.page.pagerevision_set.count())
# auto_revision = self.page.pagerevision_set.latest('pk')
# self.assertEqual(auto_revision.revision.comment, AUTO_REVISION_COMMENT)
#
# def test_c_revert_correct_html(self):
# for placeholder in self.page.placeholders.all():
# slot = placeholder.slot
# html = self.get_current_html(placeholder)
# self.assertEqual(self.initial_html[slot], html, slot)
#
|
py | 1a5390737e6a1f6bf78139de13e1ca0929b82da4 | class InvalidTokenError(Exception):
pass
|
py | 1a5390e18d2020d40a36b566140f487ec2de70d4 | import os
import numpy as onp
from numpy.testing import assert_allclose
import pytest
from jax import jit, pmap, random, vmap
from jax.lib import xla_bridge
import jax.numpy as np
from jax.scipy.special import logit
import numpyro
import numpyro.distributions as dist
from numpyro.distributions import constraints
from numpyro.infer import HMC, MCMC, NUTS
from numpyro.infer.mcmc import hmc
from numpyro.infer.util import initialize_model
from numpyro.util import fori_collect
@pytest.mark.parametrize('kernel_cls', [HMC, NUTS])
@pytest.mark.parametrize('dense_mass', [False, True])
def test_unnormalized_normal_x64(kernel_cls, dense_mass):
true_mean, true_std = 1., 0.5
warmup_steps, num_samples = 1000, 8000
def potential_fn(z):
return 0.5 * np.sum(((z - true_mean) / true_std) ** 2)
init_params = np.array(0.)
kernel = kernel_cls(potential_fn=potential_fn, trajectory_length=8, dense_mass=dense_mass)
mcmc = MCMC(kernel, warmup_steps, num_samples)
mcmc.run(random.PRNGKey(0), init_params=init_params)
hmc_states = mcmc.get_samples()
assert_allclose(np.mean(hmc_states), true_mean, rtol=0.05)
assert_allclose(np.std(hmc_states), true_std, rtol=0.05)
if 'JAX_ENABLE_x64' in os.environ:
assert hmc_states.dtype == np.float64
def test_correlated_mvn():
# This requires dense mass matrix estimation.
D = 5
warmup_steps, num_samples = 5000, 8000
true_mean = 0.
a = np.tril(0.5 * np.fliplr(np.eye(D)) + 0.1 * np.exp(random.normal(random.PRNGKey(0), shape=(D, D))))
true_cov = np.dot(a, a.T)
true_prec = np.linalg.inv(true_cov)
def potential_fn(z):
return 0.5 * np.dot(z.T, np.dot(true_prec, z))
init_params = np.zeros(D)
kernel = NUTS(potential_fn=potential_fn, dense_mass=True)
mcmc = MCMC(kernel, warmup_steps, num_samples)
mcmc.run(random.PRNGKey(0), init_params=init_params)
samples = mcmc.get_samples()
assert_allclose(np.mean(samples), true_mean, atol=0.02)
assert onp.sum(onp.abs(onp.cov(samples.T) - true_cov)) / D**2 < 0.02
@pytest.mark.parametrize('kernel_cls', [HMC, NUTS])
def test_logistic_regression_x64(kernel_cls):
N, dim = 3000, 3
warmup_steps, num_samples = 1000, 8000
data = random.normal(random.PRNGKey(0), (N, dim))
true_coefs = np.arange(1., dim + 1.)
logits = np.sum(true_coefs * data, axis=-1)
labels = dist.Bernoulli(logits=logits).sample(random.PRNGKey(1))
def model(labels):
coefs = numpyro.sample('coefs', dist.Normal(np.zeros(dim), np.ones(dim)))
logits = np.sum(coefs * data, axis=-1)
return numpyro.sample('obs', dist.Bernoulli(logits=logits), obs=labels)
kernel = kernel_cls(model=model, trajectory_length=8)
mcmc = MCMC(kernel, warmup_steps, num_samples)
mcmc.run(random.PRNGKey(2), labels)
samples = mcmc.get_samples()
assert_allclose(np.mean(samples['coefs'], 0), true_coefs, atol=0.22)
if 'JAX_ENABLE_x64' in os.environ:
assert samples['coefs'].dtype == np.float64
def test_uniform_normal():
true_coef = 0.9
num_warmup, num_samples = 1000, 1000
def model(data):
alpha = numpyro.sample('alpha', dist.Uniform(0, 1))
loc = numpyro.sample('loc', dist.Uniform(0, alpha))
numpyro.sample('obs', dist.Normal(loc, 0.1), obs=data)
data = true_coef + random.normal(random.PRNGKey(0), (1000,))
kernel = NUTS(model=model)
mcmc = MCMC(kernel, num_warmup=num_warmup, num_samples=num_samples)
mcmc.run(random.PRNGKey(2), data, collect_warmup=True)
samples = mcmc.get_samples()
assert len(samples['loc']) == num_warmup + num_samples
assert_allclose(np.mean(samples['loc'], 0), true_coef, atol=0.05)
def test_improper_normal():
true_coef = 0.9
def model(data):
alpha = numpyro.sample('alpha', dist.Uniform(0, 1))
loc = numpyro.param('loc', 0., constraint=constraints.interval(0., alpha))
numpyro.sample('obs', dist.Normal(loc, 0.1), obs=data)
data = true_coef + random.normal(random.PRNGKey(0), (1000,))
kernel = NUTS(model=model)
mcmc = MCMC(kernel, num_warmup=1000, num_samples=1000)
mcmc.run(random.PRNGKey(0), data)
samples = mcmc.get_samples()
assert_allclose(np.mean(samples['loc'], 0), true_coef, atol=0.05)
@pytest.mark.parametrize('kernel_cls', [HMC, NUTS])
def test_beta_bernoulli_x64(kernel_cls):
warmup_steps, num_samples = 500, 20000
def model(data):
alpha = np.array([1.1, 1.1])
beta = np.array([1.1, 1.1])
p_latent = numpyro.sample('p_latent', dist.Beta(alpha, beta))
numpyro.sample('obs', dist.Bernoulli(p_latent), obs=data)
return p_latent
true_probs = np.array([0.9, 0.1])
data = dist.Bernoulli(true_probs).sample(random.PRNGKey(1), (1000, 2))
kernel = kernel_cls(model=model, trajectory_length=1.)
mcmc = MCMC(kernel, num_warmup=warmup_steps, num_samples=num_samples, progress_bar=False)
mcmc.run(random.PRNGKey(2), data)
samples = mcmc.get_samples()
assert_allclose(np.mean(samples['p_latent'], 0), true_probs, atol=0.05)
if 'JAX_ENABLE_x64' in os.environ:
assert samples['p_latent'].dtype == np.float64
@pytest.mark.parametrize('kernel_cls', [HMC, NUTS])
@pytest.mark.parametrize('dense_mass', [False, True])
def test_dirichlet_categorical_x64(kernel_cls, dense_mass):
warmup_steps, num_samples = 100, 20000
def model(data):
concentration = np.array([1.0, 1.0, 1.0])
p_latent = numpyro.sample('p_latent', dist.Dirichlet(concentration))
numpyro.sample('obs', dist.Categorical(p_latent), obs=data)
return p_latent
true_probs = np.array([0.1, 0.6, 0.3])
data = dist.Categorical(true_probs).sample(random.PRNGKey(1), (2000,))
kernel = kernel_cls(model, trajectory_length=1., dense_mass=dense_mass)
mcmc = MCMC(kernel, warmup_steps, num_samples, progress_bar=False)
mcmc.run(random.PRNGKey(2), data)
samples = mcmc.get_samples()
assert_allclose(np.mean(samples['p_latent'], 0), true_probs, atol=0.02)
if 'JAX_ENABLE_x64' in os.environ:
assert samples['p_latent'].dtype == np.float64
def test_change_point_x64():
# Ref: https://forum.pyro.ai/t/i-dont-understand-why-nuts-code-is-not-working-bayesian-hackers-mail/696
warmup_steps, num_samples = 500, 3000
def model(data):
alpha = 1 / np.mean(data)
lambda1 = numpyro.sample('lambda1', dist.Exponential(alpha))
lambda2 = numpyro.sample('lambda2', dist.Exponential(alpha))
tau = numpyro.sample('tau', dist.Uniform(0, 1))
lambda12 = np.where(np.arange(len(data)) < tau * len(data), lambda1, lambda2)
numpyro.sample('obs', dist.Poisson(lambda12), obs=data)
count_data = np.array([
13, 24, 8, 24, 7, 35, 14, 11, 15, 11, 22, 22, 11, 57,
11, 19, 29, 6, 19, 12, 22, 12, 18, 72, 32, 9, 7, 13,
19, 23, 27, 20, 6, 17, 13, 10, 14, 6, 16, 15, 7, 2,
15, 15, 19, 70, 49, 7, 53, 22, 21, 31, 19, 11, 18, 20,
12, 35, 17, 23, 17, 4, 2, 31, 30, 13, 27, 0, 39, 37,
5, 14, 13, 22,
])
kernel = NUTS(model=model)
mcmc = MCMC(kernel, warmup_steps, num_samples)
mcmc.run(random.PRNGKey(4), count_data)
samples = mcmc.get_samples()
tau_posterior = (samples['tau'] * len(count_data)).astype(np.int32)
tau_values, counts = onp.unique(tau_posterior, return_counts=True)
mode_ind = np.argmax(counts)
mode = tau_values[mode_ind]
assert mode == 44
if 'JAX_ENABLE_x64' in os.environ:
assert samples['lambda1'].dtype == np.float64
assert samples['lambda2'].dtype == np.float64
assert samples['tau'].dtype == np.float64
@pytest.mark.parametrize('with_logits', ['True', 'False'])
def test_binomial_stable_x64(with_logits):
# Ref: https://github.com/pyro-ppl/pyro/issues/1706
warmup_steps, num_samples = 200, 200
def model(data):
p = numpyro.sample('p', dist.Beta(1., 1.))
if with_logits:
logits = logit(p)
numpyro.sample('obs', dist.Binomial(data['n'], logits=logits), obs=data['x'])
else:
numpyro.sample('obs', dist.Binomial(data['n'], probs=p), obs=data['x'])
data = {'n': 5000000, 'x': 3849}
kernel = NUTS(model=model)
mcmc = MCMC(kernel, warmup_steps, num_samples)
mcmc.run(random.PRNGKey(2), data)
samples = mcmc.get_samples()
assert_allclose(np.mean(samples['p'], 0), data['x'] / data['n'], rtol=0.05)
if 'JAX_ENABLE_x64' in os.environ:
assert samples['p'].dtype == np.float64
def test_improper_prior():
true_mean, true_std = 1., 2.
num_warmup, num_samples = 1000, 8000
def model(data):
mean = numpyro.param('mean', 0.)
std = numpyro.param('std', 1., constraint=constraints.positive)
return numpyro.sample('obs', dist.Normal(mean, std), obs=data)
data = dist.Normal(true_mean, true_std).sample(random.PRNGKey(1), (2000,))
kernel = NUTS(model=model)
mcmc = MCMC(kernel, num_warmup, num_samples)
mcmc.run(random.PRNGKey(2), data)
samples = mcmc.get_samples()
assert_allclose(np.mean(samples['mean']), true_mean, rtol=0.05)
assert_allclose(np.mean(samples['std']), true_std, rtol=0.05)
@pytest.mark.parametrize('kernel_cls', [HMC, NUTS])
@pytest.mark.parametrize('adapt_step_size', [True, False])
def test_diverging(kernel_cls, adapt_step_size):
data = random.normal(random.PRNGKey(0), (1000,))
def model(data):
loc = numpyro.sample('loc', dist.Normal(0., 1.))
numpyro.sample('obs', dist.Normal(loc, 1), obs=data)
kernel = kernel_cls(model, step_size=10., adapt_step_size=adapt_step_size, adapt_mass_matrix=False)
num_warmup = num_samples = 1000
mcmc = MCMC(kernel, num_warmup, num_samples)
mcmc.run(random.PRNGKey(1), data, extra_fields=['diverging'], collect_warmup=True)
num_divergences = mcmc.get_extra_fields()['diverging'].sum()
if adapt_step_size:
assert num_divergences <= num_warmup
else:
assert_allclose(num_divergences, num_warmup + num_samples)
def test_prior_with_sample_shape():
data = {
"J": 8,
"y": np.array([28.0, 8.0, -3.0, 7.0, -1.0, 1.0, 18.0, 12.0]),
"sigma": np.array([15.0, 10.0, 16.0, 11.0, 9.0, 11.0, 10.0, 18.0]),
}
def schools_model():
mu = numpyro.sample('mu', dist.Normal(0, 5))
tau = numpyro.sample('tau', dist.HalfCauchy(5))
theta = numpyro.sample('theta', dist.Normal(mu, tau), sample_shape=(data['J'],))
numpyro.sample('obs', dist.Normal(theta, data['sigma']), obs=data['y'])
num_samples = 500
mcmc = MCMC(NUTS(schools_model), num_warmup=500, num_samples=num_samples)
mcmc.run(random.PRNGKey(0))
assert mcmc.get_samples()['theta'].shape == (num_samples, data['J'])
@pytest.mark.parametrize('num_chains', [1, 2])
@pytest.mark.parametrize('chain_method', ['parallel', 'sequential', 'vectorized'])
@pytest.mark.parametrize('progress_bar', [True, False])
@pytest.mark.filterwarnings("ignore:There are not enough devices:UserWarning")
def test_empty_model(num_chains, chain_method, progress_bar):
def model():
pass
mcmc = MCMC(NUTS(model), num_warmup=10, num_samples=10, num_chains=num_chains,
chain_method=chain_method, progress_bar=progress_bar)
mcmc.run(random.PRNGKey(0))
assert mcmc.get_samples() == {}
@pytest.mark.parametrize('use_init_params', [False, True])
@pytest.mark.parametrize('chain_method', ['parallel', 'sequential', 'vectorized'])
@pytest.mark.skipif('XLA_FLAGS' not in os.environ, reason='without this mark, we have duplicated tests in Travis')
def test_chain(use_init_params, chain_method):
N, dim = 3000, 3
num_chains = 2
num_warmup, num_samples = 5000, 5000
data = random.normal(random.PRNGKey(0), (N, dim))
true_coefs = np.arange(1., dim + 1.)
logits = np.sum(true_coefs * data, axis=-1)
labels = dist.Bernoulli(logits=logits).sample(random.PRNGKey(1))
def model(labels):
coefs = numpyro.sample('coefs', dist.Normal(np.zeros(dim), np.ones(dim)))
logits = np.sum(coefs * data, axis=-1)
return numpyro.sample('obs', dist.Bernoulli(logits=logits), obs=labels)
kernel = NUTS(model=model)
mcmc = MCMC(kernel, num_warmup, num_samples, num_chains=num_chains)
mcmc.chain_method = chain_method
init_params = None if not use_init_params else \
{'coefs': np.tile(np.ones(dim), num_chains).reshape(num_chains, dim)}
mcmc.run(random.PRNGKey(2), labels, init_params=init_params)
samples_flat = mcmc.get_samples()
assert samples_flat['coefs'].shape[0] == num_chains * num_samples
samples = mcmc.get_samples(group_by_chain=True)
assert samples['coefs'].shape[:2] == (num_chains, num_samples)
assert_allclose(np.mean(samples_flat['coefs'], 0), true_coefs, atol=0.21)
@pytest.mark.parametrize('kernel_cls', [HMC, NUTS])
@pytest.mark.parametrize('chain_method', [
pytest.param('parallel', marks=pytest.mark.xfail(
reason='jit+pmap does not work in CPU yet')),
'sequential',
'vectorized',
])
@pytest.mark.skipif('CI' in os.environ, reason="Compiling the whole sampling process is slow.")
def test_chain_inside_jit(kernel_cls, chain_method):
# NB: this feature is useful for consensus MC.
# Caution: compiling time will be slow (~ 90s)
if chain_method == 'parallel' and xla_bridge.device_count() == 1:
pytest.skip('parallel method requires device_count greater than 1.')
warmup_steps, num_samples = 100, 2000
    # Here are settings which are currently supported.
rng_key = random.PRNGKey(2)
step_size = 1.
target_accept_prob = 0.8
trajectory_length = 1.
# Not supported yet:
# + adapt_step_size
# + adapt_mass_matrix
# + max_tree_depth
# + num_warmup
# + num_samples
def model(data):
concentration = np.array([1.0, 1.0, 1.0])
p_latent = numpyro.sample('p_latent', dist.Dirichlet(concentration))
numpyro.sample('obs', dist.Categorical(p_latent), obs=data)
return p_latent
@jit
def get_samples(rng_key, data, step_size, trajectory_length, target_accept_prob):
kernel = kernel_cls(model, step_size=step_size, trajectory_length=trajectory_length,
target_accept_prob=target_accept_prob)
mcmc = MCMC(kernel, warmup_steps, num_samples, num_chains=2, chain_method=chain_method,
progress_bar=False)
mcmc.run(rng_key, data)
return mcmc.get_samples()
true_probs = np.array([0.1, 0.6, 0.3])
data = dist.Categorical(true_probs).sample(random.PRNGKey(1), (2000,))
samples = get_samples(rng_key, data, step_size, trajectory_length, target_accept_prob)
assert_allclose(np.mean(samples['p_latent'], 0), true_probs, atol=0.02)
def test_extra_fields():
def model():
numpyro.sample('x', dist.Normal(0, 1), sample_shape=(5,))
mcmc = MCMC(NUTS(model), 1000, 1000)
mcmc.run(random.PRNGKey(0), extra_fields=('num_steps', 'adapt_state.step_size'))
samples = mcmc.get_samples(group_by_chain=True)
assert samples['x'].shape == (1, 1000, 5)
stats = mcmc.get_extra_fields(group_by_chain=True)
assert 'num_steps' in stats
assert stats['num_steps'].shape == (1, 1000)
assert 'adapt_state.step_size' in stats
assert stats['adapt_state.step_size'].shape == (1, 1000)
@pytest.mark.parametrize('algo', ['HMC', 'NUTS'])
def test_functional_beta_bernoulli_x64(algo):
warmup_steps, num_samples = 500, 20000
def model(data):
alpha = np.array([1.1, 1.1])
beta = np.array([1.1, 1.1])
p_latent = numpyro.sample('p_latent', dist.Beta(alpha, beta))
numpyro.sample('obs', dist.Bernoulli(p_latent), obs=data)
return p_latent
true_probs = np.array([0.9, 0.1])
data = dist.Bernoulli(true_probs).sample(random.PRNGKey(1), (1000, 2))
init_params, potential_fn, constrain_fn = initialize_model(random.PRNGKey(2), model, data)
init_kernel, sample_kernel = hmc(potential_fn, algo=algo)
hmc_state = init_kernel(init_params,
trajectory_length=1.,
num_warmup=warmup_steps)
samples = fori_collect(0, num_samples, sample_kernel, hmc_state,
transform=lambda x: constrain_fn(x.z))
assert_allclose(np.mean(samples['p_latent'], 0), true_probs, atol=0.05)
if 'JAX_ENABLE_x64' in os.environ:
assert samples['p_latent'].dtype == np.float64
@pytest.mark.parametrize('algo', ['HMC', 'NUTS'])
@pytest.mark.parametrize('map_fn', [vmap, pmap])
@pytest.mark.skipif('XLA_FLAGS' not in os.environ, reason='without this mark, we have duplicated tests in Travis')
def test_functional_map(algo, map_fn):
if map_fn is pmap and xla_bridge.device_count() == 1:
pytest.skip('pmap test requires device_count greater than 1.')
true_mean, true_std = 1., 2.
warmup_steps, num_samples = 1000, 8000
def potential_fn(z):
return 0.5 * np.sum(((z - true_mean) / true_std) ** 2)
init_kernel, sample_kernel = hmc(potential_fn, algo=algo)
init_params = np.array([0., -1.])
rng_keys = random.split(random.PRNGKey(0), 2)
init_kernel_map = map_fn(lambda init_param, rng_key: init_kernel(
init_param, trajectory_length=9, num_warmup=warmup_steps, rng_key=rng_key))
init_states = init_kernel_map(init_params, rng_keys)
fori_collect_map = map_fn(lambda hmc_state: fori_collect(0, num_samples, sample_kernel, hmc_state,
transform=lambda x: x.z, progbar=False))
chain_samples = fori_collect_map(init_states)
assert_allclose(np.mean(chain_samples, axis=1), np.repeat(true_mean, 2), rtol=0.05)
assert_allclose(np.std(chain_samples, axis=1), np.repeat(true_std, 2), rtol=0.05)
def test_reuse_mcmc_run():
y1 = onp.random.normal(3, 0.1, (100,))
y2 = onp.random.normal(-3, 0.1, (100,))
def model(y_obs):
mu = numpyro.sample('mu', dist.Normal(0., 1.))
sigma = numpyro.sample("sigma", dist.HalfCauchy(3.))
numpyro.sample("y", dist.Normal(mu, sigma), obs=y_obs)
    # Run MCMC on the first dataset.
kernel = NUTS(model)
mcmc = MCMC(kernel, 200, 200)
mcmc.run(random.PRNGKey(32), y1)
# Run on data, re-using `mcmc`.
mcmc.run(random.PRNGKey(32), y2)
assert_allclose(mcmc.get_samples()['mu'].mean(), -3., atol=0.1)
def test_reuse_mcmc_pe_gen():
y1 = onp.random.normal(3, 0.1, (100,))
y2 = onp.random.normal(-3, 0.1, (100,))
def model(y_obs):
mu = numpyro.sample('mu', dist.Normal(0., 1.))
sigma = numpyro.sample("sigma", dist.HalfCauchy(3.))
numpyro.sample("y", dist.Normal(mu, sigma), obs=y_obs)
init_params, potential_fn, constrain_fn = initialize_model(random.PRNGKey(0), model,
y1, dynamic_args=True)
init_kernel, sample_kernel = hmc(potential_fn_gen=potential_fn)
init_state = init_kernel(init_params, num_warmup=300, model_args=(y1,))
@jit
def _sample(state_and_args):
hmc_state, model_args = state_and_args
return sample_kernel(hmc_state, (model_args,)), model_args
samples = fori_collect(0, 500, _sample, (init_state, y1),
transform=lambda state: constrain_fn(y1)(state[0].z))
assert_allclose(samples['mu'].mean(), 3., atol=0.1)
    # Run on the second dataset, re-using the jitted `_sample` - this should be much faster.
init_state = init_kernel(init_params, num_warmup=300, model_args=(y2,))
samples = fori_collect(0, 500, _sample, (init_state, y2),
transform=lambda state: constrain_fn(y2)(state[0].z))
assert_allclose(samples['mu'].mean(), -3., atol=0.1)
|
py | 1a5391d4d1cdbabd7335d33b1e3cd9a3be180bf9 | from torch.nn import LSTM, Linear, BatchNorm1d, Parameter
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class NoOp(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return x
class STFT(nn.Module):
def __init__(
self,
n_fft=4096,
n_hop=1024,
center=False
):
super(STFT, self).__init__()
self.window = nn.Parameter(
torch.hann_window(n_fft),
requires_grad=False
)
self.n_fft = n_fft
self.n_hop = n_hop
self.center = center
def forward(self, x):
"""
Input: (nb_samples, nb_channels, nb_timesteps)
Output:(nb_samples, nb_channels, nb_bins, nb_frames, 2)
"""
nb_samples, nb_channels, nb_timesteps = np.shape(x)
# merge nb_samples and nb_channels for multichannel stft
x = x.reshape(nb_samples*nb_channels, -1)
        # compute stft with parameters as close as possible to scipy settings
stft_f = torch.stft(
x,
n_fft=self.n_fft, hop_length=self.n_hop,
window=self.window, center=self.center,
normalized=False, onesided=True,
pad_mode='reflect'
)
# reshape back to channel dimension
stft_f = stft_f.contiguous().view(
nb_samples, nb_channels, self.n_fft // 2 + 1, -1, 2
)
return stft_f
class Spectrogram(nn.Module):
def __init__(
self,
power=1,
mono=True
):
super(Spectrogram, self).__init__()
self.power = power
self.mono = mono
def forward(self, stft_f):
"""
Input: complex STFT
(nb_samples, nb_bins, nb_frames, 2)
Output: Power/Mag Spectrogram
(nb_frames, nb_samples, nb_channels, nb_bins)
"""
stft_f = stft_f.transpose(2, 3)
# take the magnitude
stft_f = stft_f.pow(2).sum(-1).pow(self.power / 2.0)
# downmix in the mag domain
if self.mono:
stft_f = torch.mean(stft_f, 1, keepdim=True)
# permute output for LSTM convenience
return stft_f.permute(2, 0, 1, 3)
class OpenUnmix(nn.Module):
def __init__(
self,
n_fft=4096,
n_hop=1024,
input_is_spectrogram=False,
hidden_size=512,
nb_channels=2,
sample_rate=44100,
nb_layers=3,
input_mean=None,
input_scale=None,
max_bin=None,
unidirectional=False,
power=1,
):
"""
Input: (nb_samples, nb_channels, nb_timesteps)
or (nb_frames, nb_samples, nb_channels, nb_bins)
Output: Power/Mag Spectrogram
(nb_frames, nb_samples, nb_channels, nb_bins)
"""
super(OpenUnmix, self).__init__()
self.nb_output_bins = n_fft // 2 + 1
if max_bin:
self.nb_bins = max_bin
else:
self.nb_bins = self.nb_output_bins
self.hidden_size = hidden_size
self.stft = STFT(n_fft=n_fft, n_hop=n_hop)
self.spec = Spectrogram(power=power, mono=(nb_channels == 1))
self.register_buffer('sample_rate', torch.tensor(sample_rate))
if input_is_spectrogram:
self.transform = NoOp()
else:
self.transform = nn.Sequential(self.stft, self.spec)
self.fc1 = Linear(
self.nb_bins*nb_channels, hidden_size,
bias=False
)
self.bn1 = BatchNorm1d(hidden_size)
if unidirectional:
lstm_hidden_size = hidden_size
else:
lstm_hidden_size = hidden_size // 2
self.lstm = LSTM(
input_size=hidden_size,
hidden_size=lstm_hidden_size,
num_layers=nb_layers,
bidirectional=not unidirectional,
batch_first=False,
dropout=0.4,
)
self.fc2 = Linear(
in_features=hidden_size*2,
out_features=hidden_size,
bias=False
)
self.bn2 = BatchNorm1d(hidden_size)
self.fc3 = Linear(
in_features=hidden_size,
out_features=self.nb_output_bins*nb_channels,
bias=False
)
self.bn3 = BatchNorm1d(self.nb_output_bins*nb_channels)
if input_mean is not None:
input_mean = torch.from_numpy(
-input_mean[:self.nb_bins]
).float()
else:
input_mean = torch.zeros(self.nb_bins)
if input_scale is not None:
input_scale = torch.from_numpy(
1.0/input_scale[:self.nb_bins]
).float()
else:
input_scale = torch.ones(self.nb_bins)
self.input_mean = Parameter(input_mean)
self.input_scale = Parameter(input_scale)
self.output_scale = Parameter(
torch.ones(self.nb_output_bins).float()
)
self.output_mean = Parameter(
torch.ones(self.nb_output_bins).float()
)
def forward(self, x):
# check for waveform or spectrogram
# transform to spectrogram if (nb_samples, nb_channels, nb_timesteps)
# and reduce feature dimensions, therefore we reshape
x = self.transform(x)
nb_frames, nb_samples, nb_channels, nb_bins = x.data.shape
mix = x.detach().clone()
# crop
x = x[..., :self.nb_bins]
# shift and scale input to mean=0 std=1 (across all bins)
x += self.input_mean
x *= self.input_scale
# to (nb_frames*nb_samples, nb_channels*nb_bins)
# and encode to (nb_frames*nb_samples, hidden_size)
x = self.fc1(x.reshape(-1, nb_channels*self.nb_bins))
# normalize every instance in a batch
x = self.bn1(x)
x = x.reshape(nb_frames, nb_samples, self.hidden_size)
        # squash range to [-1, 1]
x = torch.tanh(x)
# apply 3-layers of stacked LSTM
lstm_out = self.lstm(x)
# lstm skip connection
x = torch.cat([x, lstm_out[0]], -1)
# first dense stage + batch norm
x = self.fc2(x.reshape(-1, x.shape[-1]))
x = self.bn2(x)
x = F.relu(x)
        # second dense stage + batch norm
x = self.fc3(x)
x = self.bn3(x)
# reshape back to original dim
x = x.reshape(nb_frames, nb_samples, nb_channels, self.nb_output_bins)
# apply output scaling
x *= self.output_scale
x += self.output_mean
# since our output is non-negative, we can apply RELU
x = F.relu(x) * mix
return x
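# A minimal usage sketch (illustrative only, not part of the original model
# definition; assumes the torch version this file targets, where torch.stft
# accepts the keyword arguments used above): feed a random stereo batch
# through the model and inspect the estimated magnitude spectrogram.
if __name__ == "__main__":
    unmix = OpenUnmix(nb_channels=2, sample_rate=44100)
    waveform = torch.rand(4, 2, 44100)   # (nb_samples, nb_channels, nb_timesteps)
    spec_estimate = unmix(waveform)      # (nb_frames, nb_samples, nb_channels, nb_bins)
    print(spec_estimate.shape)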
|
py | 1a5392b6ab14b674551750a5fc1c9b4265176805 | EMBED_SIZE = 200
NUM_LAYERS = 2
LR = 0.0001
MAX_GRAD_NORM = 5.0
PAD_ID = 0
UNK_ID = 1
START_ID = 2
EOS_ID = 3
CONV_SIZE = 3
# sanity
# BUCKETS = [(55, 50)]
# BATCH_SIZE = 10
# NUM_EPOCHS = 50
# NUM_SAMPLES = 498
# HIDDEN_SIZE = 400
# test
BUCKETS = [(30, 30), (55, 50)]
BATCH_SIZE = 20
NUM_EPOCHS = 3
NUM_SAMPLES = 498
HIDDEN_SIZE = 400
# experiment 1
# BUCKETS = [(16, 28), (31, 28), (51, 28)]
# BATCH_SIZE = 400
# NUM_EPOCHS = 5
# NUM_SAMPLES = 40960
# HIDDEN_SIZE = 400
# experiment 2
# BUCKETS = [(102, 28)]
# BATCH_SIZE = 300
# NUM_EPOCHS = 5
# NUM_SAMPLES = 40960
# HIDDEN_SIZE = 250
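# A minimal sketch (an assumption for illustration, not part of the original
# configuration) of how the (encoder_length, decoder_length) BUCKETS above are
# typically used: a sample goes into the first bucket large enough to hold
# both of its sequences, and is dropped if none fits.
def pick_bucket(encoder_len, decoder_len, buckets=BUCKETS):
    for bucket_id, (encoder_max, decoder_max) in enumerate(buckets):
        if encoder_len <= encoder_max and decoder_len <= decoder_max:
            return bucket_id
    return None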
|
py | 1a5392ca8f2c910a9234e89c5789063c5c284be5 | # -*- coding: utf-8 -*-
"""
Created on Tue May 30 10:31:52 2017
@author: robin
"""
|
py | 1a5392e0ff168c82fdf499afb4ebab0d70a1180a | # module
from __future__ import print_function
import argparse
from tqdm import tqdm
import torch
import torch.nn.functional as F
from torchvision import datasets, transforms
from torchvision.utils import save_image
import time
import torch.nn as nn
from SSGE import Attack,resnet18
import torchvision
from attack import uap_sgd
import random
import matplotlib.pyplot as plt
import numpy as np
from torchsummary import summary
from resnet import resnet18
from resnet2 import RESNET18
from model import vgg11_bn
from test import clipping_info,loss_cal
import math
class VGG1(nn.Module):
'''
VGG model
'''
def __init__(self, features):
super(VGG1, self).__init__()
self.features = features
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(512, 512),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(512, 512),
nn.ReLU(True),
nn.Linear(512, 10),
)
# Initialize weights
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3,stride=1, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
cfg = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M',
512, 512, 512, 512, 'M'],
}
def vgg11_bn1(num_classes=10):
"""VGG 11-layer model (configuration "A") with batch normalization"""
return VGG1(make_layers(cfg['A'], batch_norm=True))
parser = argparse.ArgumentParser(description='Deep-Leak')
parser.add_argument('--epsilon', type=float, default=0.3, metavar='EPS', help='L-infinity perturbation limit for PGD attack')
parser.add_argument('--batch-size', '-b', type=int, default=256, metavar='N', help='input batch size for training (default: 256)')
parser.add_argument('--epochs', type=int, default=125, metavar='N', help='number of epochs to train (default: 125)')
parser.add_argument('--no_train', type=int, default=0, metavar='N', help='no training algorithm')
parser.add_argument('--learning-rate', type=float, default=0.02, help='learning rate')
parser.add_argument('--error', type=float, default=0.01, help='error rate')
parser.add_argument('--momentum', type=float, default=0.9, help='learning momentum')
parser.add_argument('--percentage', type=float, default=1, help="fraction of each layer's weights selected for the leak")
parser.add_argument('--lambdas', type=float, default=0.0001, help='coefficient of the lasso regularization term')
parser.add_argument('--adv_model', default='./results/baseline_MNIST_classifier.pt', metavar='FILE', help='location of trained classifier')
parser.add_argument('--layer', type=int, default=6, metavar='N', help='Layer Number')
parser.add_argument('--evaluate', type=int, default=1, help='set to 1 to evaluate our trained adversary model in adv_model2/set to 0 to train a model with our method +PGD/else trains with our adversary only')
parser.add_argument('--no-cuda', action='store_true', default=False, help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
args = parser.parse_args()
print(args)
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
## normalize layer
class Normalize_layer(nn.Module):
def __init__(self, mean, std):
super(Normalize_layer, self).__init__()
self.mean = nn.Parameter(torch.Tensor(mean).unsqueeze(1).unsqueeze(1), requires_grad=False)
self.std = nn.Parameter(torch.Tensor(std).unsqueeze(1).unsqueeze(1), requires_grad=False)
def forward(self, input):
return input.sub(self.mean).div(self.std)
mean = [x / 255 for x in [129.3, 124.1, 112.4]]
std = [x / 255 for x in [68.2, 65.4, 70.4]]
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
#transforms.AutoAugment(transforms.AutoAugmentPolicy.CIFAR10),
transforms.ToTensor(),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=False, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
test_loader = torch.utils.data.DataLoader(testset, batch_size=args.batch_size, shuffle=False, num_workers=2)
attacker = Attack(dataloader=None,
attack_method='pgd', epsilon=args.epsilon)
def lasso_var(var,var1):
"We will use this function for positive and negative half of the distribution"
return (var1.mean() -var).abs().sum()
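# Worked example of lasso_var with illustrative values (an assumption):
#   lasso_var(torch.tensor([0.2, 0.4]), torch.tensor([0.1, 0.3, 0.5]))
#   = |0.3 - 0.2| + |0.3 - 0.4| = 0.2
# i.e. it penalizes how far the selected weights `var` deviate from the mean
# of the reference half-distribution `var1`.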
# Train baseline classifier on clean data
def train_baseline(classifier, adv_classifier, recordf, record,record7,record6,record5,record4,record3,record2,class_opt, device, epoch,lambdas):
classifier.train()
for batch_idx, (data, target) in enumerate(train_loader):
if batch_idx == 16:
break
data, target = data.to(device), target.to(device)
'''output = adv_classifier (data)
pred = output.argmax(dim=1, keepdim=True)
target = pred.view(-1)'''
class_opt.zero_grad() # Update the classifier
loss = F.cross_entropy(classifier(data), target)
loss_term = 0
cc = 0
for name, param in classifier.named_modules():
if isinstance(param, nn.Linear) or isinstance(param, nn.Conv2d) :
cc += 1
if cc < args.layer:
loss_term += lambdas * (lasso_var(param.weight.view(-1)[record[cc]][param.weight.view(-1)[record[cc]]>=0],param.weight[param.weight >=0]) + lasso_var(param.weight.view(-1)[record[cc]][param.weight.view(-1)[record[cc]]<0],param.weight[param.weight < 0]))
done = 1
#print(loss_term)
loss += loss_term
loss.backward()
class_opt.step()
if batch_idx % 10 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
return loss
# Tests classifier on clean data or attacker output
def test(classifier, attacker1, device, epoch):
classifier.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = classifier(data)
test_loss += F.cross_entropy(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
return correct
def functional(classifier, model, attacker1, device, epoch):
classifier.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = classifier(data)
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
output1 = model(data)
pred1 = output1.argmax(dim=1, keepdim=True) # get the index of the max log-probability
pred1= pred1.view(target.size())
test_loss += F.cross_entropy(output, pred1, reduction='sum').item() # sum up batch loss
correct += pred.eq(pred1.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('Test set: Average loss: {:.4f}, Functional Accuracy: {}/{} ({:.2f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
return correct
## attacking the classifier with black-box adversary generated from model.
def adv_test(classifier, model,attacker, device, epoch):
classifier.eval()
test_loss = 0
correct = 0
for data, target in test_loader:
data, target = data.to(device), target.to(device)
data = attacker.attack_method(
model, data, target)
output = classifier(data)
test_loss += F.cross_entropy(output, target.cuda(), reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
net_f = vgg11_bn(10)
net_f.load_state_dict(torch.load(args.adv_model))
net1 = torch.nn.Sequential(
Normalize_layer(mean,std),
net_f
)
adv_classifier = net1.to(device)
print("hi")
print("Test accuracy of the model" )
corr = test(adv_classifier, attacker, device, epoch=0)
import copy
net_f = vgg11_bn1(10)
classifier2 = torch.nn.Sequential(
Normalize_layer(mean,std),
net_f
)
classifier2 = classifier2.cuda()
class_adv = torch.optim.Adam(classifier2.parameters())
scheduler = torch.optim.lr_scheduler.MultiStepLR(class_adv, milestones=[30,60,90], gamma=0.1)
summary(classifier2, (3, 32, 32))
cc= 0
corr = functional(classifier2, adv_classifier,attacker, device, epoch=0)
count =0
for name, module in classifier2.named_modules():
if isinstance(module, nn.BatchNorm2d):
count+=1
module.weight.data.uniform_(0.01, 0.5)
module.bias.data[:] = 0
if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d):
count+=1
module.weight.data.uniform_(-0.05, 0.05)
print(cc,count)
cc+=1
if args.no_train ==1:
for name, module in classifier2.named_modules():
if isinstance(module, nn.BatchNorm2d):
count+=1
module.weight.data[:] = 0
module.bias.data[:] = 0
if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d):
count+=1
module.weight.data[:] = 0
print(cc,count)
cc+=1
recordr = {} ## all bits
recordf = {} ## MSB + 7
record = {} ## only MSB
recordm = {} ## MSB + any number
record7 = {} ## MSB + 6
record6 = {} ## MSB + 5
record5 = {} ## MSB + 4
record4 = {} ## MSB + 3
record3 = {} ## MSB + 2
record2 = {} ## MSB + 1
recorde = {} ## ERROR MSB
# oldperc = torch.tensor([0.5,0.055,0.056,0.055,0.067,0.077,0.078]) # layer-wise percentage
# 80 perc = torch.tensor([0.25,0.05,0.05,0.05,0.1,0.15,0.15])
'''
new:
90: torch.tensor([0.58,0.033,0.056,0.044,0.056,0.067,0.078])
80: torch.tensor([0.3125,0.0625,0.0625,0.0625,0.0875,0.1,0.125])
60: torch.tensor([0.133,0.033,0.033,0.05,0.067,0.12,0.2])
'''
perc = torch.tensor([0.58,0.033,0.056,0.044,0.056,0.067,0.078])
cc = 0
for name, module in adv_classifier.named_modules():
if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d):
cc+=1
if cc < args.layer:
tot = module.weight.data.view(-1).size()[0]
p_tot = int(args.percentage*tot)
step_f= int(p_tot*perc[0])
step_7= int(p_tot*perc[1]) + step_f
step_6 = int(p_tot*perc[2]) + step_7
step_5 = int(p_tot*perc[3]) + step_6
step_4 = int(p_tot*perc[4]) + step_5
step_3 = int(p_tot*perc[5]) + step_4
step_2 = int(p_tot*perc[6]) + step_3
recordr[cc] = torch.Tensor(random.sample(range(0,tot), p_tot)).long()
recordm[cc] = recordr[cc]
recorde[cc] = recordr[cc][0:int(p_tot* args.error)]
print("hi")
print(cc)
print(recordm[cc].size()[0])
recordf[cc] = recordr[cc][0:step_f]
record7[cc] = recordr[cc][step_f:step_7]
record6[cc] = recordr[cc][step_7:step_6]
record5[cc] = recordr[cc][step_6:step_5]
record4[cc] = recordr[cc][step_5:step_4]
record3[cc] = recordr[cc][step_4:step_3]
record2[cc] = recordr[cc][step_3:step_2]
record[cc] = recordr[cc][step_2:]
print(recordf[cc].size()[0]/tot,recordf[cc].size()[0]/tot,record7[cc].size()[0]/tot,record6[cc].size()[0]/tot,record5[cc].size()[0]/tot,
record4[cc].size()[0]/tot,record3[cc].size()[0]/tot,record2[cc].size()[0]/tot,record[cc].size()[0]/tot)
cc= 0
for name, module in classifier2.named_modules():
if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d):
cc +=1
print(cc)
m=0
for name1, module1 in adv_classifier.named_modules():
if isinstance(module1, nn.Linear) or isinstance(module1, nn.Conv2d):
m+=1
if cc==m:
if cc < args.layer:
module.weight.data.view(-1)[recordm[cc]].uniform_(0.001, 0.1)
module.weight.data.view(-1)[recordm[cc]] = module.weight.data.view(-1)[recordm[cc]] * module.weight.data.view(-1)[recordm[cc]].sign() * module1.weight.data.view(-1)[recordm[cc]].clone().sign()
module.weight.data.view(-1)[recorde[cc]] = module.weight.data.view(-1)[recorde[cc]] * (-1)
#module.weight.data.view(-1)[recordm[cc][0:int(recordm[cc].size()[0]*args.error)]] *= -1
total = 0
for name, module in classifier2.named_modules():
if isinstance(module, nn.Conv2d):
ss = module.weight.data.size()
total += ss[0]*ss[1]*ss[2]*ss[3]
print(total)
for name, module in classifier2.named_modules():
if isinstance(module, nn.Linear):
ss = module.weight.data.size()
total += ss[0]*ss[1]
print(ss[0]*ss[1])
print(total)
corrr = test(classifier2, None, device, epoch=0)
best_acc = 0
t0 = time.time()
print("Attacking the Classifier with white-box PGD" )
adv_test(adv_classifier,adv_classifier,attacker, device, 0)
_ = functional(classifier2, adv_classifier,attacker, device, epoch=0)
best_acc = 0
t0 = time.time()
print("Attacking the Classifier with hammer leak" )
adv_test(adv_classifier,classifier2,attacker, device, 0)
count =0
losses = np.zeros([args.epochs])
if args.evaluate==0:
    print('Training baseline classifier')
# Classification model setup
scheduler.step()
for epoch in range(1, args.epochs + 1):
losses[epoch-1] = train_baseline(classifier2, adv_classifier,recordf,recordm,record7,record6,record5,record4,record3,record2,class_adv, device, epoch,args.lambdas)
classifier2.eval()
if epoch == 109:
args.lambdas = 0
cc= 0
for name, module in classifier2.named_modules():
if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d):
cc +=1
m=0
for name1, module1 in adv_classifier.named_modules():
if isinstance(module1, nn.Linear) or isinstance(module1, nn.Conv2d):
m+=1
if cc==m:
print((module.weight.data.view(-1).sign() - module1.weight.data.view(-1).sign()).abs().sum())
if (epoch+1)%5 == 0 and epoch < 111:
cc= 0
for name, module in classifier2.named_modules():
if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d):
cc +=1
m=0
for name1, module1 in adv_classifier.named_modules():
if isinstance(module1, nn.Linear) or isinstance(module1, nn.Conv2d):
m+=1
if cc==m:
if cc<args.layer:
print(cc)
module.weight.data.view(-1)[recordm[cc]] = module.weight.data.view(-1)[recordm[cc]].abs() * module1.weight.data.view(-1)[recordm[cc]].sign()
module.weight.data.view(-1)[recorde[cc]] = module.weight.data.view(-1)[recorde[cc]] * (-1)
#module.weight.data.view(-1)[recordm[cc][0:int(recordm[cc].size()[0]*args.error)]] *= -1
#module.weight.data.view(-1)[recordf[cc]] = module1.weight.data.view(-1)[recordf[cc]]
print((module.weight.data.view(-1).sign() - module1.weight.data.view(-1).sign()).abs().sum())
accs = test(classifier2, None, device, epoch)
_ = functional(classifier2, adv_classifier,attacker, device, epoch=0)
if epoch == 111:
classifier2 = torch.load('nm2.pt')
if best_acc < accs:
best_acc = accs
torch.save(classifier2, 'nm2.pt')
classifier2 = torch.load('nm2.pt')
plt.plot(losses)
plt.xlabel("Iterations")
plt.ylabel("Loss term")
plt.savefig("figure.png")
accs = test(classifier2, None, device, epoch)
_ = functional(classifier2, adv_classifier,attacker, device, epoch=0)
cc= 0
for name, module in classifier2.named_modules():
if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d):
cc +=1
print(cc)
m=0
for name1, module1 in adv_classifier.named_modules():
if isinstance(module1, nn.Linear) or isinstance(module1, nn.Conv2d):
m+=1
if cc==m:
print((module.weight.data.view(-1).sign() - module1.weight.data.view(-1).sign()).abs().sum())
t0 = time.time()
print("Attacking PGD trained Classifier with Black-box PGD" )
adv_test(adv_classifier,classifier2,attacker, device, 0)
torch.cuda.current_stream().synchronize()
t1= time.time()
print(" Black-PGD Attack Time:",'{} seconds'.format(t1 - t0)) |
py | 1a5392ef6aa7f32e28500811d689153aec2384f5 | """This file and its contents are licensed under the Apache License 2.0. Please see the included NOTICE for copyright information and LICENSE for a copy of the license.
"""
from rest_framework.exceptions import APIException
from rest_framework import status
class LabelStudioError(Exception):
pass
class LabelStudioAPIException(APIException):
status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
default_detail = 'Unknown error'
class LabelStudioDatabaseException(LabelStudioAPIException):
default_detail = 'Error executing database query'
class LabelStudioDatabaseLockedException(LabelStudioAPIException):
default_detail = "Sqlite <a href='https://docs.djangoproject.com/en/3.1/ref/databases/#database-is-locked-errors'>doesn't operate well</a> on multiple transactions. \
Please be patient and try updating your pages, or ping us on Slack to learn more about a production-ready database"
class ProjectExistException(LabelStudioAPIException):
status_code = status.HTTP_422_UNPROCESSABLE_ENTITY
default_detail = 'Project with the same title already exists'
|
py | 1a53933094526c6664642eeadb78832bdae36cb5 | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "opencv_tests"
PROJECT_SPACE_DIR = "/home/kinova/MillenCapstone/MadalynMillenCapstone/install"
PROJECT_VERSION = "1.13.0"
|
py | 1a539429e54b69d7d5936b2637953a4045729a27 | from ._ModelJointsState import *
from ._PlugCommand import *
|
py | 1a539435d534a780d449ebe1eb1cc8598d0c672b | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import contextlib
from telemetry import decorators
from telemetry import page as page_module
from telemetry import story
from telemetry.page import cache_temperature
from telemetry.testing import browser_test_case
from telemetry.timeline import tracing_config
from tracing.trace_data import trace_data
class CacheTemperatureTests(browser_test_case.BrowserTestCase):
def __init__(self, *args, **kwargs):
    super(CacheTemperatureTests, self).__init__(*args, **kwargs)
self._full_trace = None
@contextlib.contextmanager
def captureTrace(self):
tracing_controller = self._browser.platform.tracing_controller
options = tracing_config.TracingConfig()
options.enable_chrome_trace = True
tracing_controller.StartTracing(options)
try:
yield
finally:
self._full_trace = tracing_controller.StopTracing()
def traceMarkers(self):
if not self._full_trace:
return set()
chrome_trace = self._full_trace.GetTraceFor(trace_data.CHROME_TRACE_PART)
return set(
event['name']
for event in chrome_trace['traceEvents']
if event['cat'] == 'blink.console')
@decorators.Enabled('has tabs')
def testEnsureAny(self):
with self.captureTrace():
story_set = story.StorySet()
page = page_module.Page(
'http://google.com', page_set=story_set,
cache_temperature=cache_temperature.ANY, name='http://google.com')
cache_temperature.EnsurePageCacheTemperature(page, self._browser)
markers = self.traceMarkers()
self.assertNotIn('telemetry.internal.ensure_diskcache.start', markers)
self.assertNotIn('telemetry.internal.warmCache.start', markers)
@decorators.Enabled('has tabs')
@decorators.Disabled('chromeos')
def testEnsurePCv1Cold(self):
with self.captureTrace():
story_set = story.StorySet()
page = page_module.Page(
'http://google.com', page_set=story_set,
cache_temperature=cache_temperature.PCV1_COLD, name='http://google.com')
cache_temperature.EnsurePageCacheTemperature(page, self._browser)
markers = self.traceMarkers()
self.assertIn('telemetry.internal.ensure_diskcache.start', markers)
self.assertIn('telemetry.internal.ensure_diskcache.end', markers)
@decorators.Enabled('has tabs')
def testEnsurePCv1WarmAfterPCv1ColdRun(self):
with self.captureTrace():
story_set = story.StorySet()
page = page_module.Page(
'http://google.com', page_set=story_set,
cache_temperature=cache_temperature.PCV1_COLD, name='http://google.com')
cache_temperature.EnsurePageCacheTemperature(page, self._browser)
previous_page = page
page = page_module.Page(
'http://google.com', page_set=story_set,
cache_temperature=cache_temperature.PCV1_WARM, name='http://google.com')
cache_temperature.EnsurePageCacheTemperature(
page, self._browser, previous_page)
markers = self.traceMarkers()
self.assertNotIn('telemetry.internal.warmCache.start', markers)
@decorators.Enabled('has tabs')
@decorators.Disabled('chromeos')
def testEnsurePCv1WarmFromScratch(self):
with self.captureTrace():
story_set = story.StorySet()
page = page_module.Page(
'http://google.com', page_set=story_set,
cache_temperature=cache_temperature.PCV1_WARM, name='http://google.com')
cache_temperature.EnsurePageCacheTemperature(page, self._browser)
markers = self.traceMarkers()
self.assertIn('telemetry.internal.warmCache.start', markers)
self.assertIn('telemetry.internal.warmCache.end', markers)
|
py | 1a539466d19266b8d019165cecbf07bb30acc438 | from django.db import models
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
class PhasmaDevice(models.Model):
mac = models.CharField(_('media access control address'),
max_length=127,
primary_key=True,
help_text=_("MAC (Media Access Control) address of phasma device.")
)
name = models.CharField(_('name'),
max_length=127,
help_text=_("Name of phasma device.")
)
date_added = models.DateTimeField(_('date added'),
default=timezone.now,
help_text=_("Date when phasma device was added.")
)
date_updated = models.DateTimeField(_('date updated'),
auto_now=True,
help_text=_("Date when phasma device was updated.")
)
class Meta:
ordering = ('-date_added',)
def __str__(self) -> str:
return self.mac
|
py | 1a539541c751d93ac4411fd6b977e58a82db66b9 | """ebonik URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
|
py | 1a539569abfdab13f324f13b6975beaf504433fe | __author__ = 'bptripp'
import numpy as np
import matplotlib.pyplot as plt
import cPickle
from quaternion import angle_between_quaterions
# def interpolate(point, angle, points, angles, values, sigma_p=.01, sigma_a=(4*np.pi/180)):
# """
# Gaussian kernel smoothing.
# """
# # q = to_quaternion(get_rotation_matrix(point, angle))
# # print(angle)
#
# weights = np.zeros(len(values))
# # foo = np.zeros(len(values))
# # bar = np.zeros(len(values))
# for i in range(len(values)):
# # q_i = to_quaternion(get_rotation_matrix(points[:,i], angles[:,i]))
#
# # print(q_i)
#
# # angle = angle_between_quaterions(q, q_i)
# # print(angle)
#
# position_distance = np.linalg.norm(point - points[:,i])
# angle_distance = angle[2] - angles[2,i];
#
# # weights[i] = np.exp( -(angle**2/2/sigma_a**2) )
# weights[i] = np.exp( -(angle_distance**2/2/sigma_a**2 + position_distance**2/2/sigma_p**2) )
# # weights[i] = np.exp( -(angle**2/2/sigma_a**2 + distance**2/2/sigma_p**2) )
# # foo[i] = np.exp( -(angle**2/2/sigma_a**2) )
# # bar[i] = np.exp( -(distance**2/2/sigma_p**2) )
#
# # print(weights)
# # print(np.sum(weights))
# # print(np.sum(foo))
# # print(np.sum(bar))
# return np.sum(weights * np.array(values)) / np.sum(weights)
def interpolate(quaternion, distance, quaternions, distances, values, sigma_a=(4*np.pi/180), sigma_d=.01):
"""
Gaussian kernel smoothing.
"""
weights = np.zeros(len(values))
angle_threshold = np.cos(1.25*sigma_a) # I think this corresponds to twice this angle between quaternions
distance_threshold = 2.5*sigma_d
# attempt fast estimate (only considering within-threshold points) ...
c = 0
for i in range(len(values)):
distance_difference = np.abs(distance - distances[i])
if distance_difference < distance_threshold and np.dot(quaternion, quaternions[i]) > angle_threshold:
c += 1
angle_difference = np.abs(angle_between_quaterions(quaternion, quaternions[i]))
weights[i] = np.exp( -(angle_difference**2/2/sigma_a**2 + distance_difference**2/2/sigma_d**2) )
# slow estimate if not enough matches ...
# print(c)
if c <= 3:
# print('slow estimate ' + str(c))
for i in range(len(values)):
distance_difference = np.abs(distance - distances[i])
angle_difference = np.abs(angle_between_quaterions(quaternion, quaternions[i]))
weights[i] = np.exp( -(angle_difference**2/2/sigma_a**2 + distance_difference**2/2/sigma_d**2) )
# print(weights)
# print(values)
return np.sum(weights * np.array(values)) / np.sum(weights)
def check_interpolate():
from perspective import get_quaternion_distance
point = np.array([1e-6,.1,.1])
angle = np.array([0,0,.9])
points = np.array([[1e-6,.1,.1], [1e-6,.12,.1]]).T
angles = np.array([[0,0,1], [0,0,1]]).T
values = np.array([0,1])
quaternion, distance = get_quaternion_distance(point[:,np.newaxis], angle[:,np.newaxis])
quaternions, distances = get_quaternion_distance(points, angles)
# print(quaternion)
# print(distance)
# print(quaternions)
# print(distances)
# estimate = interpolate(point, angle, points, angles, values, sigma_p=.01, sigma_a=(4*np.pi/180))
estimate = interpolate(quaternion[0], distance[0], quaternions, distances, values, sigma_d=.01, sigma_a=(4*np.pi/180))
print(estimate)
def test_interpolation_accuracy(points, angles, metrics, n_examples):
"""
Compare interpolated vs. actual metrics by leaving random
examples out of interpolation set and estimating them.
"""
from perspective import get_quaternion_distance
quaternions, distances = get_quaternion_distance(points, angles)
actuals = []
interpolateds = []
for i in range(n_examples):
print(i)
one = np.random.randint(0, len(metrics))
others = range(one)
others.extend(range(one+1, len(metrics)))
others = np.array(others)
actuals.append(metrics[one])
interpolated = interpolate(quaternions[one,:], distances[one], quaternions[others,:], distances[others], metrics[others],
sigma_d=.01, sigma_a=(8*np.pi/180))
interpolateds.append(interpolated)
# print(interpolated - metrics[one])
# print(np.corrcoef(actuals, interpolateds))
return actuals, interpolateds
def plot_interp_error_vs_density():
with open('spatula-perspectives-smoothed.pkl', 'rb') as f:
(points, angles, metrics, collisions, smoothed) = cPickle.load(f)
metrics = np.array(metrics)
smoothed = np.array(smoothed)
numbers = [250, 500, 1000, 2000, 4000]
metric_errors = []
smoothed_errors = []
for n in numbers:
actuals, interpolateds = test_interpolation_accuracy(points[:,:n], angles[:,:n], metrics[:n], 500)
metric_errors.append(np.mean( (np.array(actuals)-np.array(interpolateds))**2 )**.5)
actuals, interpolateds = test_interpolation_accuracy(points[:,:n], angles[:,:n], smoothed[:n], 500)
smoothed_errors.append(np.mean( (np.array(actuals)-np.array(interpolateds))**2 )**.5)
plt.plot(numbers, smoothed_errors)
plt.plot(numbers, metric_errors)
plt.show()
if __name__ == '__main__':
# check_interpolate()
plot_interp_error_vs_density()
|
py | 1a53958c8eeecfd1152e58812519e54dee11d748 | from rest_framework import serializers
from .models import *
class CitySerializer(serializers.ModelSerializer):
add_time = serializers.DateTimeField(read_only=True)
class Meta:
model = CityDict
fields = '__all__'
class CourseOrganizationSerializer(serializers.ModelSerializer):
add_time = serializers.DateTimeField(read_only=True)
class Meta:
model = CourseOrg
fields = '__all__'
class TeacherSerializer(serializers.ModelSerializer):
add_time = serializers.DateTimeField(read_only=True)
class Meta:
model = Teacher
fields = '__all__' |
py | 1a5396edcf4ef54e9c4a878426558fc82945e23f | """Represents a Concrete Strategy Object class for parsing PDF files.
References:
Lesson 4, Concept 8: Exercise - Strategy Objects
https://classroom.udacity.com/nanodegrees/nd303/parts/bdd52131-b22e-4c57-b3f2-a03951c9d514/modules/5fe343a0-2926-4953-81bc-485ee835e1c6/lessons/cac8a587-58ea-44d2-927f-0c9badb7a8e9/concepts/8e2fb5c6-33ef-4b5b-a01d-8f422a88fa1b
Lesson 5, Concept 7: Exercise - Complex Strategy:
https://classroom.udacity.com/nanodegrees/nd303/parts/bdd52131-b22e-4c57-b3f2-a03951c9d514/modules/5fe343a0-2926-4953-81bc-485ee835e1c6/lessons/93decac5-5e75-4573-b28e-ad1218ec04d3/concepts/6733fc76-b1a7-4c42-9a67-622af43b8cd5
"""
from typing import List
import subprocess
import os
import random
from .IngestorInterface import IngestorInterface
from .QuoteModel import QuoteModel
class PDFIngestor(IngestorInterface):
"""Create an Concrete Class Object for parsing PDF file pathways.
param allowed_extensions: File pathway allowed in this ingestor.
"""
allowed_extensions = ['pdf']
@classmethod
def parse(cls, path: str) -> List[QuoteModel]:
"""Ingest PDF File, return list of quotes.
param path {str}: PDF file pathway, origin of quotes.
return: Quotes stored in PDF file.
"""
if not cls.can_ingest(path):
raise Exception('PDF-Only Diet, Cannot Ingest!')
tmp = f'./tmp/{random.randint(0, 100000000)}.txt'
call = subprocess.call(['pdftotext', path, tmp])
file_ref = open(tmp, "r")
quotes = []
for line in file_ref.readlines():
line = line.strip('\n\r').strip()
if len(line) > 0:
parse = line.split(' - ')
meme_text = QuoteModel(parse[0], parse[1])
quotes.append(meme_text)
file_ref.close()
os.remove(tmp)
return quotes
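# Minimal usage sketch (the path below is a placeholder; assumes the
# `pdftotext` command-line tool is installed and a ./tmp directory exists):
#
#   quotes = PDFIngestor.parse('./path/to/quotes.pdf')
#   for quote in quotes:
#       print(quote)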
|
py | 1a53971940d461d6fefa797d87e97b6d499f19cd | from six.moves import xrange
import tensorflow as tf
from .var_layer import VarLayer
from ..tf import sparse_tensor_diag_matmul
def conv(features, adj, weights):
    # node degrees with +1 for the implicit self-loop: dhat_i = d_i + 1
    degree = tf.sparse_reduce_sum(adj, axis=1) + 1
    degree = tf.cast(degree, tf.float32)
    degree = tf.pow(degree, -0.5)
    # symmetrically normalized adjacency: Dhat^(-1/2) * A * Dhat^(-1/2)
    adj = sparse_tensor_diag_matmul(adj, degree, transpose=True)
    adj = sparse_tensor_diag_matmul(adj, degree, transpose=False)
    output = tf.sparse_tensor_dense_matmul(adj, features)
    # self-loop term Dhat^(-1) * X = Dhat^(-1/2) * I * Dhat^(-1/2) * X, so the
    # overall propagation is Dhat^(-1/2) * (A + I) * Dhat^(-1/2) * X * W
    features = tf.transpose(features)
    features = tf.multiply(tf.multiply(degree, features), degree)
    features = tf.transpose(features)
    output = output + features
    return tf.matmul(output, weights)
class GCNN(VarLayer):
def __init__(self, in_channels, out_channels, adjs, **kwargs):
self.adjs = adjs
super(GCNN, self).__init__(
weight_shape=[in_channels, out_channels],
bias_shape=[out_channels],
**kwargs)
def _call(self, inputs):
batch_size = len(inputs)
outputs = []
for i in xrange(batch_size):
output = conv(inputs[i], self.adjs[i], self.vars['weights'])
if self.bias:
output = tf.nn.bias_add(output, self.vars['bias'])
output = self.act(output)
outputs.append(output)
return outputs
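# A minimal sketch of conv() on a 3-node chain graph (illustrative only;
# assumes TF1-style graph mode as used in the rest of this module, and that
# the module is run within its package, e.g. via `python -m`, so the relative
# imports above resolve).
if __name__ == '__main__':
    adj_example = tf.SparseTensor(
        indices=[[0, 1], [1, 0], [1, 2], [2, 1]],
        values=tf.ones([4]),
        dense_shape=[3, 3])
    features_example = tf.constant([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
    weights_example = tf.ones([2, 4])
    output_example = conv(features_example, adj_example, weights_example)
    with tf.Session() as sess:
        print(sess.run(output_example))  # shape (3, 4)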
|
py | 1a5397528cdf688d708612a3b02b62abe0fa4b0a | from abc import ABC, abstractmethod
from collections.abc import Iterable
from pydoc import locate
import pandas as pd
import numpy as np
from raymon.globals import Buildable, Serializable, DataException
from raymon.globals import ExtractorException
class Extractor(Serializable, Buildable, ABC):
def __str__(self):
return f"{self.__class__.__name__}"
def __repr__(self):
return str(self)
@classmethod
def from_jcr(cls, jcr):
classpath = jcr["class"]
state_jcr = jcr["state"]
statsclass = locate(classpath)
if statsclass is None:
raise NameError(f"Could not locate classpath {classpath}")
return statsclass.from_jcr(state_jcr)
class SimpleExtractor(Extractor):
@abstractmethod
def extract(self, data):
"""Extracts a component from a data instance.
Parameters
----------
data : any
The data instance you want to extract a component from. The type is up to you.
"""
raise NotImplementedError
def extract_multiple(self, data):
if data is None:
raise DataException(f"Data is None")
components = []
if isinstance(data, pd.DataFrame) or isinstance(data, np.ndarray):
components = self.extract(data)
elif isinstance(data, Iterable):
for el in data:
components.append(self.extract(el))
else:
raise DataException("Data should be a DataFrame or Iterable")
return components
class EvalExtractor(Extractor):
@abstractmethod
def extract(self, output, actual):
"""Extracts a component from a data instance.
Parameters
----------
data : any
The data instance you want to extract a component from. The type is up to you.
"""
raise NotImplementedError
def extract_multiple(self, output, actual):
if output is None:
raise DataException("output is None")
if actual is None:
raise DataException("actual is None")
if type(output) != type(actual):
raise DataException("output and actual not of same type")
if len(output) != len(actual):
raise DataException("output and actual not of same length")
components = []
if isinstance(output, pd.DataFrame) or isinstance(output, np.ndarray):
zipped = zip(output, actual)
for out, act in zipped:
components.append(self.extract(out, act))
elif isinstance(output, Iterable):
zipped = zip(output, actual)
for out, act in zipped:
components.append(self.extract(out[0], act[0]))
else:
raise DataException("Data should be a DataFrame or Iterable")
return components
class NoneExtractor(SimpleExtractor):
def extract(self, data):
return 0
def to_jcr(self):
data = {}
return data
@classmethod
def from_jcr(cls, jcr):
return cls()
"""Buildable interface"""
def build(self, data):
pass
def is_built(self):
return True
class NoneEvalExtractor(EvalExtractor):
def __init__(self, lower_better=True):
self.lower_better = lower_better
def extract(self, output, actual):
return 0
def to_jcr(self):
data = {}
return data
@classmethod
def from_jcr(cls, jcr):
return cls(**jcr)
"""Buildable interface"""
def build(self, data):
pass
def is_built(self):
return True
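# A minimal concrete extractor sketch (an illustration, not part of the raymon
# library itself): reduces a 1-D array-like instance to its mean. It mirrors
# the NoneExtractor boilerplate above for the Serializable/Buildable methods.
class MeanExtractor(SimpleExtractor):
    def extract(self, data):
        return float(np.mean(data))
    def to_jcr(self):
        return {}
    @classmethod
    def from_jcr(cls, jcr):
        return cls()
    """Buildable interface"""
    def build(self, data):
        pass
    def is_built(self):
        return True
# e.g. MeanExtractor().extract_multiple([[1, 2], [3, 4]]) -> [1.5, 3.5]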
|
py | 1a53983a24b91472986a3ba619b7900755ad409e | #!/usr/bin/env python
# Copyright 2015-2017 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import datetime
import sys
import time
from paasta_tools.mesos_tools import get_mesos_master
from paasta_tools.utils import load_system_paasta_config
OUTPUT_FORMAT = "{:<30} {:<8} {:<20} {:<27} {}"
FRAMEWORK_NAME = "marathon"
MAX_BOUNCE_TIME_IN_HOURS = 4
def parse_args():
parser = argparse.ArgumentParser(
description='Find all containers serving previous push versions.',
)
parser.add_argument(
'--bounce-time', dest="bounce_time", type=int,
default=MAX_BOUNCE_TIME_IN_HOURS,
help=(
"Ignore versions that were launched in the last BOUNCE_TIME hours "
"because they probably are still bouncing."
),
)
return parser.parse_args()
def get_mesos_state():
state = get_mesos_master(use_mesos_cache=True).state
return state
def marathon_tasks(state):
for framework in state.get('frameworks', []):
if framework['name'].lower().startswith(FRAMEWORK_NAME):
for task in framework.get('tasks', []):
yield task
def create_slave_id_to_hostname_dict(state):
res = {}
for slave in state['slaves']:
res[slave['id']] = slave['hostname']
return res
def group_running_tasks_by_id_and_gitsha(state):
res = {}
for t in marathon_tasks(state):
if t['state'] == 'TASK_RUNNING':
task_id = t['name'][:t['name'].find('.', t['name'].find('.') + 1)]
gitsha = t['name'][len(task_id) + 1:t['name'].find('.', len(task_id) + 1)]
res.setdefault(task_id, {}).setdefault(gitsha, []).append(t)
return res
def detect_outdated_gitshas(versions, max_bounce_time_in_hours):
"""Find versions that should have drained more than 'max_bounce_time_in_hours' ago"""
if len(versions) < 2:
return []
deploy_time = {}
latest_deploy = 0
for version, tasks in versions.items():
deploy_time[version] = sum(t['statuses'][0]['timestamp'] for t in tasks) / len(tasks)
if (
deploy_time[version] > latest_deploy and
time.time() - deploy_time[version] > max_bounce_time_in_hours * 3600
):
latest_deploy = deploy_time[version]
return [version for version, dtime in deploy_time.items() if dtime < latest_deploy]
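# Illustrative behaviour of detect_outdated_gitshas (timestamps are an
# assumption, with now = time.time()): the newest gitsha finished deploying
# more than max_bounce_time_in_hours ago, so every older sha is reported.
#   versions = {
#       'aaaaaaaa': [{'statuses': [{'timestamp': now - 10 * 3600}]}],
#       'bbbbbbbb': [{'statuses': [{'timestamp': now - 5 * 3600}]}],
#   }
#   detect_outdated_gitshas(versions, max_bounce_time_in_hours=4) == ['aaaaaaaa']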
def report_outdated_instances(task_id, gitsha, tasks, slave_id2hostname):
output = []
remedy = []
for t in tasks:
deploy_time = datetime.datetime.fromtimestamp(int(t['statuses'][0]['timestamp'])).strftime('%Y-%m-%d %H:%M:%S')
container_name = "mesos-{}.{}".format(
t['slave_id'],
t['statuses'][0]['container_status']['container_id']['value'],
)
hostname = slave_id2hostname[t['slave_id']]
hostname = hostname[:hostname.find('.')]
service_instance = task_id.replace('--', '_')
output.append(
OUTPUT_FORMAT.format(
service_instance[:30],
gitsha[3:],
deploy_time,
hostname,
container_name,
),
)
remedy.append('ssh {0} "sudo hadown {1}; sleep 10; sudo docker stop {2}; sudo haup {1}"'
.format(hostname, service_instance, container_name))
return output, remedy
def check_mesos_tasks(max_bounce_time_in_hours=MAX_BOUNCE_TIME_IN_HOURS):
output = []
remedy = []
state = get_mesos_state()
aggregated_tasks = group_running_tasks_by_id_and_gitsha(state)
slave_id2hostname = create_slave_id_to_hostname_dict(state)
for task_id, versions in aggregated_tasks.items():
for gitsha in detect_outdated_gitshas(versions, max_bounce_time_in_hours):
temp_output, temp_remedy = report_outdated_instances(
task_id, gitsha, versions[gitsha], slave_id2hostname,
)
output.extend(temp_output)
remedy.extend(temp_remedy)
return output, remedy
def main():
args = parse_args()
cluster = load_system_paasta_config().get_cluster()
output, remedy = check_mesos_tasks(args.bounce_time)
if output:
print("CRITICAL - There are {} tasks running in {} that are more than {}h older than their"
" last bounce.".format(len(output), cluster, args.bounce_time))
print(OUTPUT_FORMAT.format('SERVICE.INSTANCE', 'COMMIT', 'CREATED', 'HOSTNAME', 'CONTAINER'))
print('\n'.join(output))
print('')
print('Run the following commands to terminate them:')
print('{code}')
print('\n'.join(remedy))
print('{code}')
return 1
else:
print("OK - There are no outdated tasks in {}".format(cluster))
return 0
if __name__ == "__main__":
sys.exit(main())
|
py | 1a53985c25c804943673c27ec940886ccf8d0c5d | # Copyright (c) 2016, Matt Layman
import os
import sys
import tempfile
import unittest
try:
from unittest import mock
except ImportError:
import mock
from tap import TAPTestRunner
from tap.runner import TAPTestResult, _tracker
class TestTAPTestRunner(unittest.TestCase):
def test_has_tap_test_result(self):
runner = TAPTestRunner()
self.assertEqual(runner.resultclass, TAPTestResult)
def test_runner_uses_outdir(self):
"""Test that the test runner sets the outdir so that TAP
files will be written to that location.
Setting class attributes to get the right behavior is a dirty hack, but
the unittest classes aren't very extensible.
"""
# Save the previous outdir in case **this** execution was using it.
previous_outdir = _tracker.outdir
outdir = tempfile.mkdtemp()
TAPTestRunner.set_outdir(outdir)
self.assertEqual(outdir, _tracker.outdir)
_tracker.outdir = previous_outdir
def test_runner_uses_format(self):
"""Test that format is set on TAPTestResult FORMAT."""
# Save the previous format in case **this** execution was using it.
previous_format = TAPTestResult.FORMAT
fmt = "{method_name}: {short_description}"
TAPTestRunner.set_format(fmt)
self.assertEqual(fmt, TAPTestResult.FORMAT)
TAPTestResult.FORMAT = previous_format
def test_runner_uses_combined(self):
"""Test that output is combined."""
# Save previous combined in case **this** execution was using it.
previous_combined = _tracker.combined
TAPTestRunner.set_combined(True)
self.assertTrue(_tracker.combined)
_tracker.combined = previous_combined
@mock.patch('sys.exit')
def test_bad_format_string(self, fake_exit):
"""A bad format string exits the runner."""
previous_format = TAPTestResult.FORMAT
bad_format = "Not gonna work {sort_desc}"
TAPTestRunner.set_format(bad_format)
result = TAPTestResult(None, True, 1)
test = mock.Mock()
result._description(test)
self.assertTrue(fake_exit.called)
TAPTestResult.FORMAT = previous_format
def test_runner_sets_tracker_for_streaming(self):
"""The tracker is set for streaming mode."""
previous_streaming = _tracker.streaming
previous_stream = _tracker.stream
runner = TAPTestRunner()
runner.set_stream(True)
self.assertTrue(_tracker.streaming)
self.assertTrue(_tracker.stream, sys.stdout)
_tracker.streaming = previous_streaming
_tracker.stream = previous_stream
def test_runner_stream_to_devnull_for_streaming(self):
previous_streaming = _tracker.streaming
previous_stream = _tracker.stream
runner = TAPTestRunner()
runner.set_stream(True)
self.assertTrue(runner.stream.stream.name, os.devnull)
_tracker.streaming = previous_streaming
_tracker.stream = previous_stream
def test_runner_uses_header(self):
"""Test that the case header can be turned off."""
# Save previous header in case **this** execution was using it.
previous_header = _tracker.header
TAPTestRunner.set_header(False)
self.assertFalse(_tracker.header)
TAPTestRunner.set_header(True)
self.assertTrue(_tracker.header)
_tracker.header = previous_header
|
py | 1a5398ca290a09af212058cee2834600270a8058 | # exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('Bcl2', ['BidM', 'BaxA'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'Bcl2', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM', 'Bcl2'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6A', ['C8pro'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 75000.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('Bcl2_0', 328000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6A_0', 0.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('Bcl2_obs', Bcl2())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6A_obs', C6A())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None, Bcl2=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None, Bcl2=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None, Bcl2=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('inhibition_0_Bcl2_inhibitor_BidM_inh_target', Bcl2(BidM=None, BaxA=None) + BidM(BaxM=None, Bcl2=None) | Bcl2(BidM=1, BaxA=None) % BidM(BaxM=None, Bcl2=1), inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr)
Rule('inhibition_0_Bcl2_inhibitor_BaxA_inh_target', Bcl2(BidM=None, BaxA=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | Bcl2(BidM=None, BaxA=1) % BaxA(BaxM=None, Bcl2=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(Bcl2(BidM=None, BaxA=None), Bcl2_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None, Bcl2=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C6pro(C3A=None), C6pro_0)
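
# Simulation sketch (illustrative): one way a PySB model built from the rules
# and initial conditions above might be run, assuming the Model(), Monomer,
# Parameter and any Observable declarations appear earlier in this file.
# The time span and the printed output are arbitrary examples.
if __name__ == '__main__':
    import numpy as np
    from pysb.simulator import ScipyOdeSimulator

    tspan = np.linspace(0, 20000, 200)
    # `model` is self-exported into this namespace by the Model() declaration.
    sim = ScipyOdeSimulator(model, tspan=tspan)
    result = sim.run()
    # result.all holds the observable/expression trajectories over tspan.
    print(result.all)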
|
py | 1a5399afafee0730303e2ab23186b2faba652525 | from django.db import models
from django.utils import timezone


# Blog models
class BlogPost(models.Model):
    author = models.CharField(max_length=200)
    role = models.CharField(max_length=200)
    image = models.ImageField(upload_to='blogMedia/')
    title = models.CharField(max_length=200)
    displayText = models.TextField()
    body = models.TextField()
    date = models.DateTimeField(default=timezone.now)

    def delete(self, *args, **kwargs):
        # Remove the uploaded image file from storage before deleting the row.
        self.image.storage.delete(str(self.image))
        super(BlogPost, self).delete(*args, **kwargs)

    def __str__(self):
        return self.title + ' | ' + self.author


class BlogPostComment(models.Model):
    post = models.ForeignKey(BlogPost, related_name='comments', on_delete=models.CASCADE)
    author = models.CharField(max_length=200)
    comment = models.TextField()
    date = models.DateTimeField(auto_now_add=True)
    approved = models.BooleanField(default=False)

    def __str__(self):
        return str(self.post.title) + ' | ' + str(self.author) + ': ' + str(self.comment)
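

# ORM usage sketch (illustrative): a module-level helper that returns a post's
# approved comments via the 'comments' related_name defined on BlogPostComment.
# The helper is an example only and is not referenced elsewhere in this app.
def approved_comments_for(post):
    """Return the approved comments for a BlogPost, newest first."""
    return post.comments.filter(approved=True).order_by('-date')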


# News models
class New(models.Model):
    image = models.ImageField(upload_to='newsMedia')
    title = models.CharField(max_length=200)
    subtitle = models.CharField(max_length=200)
    source = models.CharField(max_length=500)
    date = models.DateTimeField(auto_now_add=True)
    body = models.TextField()

    def delete(self, *args, **kwargs):
        # Remove the uploaded image file from storage before deleting the row.
        self.image.storage.delete(str(self.image))
        super(New, self).delete(*args, **kwargs)

    def __str__(self):
        return str(self.title) + ' | ' + str(self.date)


# Event models
class Event(models.Model):
    name = models.CharField(max_length=200)
    date = models.DateTimeField(auto_now_add=True)
    title = models.CharField(max_length=200)
    bannerimage = models.ImageField(upload_to='eventMedia')
    image1 = models.ImageField(upload_to='eventMedia', blank=True)
    image2 = models.ImageField(upload_to='eventMedia', blank=True)
    image3 = models.ImageField(upload_to='eventMedia', blank=True)
    image4 = models.ImageField(upload_to='eventMedia', blank=True)
    body = models.TextField()

    def delete(self, *args, **kwargs):
        # Remove every uploaded file from storage; the optional images may be
        # blank, so only delete the ones that were actually set.
        self.bannerimage.storage.delete(str(self.bannerimage))
        for image in (self.image1, self.image2, self.image3, self.image4):
            if image:
                image.storage.delete(str(image))
        super(Event, self).delete(*args, **kwargs)

    def __str__(self):
        return str(self.name) |
py | 1a5399b1a8985a1294e758bf3d2b524c3b1717b9 | # Aggregator class
# : consumes (headers, body, timestamp) records from a queue, computes the
#   requested statistics, and hands the results to a writer.
import logging
from threading import Thread, Event

try:
    from queue import Empty  # Python 3
except ImportError:
    from Queue import Empty  # Python 2

from user_agents import parse

logger = logging.getLogger('pwstat_aggregator')


class Aggregator(Thread):
    def __init__(self, queue, writer, stat_list):
        Thread.__init__(self)
        self.queue = queue
        self.writer = writer
        self.stat_list = stat_list
        # match dictionary: maps each supported stat name to the method that
        # computes it, e.g. 'stat_name': self.function_name
        self.match = {'timestamp': self.get_timestamp,
                      'user-agent': self.get_user_agent,
                      }
        # validate the statistics specified
        for stat in stat_list:
            assert stat in self.match, \
                "'{}' did not match any stats in aggregator. Available stats: {}".format(stat, self.match.keys())
        # use a dedicated name: Thread defines a private _stop member in some
        # Python versions, and shadowing it with an Event can break join()
        self._stop_event = Event()

    # default thread method
    def run(self):
        logger.info("Aggregator starting...")
        while True:
            try:
                # block for at most 5 seconds waiting for the next item
                temp = self.queue.get(timeout=5)
                self.writer.write(self.calculate(temp))
            except Empty:
                pass
            if self._stop_event.is_set():
                logger.info("Aggregator stopping...")
                break
        return

    # thread termination method
    def stop(self):
        self._stop_event.set()

    # aggregate functions
    def get_timestamp(self, data):
        return str(data[2])

    def get_user_agent(self, data):
        ua_string = data[0]['user-agent']
        user_agent = parse(ua_string)
        return 'mobile' if user_agent.is_mobile else 'desktop' if user_agent.is_pc else 'other'

    # main method
    def calculate(self, data):
        # call the aggregate functions and return a dictionary of stats;
        # data has the form (headers, body, timestamp)
        logger.info("Starting aggregation...")
        result = dict()
        for stat in self.stat_list:
            stat_function = self.match[stat]
            result[stat] = stat_function(data)
        return result
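

# Usage sketch (illustrative): wiring the Aggregator to a queue and a minimal
# writer. The only requirement on `writer` is a write(dict) method; the
# PrintWriter below is a stand-in, not part of this module's real pipeline.
if __name__ == '__main__':
    try:
        from queue import Queue  # Python 3
    except ImportError:
        from Queue import Queue  # Python 2

    class PrintWriter(object):
        def write(self, record):
            print(record)

    q = Queue()
    aggregator = Aggregator(q, PrintWriter(), ['timestamp', 'user-agent'])
    aggregator.start()

    # Items are (headers, body, timestamp), as expected by calculate().
    q.put(({'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 15_0 like Mac OS X)'}, '', 1700000000))

    aggregator.stop()   # signal the thread to exit after its current wait
    aggregator.join()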
|