filename (string, lengths 13-19) | text (string, lengths 134-1.04M)
---|---
the-stack_0_16060 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests deprecation warnings in a few special cases."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
class DeprecationTest(test.TestCase):
@test.mock.patch.object(logging, "warning", autospec=True)
def testDeprecatedFunction(self, mock_warning):
self.assertEqual(0, mock_warning.call_count)
tf.compat.v1.initializers.tables_initializer()
self.assertEqual(0, mock_warning.call_count)
tf.tables_initializer()
self.assertEqual(1, mock_warning.call_count)
self.assertRegexpMatches(
mock_warning.call_args[0][1],
"deprecation_test.py:")
self.assertRegexpMatches(
mock_warning.call_args[0][2], r"tables_initializer")
self.assertRegexpMatches(
mock_warning.call_args[0][3],
r"compat.v1.initializers.tables_initializer")
tf.tables_initializer()
self.assertEqual(1, mock_warning.call_count)
@test.mock.patch.object(logging, "warning", autospec=True)
def testDeprecatedClass(self, mock_warning):
value = np.array([1, 2, 3])
row_splits = np.array([1])
self.assertEqual(0, mock_warning.call_count)
tf.compat.v1.ragged.RaggedTensorValue(value, row_splits)
self.assertEqual(0, mock_warning.call_count)
tf.ragged.RaggedTensorValue(value, row_splits)
self.assertEqual(1, mock_warning.call_count)
self.assertRegexpMatches(
mock_warning.call_args[0][1],
"deprecation_test.py:")
self.assertRegexpMatches(
mock_warning.call_args[0][2], r"ragged.RaggedTensorValue")
self.assertRegexpMatches(
mock_warning.call_args[0][3],
r"compat.v1.ragged.RaggedTensorValue")
tf.ragged.RaggedTensorValue(value, row_splits)
self.assertEqual(1, mock_warning.call_count)
@test.mock.patch.object(logging, "warning", autospec=True)
def testDeprecatedFunctionEndpoint(self, mock_warning):
array = tf.IndexedSlices(
tf.compat.v1.convert_to_tensor(np.array([1, 2])),
tf.compat.v1.convert_to_tensor(np.array([0, 2])))
mask_indices = tf.compat.v1.convert_to_tensor(np.array([2]))
self.assertEqual(0, mock_warning.call_count)
tf.sparse.mask(array, mask_indices)
self.assertEqual(0, mock_warning.call_count)
tf.sparse_mask(array, mask_indices)
self.assertEqual(1, mock_warning.call_count)
self.assertRegexpMatches(
mock_warning.call_args[0][1],
"deprecation_test.py:")
self.assertRegexpMatches(
mock_warning.call_args[0][2], r"sparse_mask")
self.assertRegexpMatches(
mock_warning.call_args[0][3],
"sparse.mask")
tf.sparse_mask(array, mask_indices)
self.assertEqual(1, mock_warning.call_count)
@test.mock.patch.object(logging, "warning", autospec=True)
def testDeprecatedClassEndpoint(self, mock_warning):
self.assertEqual(0, mock_warning.call_count)
tf.io.VarLenFeature(tf.dtypes.int32)
self.assertEqual(0, mock_warning.call_count)
tf.VarLenFeature(tf.dtypes.int32)
self.assertEqual(1, mock_warning.call_count)
self.assertRegexpMatches(
mock_warning.call_args[0][1],
"deprecation_test.py:")
self.assertRegexpMatches(
mock_warning.call_args[0][2], r"VarLenFeature")
self.assertRegexpMatches(
mock_warning.call_args[0][3],
r"io.VarLenFeature")
tf.VarLenFeature(tf.dtypes.int32)
self.assertEqual(1, mock_warning.call_count)
if __name__ == "__main__":
test.main()
|
the-stack_0_16062 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from google.appengine.ext import ndb
from common.base_handler import BaseHandler, Permission
def _FormatDatetime(dt):
if not dt:
return None # pragma: no cover
else:
return dt.strftime('%Y-%m-%d %H:%M:%S UTC')
class Culprit(BaseHandler):
PERMISSION_LEVEL = Permission.ANYONE
def HandleGet(self):
"""Lists the build cycles in which the culprit caused failures."""
key = self.request.get('key', '')
culprit = ndb.Key(urlsafe=key).get()
if not culprit: # pragma: no cover
return self.CreateError('Culprit not found', 404)
def ConvertBuildInfoToADict(build_info):
return {
'master_name': build_info[0],
'builder_name': build_info[1],
'build_number': build_info[2],
}
data = {
'project_name': culprit.project_name,
'revision': culprit.revision,
'commit_position': culprit.commit_position,
'cr_notified': culprit.cr_notified,
'cr_notification_time': _FormatDatetime(culprit.cr_notification_time),
'builds': map(ConvertBuildInfoToADict, culprit.builds),
'key': key,
}
return {'template': 'waterfall/culprit.html', 'data': data}
|
the-stack_0_16063 | #!/usr/bin/python -tt
# Expense Calculator
class Expense_Calculator(object):
def Expenses(self, Age, Retirement_Age, Inflation, Current_Expenses):
self.Future_Expenses={}
for x in range(Age,Retirement_Age+1):
if x==Age:
self.Future_Expenses[Age]=Current_Expenses
else:
self.Future_Expenses[x]=self.Future_Expenses[x-1]*(1+Inflation/100)
return self.Future_Expenses
# Modify Expenses
def Modify_Expense(self, Future_Expenses, Age, Value):
self.Future_Expenses[Age]=Value
return self.Future_Expenses
# Calculate Balance available for given corpus
def Balance(self, Corpus, Age, Retirement_Age, Deposit_Rate, Inflation_rate, Expenses):
self.Current_Balance={}
for x in range(Age,Retirement_Age+1):
if x==Age:
self.Current_Balance[Age]=Corpus
else:
self.Monthly_Expenses=Expenses[x-1]/12
self.Monthly_rate=Deposit_Rate/1200
self.Current_Balance[x]=(((1 + self.Monthly_rate)**12 * (self.Monthly_rate*self.Current_Balance[x-1] - self.Monthly_Expenses) + self.Monthly_Expenses)/self.Monthly_rate)
return self.Current_Balance
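# Note on the recurrence above: with monthly rate r = Deposit_Rate/1200 and a fixed
# monthly withdrawal E = Expenses[x-1]/12, compounding a balance B for 12 months while
# withdrawing E at the end of each month gives
#     B_new = (1+r)**12 * B - E * ((1+r)**12 - 1) / r,
# which is algebraically identical to the expression used above:
#     ((1+r)**12 * (r*B - E) + E) / r.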
# Calculate Final Balance available at the end
def Final_Balance(self, Corpus, Age, Retirement_Age, Deposit_Rate, Inflation_rate, Expenses):
self.End_Balance=self.Balance(Corpus, Age, Retirement_Age, Deposit_Rate, Inflation_rate, Expenses)
return self.End_Balance[Retirement_Age]
# Calculate minimum Balance to keep handy
def Minimum_Balance(self, Age, Retirement_Age, Deposit_Rate, Inflation_rate, Expenses):
self.Initial_Corpus=Expenses[Retirement_Age]
epsilon=0.001
self.End_Balance=self.Final_Balance(self.Initial_Corpus, Age, Retirement_Age, Deposit_Rate, Inflation_rate, Expenses)
if self.End_Balance>0:
Min=self.Initial_Corpus/2
while self.Final_Balance(Min, Age, Retirement_Age, Deposit_Rate, Inflation_rate, Expenses)>0:
Min=Min/2
Max=self.Initial_Corpus
elif self.End_Balance<0:
Min=self.Initial_Corpus
Max=self.Initial_Corpus*2
while self.Final_Balance(Max, Age, Retirement_Age, Deposit_Rate, Inflation_rate, Expenses)<0:
Max=Max*2
self.Minimum_Corpus=(Min+Max)/2
while abs(self.Final_Balance(self.Minimum_Corpus, Age, Retirement_Age, Deposit_Rate, Inflation_rate, Expenses))>=epsilon:
if self.Final_Balance(self.Minimum_Corpus, Age, Retirement_Age, Deposit_Rate, Inflation_rate, Expenses)>0:
Max=self.Minimum_Corpus
elif self.Final_Balance(self.Minimum_Corpus, Age, Retirement_Age, Deposit_Rate, Inflation_rate, Expenses)<0:
Min=self.Minimum_Corpus
self.Minimum_Corpus=(Min+Max)/2
return self.Minimum_Corpus
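# Note: Minimum_Balance above works by bracketing and bisection. It starts from the
# final year's expenses as an initial guess, halves or doubles that guess until the
# final balance changes sign between Min and Max, and then repeatedly bisects the
# interval until the absolute final balance falls below epsilon.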
# Age=int(input("Enter your Age : "))
# Retirement_Age=int(input("Enter your Retirement Age : "))
# Inflation_rate=int(input("Enter the Inflation rate : "))
# Deposit_rate=int(input("Enter the Deposit rate : "))
# Corpus=int(input("Enter the Corpus : "))
# Annual_Expenses=int(input("Enter current Annual Expenses : "))
# calc=Expense_Calculator()
# Future_Expenses=calc.Expenses(Age, Retirement_Age, Inflation_rate, Annual_Expenses)
# for key in Future_Expenses:
#     print(f'Age->{key} Expenses->{Future_Expenses[key]}')
# Annual_Balance=calc.Balance(Corpus, Age, Retirement_Age, Deposit_rate, Inflation_rate, Future_Expenses)
# for key in Annual_Balance:
#     print(f'Age->{key} Balance->{Annual_Balance[key]}')
# Min_Corpus=calc.Minimum_Balance(Age, Retirement_Age, Deposit_rate, Inflation_rate, Future_Expenses)
# print(f'Minimum Corpus required is {Min_Corpus}')
#if __name__ == '__main__':
# main()
|
the-stack_0_16064 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import sys
from django.conf import settings
from django.template import loader
from django.test import TestCase
from django.test.client import RequestFactory
from django.utils.encoding import smart_str
from puppeteer_pdf.utils import (_options_to_args, make_absolute_paths, render_pdf_from_template,
render_to_temporary_file, RenderedFile, puppeteer_to_pdf)
from puppeteer_pdf.views import PDFResponse, PDFTemplateView, PDFTemplateResponse
class UnicodeContentPDFTemplateView(PDFTemplateView):
"""
PDFTemplateView with the addition of unicode content in his context.
Used in unicode content view testing.
"""
def get_context_data(self, **kwargs):
Base = super(UnicodeContentPDFTemplateView, self)
context = Base.get_context_data(**kwargs)
context['title'] = u'♥'
return context
class TestUtils(TestCase):
def setUp(self):
# Clear standard error
self._stderr = sys.stderr
sys.stderr = open(os.devnull, 'w')
self.factory = RequestFactory()
def tearDown(self):
sys.stderr = self._stderr
def test_options_to_args(self):
self.assertEqual(_options_to_args(), [])
self.assertEqual(_options_to_args(heart=u'♥', displayHeaderFooter=True,
path='file-path'),
['--displayHeaderFooter',
'--heart', u'♥',
'--path', 'file-path'])
self.assertEqual(_options_to_args(heart=u'♥', landscape=True,
path='file-path'),
['--heart', u'♥',
'--landscape',
'--path', 'file-path'])
self.assertEqual(_options_to_args(heart=u'♥', landscape=False,
path='file-path'),
['--heart', u'♥',
'--path', 'file-path'])
def test_puppeteer_to_pdf(self):
"""Should run puppeteer to generate a PDF"""
title = 'A test template.'
template = loader.get_template('sample.html')
temp_file = render_to_temporary_file(template, context={'title': title})
try:
# Single page
pdf_output = puppeteer_to_pdf(input=temp_file.name)
self.assertTrue(pdf_output.startswith(b'%PDF'), pdf_output)
# Unicode
pdf_output = puppeteer_to_pdf(input=temp_file.name, title=u'♥')
self.assertTrue(pdf_output.startswith(b'%PDF'), pdf_output)
finally:
temp_file.close()
def test_puppeteer_to_pdf_with_unicode_content(self):
"""A puppeteer_to_pdf call should render unicode content properly"""
title = u'♥'
template = loader.get_template('unicode.html')
temp_file = render_to_temporary_file(template, context={'title': title})
try:
pdf_output = puppeteer_to_pdf(input=temp_file.name)
self.assertTrue(pdf_output.startswith(b'%PDF'), pdf_output)
finally:
temp_file.close()
def test_render_to_temporary_file(self):
"""Should render a template to a temporary file."""
title = 'A test template.'
template = loader.get_template('sample.html')
temp_file = render_to_temporary_file(template, context={'title': title})
temp_file.seek(0)
saved_content = smart_str(temp_file.read())
self.assertTrue(title in saved_content)
temp_file.close()
def _render_file(self, template, context):
"""Helper method for testing rendered file deleted/persists tests."""
render = RenderedFile(template=template, context=context)
render.temporary_file.seek(0)
saved_content = smart_str(render.temporary_file.read())
return (saved_content, render.filename)
def test_rendered_file_deleted_on_production(self):
"""If PUPPETEER_PDF_DEBUG=False, delete rendered file on object close."""
title = 'A test template.'
template = loader.get_template('sample.html')
debug = getattr(settings, 'PUPPETEER_PDF_DEBUG', settings.DEBUG)
saved_content, filename = self._render_file(template=template,
context={'title': title})
# First verify temp file was rendered correctly.
self.assertTrue(title in saved_content)
# Then check if file is deleted when debug=False.
self.assertFalse(debug)
self.assertFalse(os.path.isfile(filename))
def test_rendered_file_persists_on_debug(self):
"""If PUPPETEER_PDF_DEBUG=True, the rendered file should persist."""
title = 'A test template.'
template = loader.get_template('sample.html')
with self.settings(PUPPETEER_PDF_DEBUG=True):
debug = getattr(settings, 'PUPPETEER_PDF_DEBUG', settings.DEBUG)
saved_content, filename = self._render_file(template=template,
context={'title': title})
# First verify temp file was rendered correctly.
self.assertTrue(title in saved_content)
# Then check if file persists when debug=True.
self.assertTrue(debug)
self.assertTrue(os.path.isfile(filename))
def test_render_with_null_request(self):
"""If request=None, the file should render properly."""
title = 'A test template.'
loader.get_template('sample.html')
pdf_content = render_pdf_from_template('sample.html',
header_template=None,
footer_template=None,
context={'title': title})
self.assertTrue(pdf_content.startswith(b'%PDF-'))
self.assertTrue(pdf_content.endswith(b'%%EOF'))
class TestViews(TestCase):
template = 'sample.html'
footer_template = 'footer.html'
pdf_filename = 'output.pdf'
attached_fileheader = 'attachment; filename="{0}"'
inline_fileheader = 'inline; filename="{0}"'
def test_pdf_response(self):
"""Should generate correct HttpResponse object and content type."""
# 404
response = PDFResponse(content='', status=404)
self.assertEqual(response.status_code, 404)
self.assertEqual(response.content, b'')
self.assertEqual(response['Content-Type'], 'application/pdf')
self.assertFalse(response.has_header('Content-Disposition'))
content = b'%PDF-1.4\n%%EOF'
# Without filename
response = PDFResponse(content=content)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, content)
self.assertEqual(response['Content-Type'], 'application/pdf')
self.assertFalse(response.has_header('Content-Disposition'))
# With filename
response = PDFResponse(content=content, filename="nospace.pdf")
self.assertEqual(response['Content-Disposition'],
'attachment; filename="nospace.pdf"')
response = PDFResponse(content=content, filename="one space.pdf")
self.assertEqual(response['Content-Disposition'],
'attachment; filename="one space.pdf"')
response = PDFResponse(content=content, filename="4'5\".pdf")
self.assertEqual(response['Content-Disposition'],
'attachment; filename="4\'5.pdf"')
response = PDFResponse(content=content, filename=u"♥.pdf")
try:
import unidecode
except ImportError:
filename = '?.pdf'
else:
filename = '.pdf'
self.assertEqual(response['Content-Disposition'],
'attachment; filename="{0}"'.format(filename))
# Content as a direct output
response = PDFResponse(content=content, filename="nospace.pdf",
show_content_in_browser=True)
self.assertEqual(response['Content-Disposition'],
'inline; filename="nospace.pdf"')
response = PDFResponse(content=content, filename="one space.pdf",
show_content_in_browser=True)
self.assertEqual(response['Content-Disposition'],
'inline; filename="one space.pdf"')
response = PDFResponse(content=content, filename="4'5\".pdf",
show_content_in_browser=True)
self.assertEqual(response['Content-Disposition'],
'inline; filename="4\'5.pdf"')
response = PDFResponse(content=content, filename=u"♥.pdf",
show_content_in_browser=True)
try:
import unidecode
except ImportError:
filename = '?.pdf'
else:
filename = '.pdf'
self.assertEqual(response['Content-Disposition'],
'inline; filename="{0}"'.format(filename))
# Content-Type
response = PDFResponse(content=content,
content_type='application/x-pdf')
self.assertEqual(response['Content-Type'], 'application/x-pdf')
def test_pdf_template_response(self, show_content=False):
"""Test PDFTemplateResponse."""
context = {'title': 'Heading'}
request = RequestFactory().get('/')
response = PDFTemplateResponse(request=request,
template=self.template,
context=context,
show_content_in_browser=show_content)
self.assertEqual(response._request, request)
self.assertEqual(response.template_name, self.template)
self.assertEqual(response.context_data, context)
self.assertEqual(response.filename, None)
self.assertEqual(response.header_template, None)
self.assertEqual(response.footer_template, None)
self.assertEqual(response.cmd_options, {})
self.assertFalse(response.has_header('Content-Disposition'))
# Render to temporary file
template = loader.get_template(self.template)
tempfile = render_to_temporary_file(template, context=context)
tempfile.seek(0)
html_content = smart_str(tempfile.read())
self.assertTrue(html_content.startswith('<html>'))
self.assertTrue('<h1>{title}</h1>'.format(**context)
in html_content)
pdf_content = response.rendered_content
self.assertTrue(pdf_content.startswith(b'%PDF-'))
self.assertTrue(pdf_content.endswith(b'%%EOF'))
# Footer
cmd_options = {}
response = PDFTemplateResponse(request=request,
template=self.template,
context=context,
filename=self.pdf_filename,
show_content_in_browser=show_content,
footer_template=self.footer_template,
cmd_options=cmd_options)
self.assertEqual(response.filename, self.pdf_filename)
self.assertEqual(response.header_template, None)
self.assertEqual(response.footer_template, self.footer_template)
self.assertEqual(response.cmd_options, cmd_options)
self.assertTrue(response.has_header('Content-Disposition'))
footer_template = loader.get_template(self.footer_template)
tempfile = render_to_temporary_file(footer_template, context=context,
request=request)
tempfile.seek(0)
footer_content = smart_str(tempfile.read())
footer_content = make_absolute_paths(footer_content)
media_url = 'file://{0}/'.format(settings.MEDIA_ROOT)
self.assertTrue(media_url in footer_content, True)
static_url = 'file://{0}/'.format(settings.STATIC_ROOT)
self.assertTrue(static_url in footer_content, True)
def test_pdf_template_response_to_browser(self):
self.test_pdf_template_response(show_content=True)
def test_pdf_template_view(self, show_content=False):
"""Test PDFTemplateView."""
view = PDFTemplateView.as_view(filename=self.pdf_filename,
show_content_in_browser=show_content,
template_name=self.template,
footer_template=self.footer_template)
# As PDF
request = RequestFactory().get('/')
response = view(request)
self.assertEqual(response.status_code, 200)
response.render()
fileheader = self.attached_fileheader
if show_content:
fileheader = self.inline_fileheader
self.assertEqual(response['Content-Disposition'],
fileheader.format(self.pdf_filename))
self.assertTrue(response.content.startswith(b'%PDF-'))
self.assertTrue(response.content.endswith(b'%%EOF'))
# As HTML
request = RequestFactory().get('/?as=html')
response = view(request)
self.assertEqual(response.status_code, 200)
response.render()
self.assertFalse(response.has_header('Content-Disposition'))
self.assertTrue(response.content.startswith(b'<html>'))
# POST
request = RequestFactory().post('/')
response = view(request)
self.assertEqual(response.status_code, 405)
def test_pdf_template_view_to_browser(self):
self.test_pdf_template_view(show_content=True)
def test_pdf_template_view_unicode(self, show_content=False):
"""Test PDFTemplateView with unicode content."""
view = UnicodeContentPDFTemplateView.as_view(
filename=self.pdf_filename,
show_content_in_browser=show_content,
template_name=self.template
)
# As PDF
request = RequestFactory().get('/')
response = view(request)
self.assertEqual(response.status_code, 200)
response.render()
fileheader = self.attached_fileheader
if show_content:
fileheader = self.inline_fileheader
self.assertEqual(response['Content-Disposition'],
fileheader.format(self.pdf_filename))
# not sure how we can test this as the contents is all encoded...
# best we can do for the moment is check it's a pdf and it worked.
# self.assertTrue('☃' in response.content)
self.assertTrue(response.content.startswith(b'%PDF-'))
self.assertTrue(response.content.endswith(b'%%EOF'))
def test_pdf_template_view_unicode_to_browser(self):
self.test_pdf_template_view_unicode(show_content=True)
def test_get_cmd_options(self):
# Default cmd_options
view = PDFTemplateView()
self.assertEqual(view.cmd_options, PDFTemplateView.cmd_options)
self.assertEqual(PDFTemplateView.cmd_options, {})
# Instantiate with new cmd_options
cmd_options = {'orientation': 'landscape'}
view = PDFTemplateView(cmd_options=cmd_options)
self.assertEqual(view.cmd_options, cmd_options)
self.assertEqual(PDFTemplateView.cmd_options, {})
# Update local instance of cmd_options
view = PDFTemplateView()
view.cmd_options.update(cmd_options)
self.assertEqual(view.cmd_options, cmd_options)
self.assertEqual(PDFTemplateView.cmd_options, {})
|
the-stack_0_16065 | import os,sys
sys.path.append('../')
import numpy as np
import pandas as pd
from keras.callbacks import EarlyStopping
from keras.layers.advanced_activations import ReLU, PReLU
from keras.layers.core import Dense, Dropout
from keras.layers.normalization import BatchNormalization
from keras.models import Sequential, load_model
from keras.optimizers import SGD, Adam
from keras.utils import np_utils
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score, f1_score
from src.model import Model
from src.util import Util
from scipy.sparse import issparse
# Suppress TensorFlow warning output (os is already imported above)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
class ModelMLP(Model):
def __init__(self, run_fold_name, **params):
super().__init__(run_fold_name, params)
def train(self, tr_x, tr_y, va_x=None, va_y=None):
validation = va_x is not None
# Parameters
nb_classes = 5
input_dropout = self.params['input_dropout']
hidden_layers = int(self.params['hidden_layers'])
hidden_units = int(self.params['hidden_units'])
hidden_activation = self.params['hidden_activation']
hidden_dropout = self.params['hidden_dropout']
batch_norm = self.params['batch_norm']
optimizer_type = self.params['optimizer']['type']
optimizer_lr = self.params['optimizer']['lr']
batch_size = int(self.params['batch_size'])
nb_epoch = int(self.params['nb_epoch'])
# Standardization
if issparse(tr_x):
scaler = StandardScaler(with_mean=False)
else:
scaler = StandardScaler()
scaler.fit(tr_x)
tr_x = scaler.transform(tr_x)
tr_y = np_utils.to_categorical(tr_y, num_classes=nb_classes)
if validation:
va_x = scaler.transform(va_x)
va_y = np_utils.to_categorical(va_y, num_classes=nb_classes)
self.scaler = scaler
# Define the Sequential model
self.model = Sequential()
# input dropout
self.model.add(Dropout(input_dropout, input_shape=(tr_x.shape[1],)))
# Hidden layers
for i in range(hidden_layers):
self.model.add(Dense(hidden_units))
if batch_norm == 'before_act':
self.model.add(BatchNormalization())
if hidden_activation == 'prelu':
self.model.add(PReLU())
elif hidden_activation == 'relu':
self.model.add(ReLU())
else:
raise NotImplementedError
self.model.add(Dropout(hidden_dropout))
# Output layer
self.model.add(Dense(nb_classes, activation='softmax'))
# Optimizer
if optimizer_type == 'sgd':
optimizer = SGD(lr=optimizer_lr, decay=1e-6, momentum=0.9, nesterov=True)
elif optimizer_type == 'adam':
optimizer = Adam(lr=optimizer_lr, beta_1=0.9, beta_2=0.999, decay=0.)
else:
raise NotImplementedError
# Configure the loss function, evaluation metrics, etc.
self.model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
# Run training
if validation:
# How many epochs to wait without a validation-score improvement before stopping
patience = 12
early_stopping = EarlyStopping(monitor='val_loss', patience=patience,
verbose=2, restore_best_weights=True)
history = self.model.fit(tr_x, tr_y, epochs=nb_epoch, batch_size=batch_size, verbose=2,
validation_data=(va_x, va_y), callbacks=[early_stopping])
else:
history = self.model.fit(tr_x, tr_y, epochs=nb_epoch, batch_size=batch_size, verbose=2)
def predict(self, te_x):
te_x = self.scaler.transform(te_x)
y_pred = self.model.predict(te_x)
return y_pred
def score(self, te_x, te_y):
y_pred = self.predict(te_x)
#print(classification_report(te_y, y_pred))
return f1_score(np.identity(5)[te_y], np.identity(5)[np.argmax(y_pred, axis=1)], average='samples')
def save_model(self, feature):
model_path = os.path.join(f'../model/model/{feature}', f'{self.run_fold_name}.h5')
scaler_path = os.path.join(f'../model/model/{feature}', f'{self.run_fold_name}-scaler.pkl')
os.makedirs(os.path.dirname(model_path), exist_ok=True)
self.model.save(model_path)
Util.dump(self.scaler, scaler_path)
def load_model(self, feature):
model_path = os.path.join(f'../model/model/{feature}', f'{self.run_fold_name}.h5')
scaler_path = os.path.join(f'../model/model/{feature}', f'{self.run_fold_name}-scaler.pkl')
self.model = load_model(model_path)
self.scaler = Util.load(scaler_path)
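# Illustrative only: an example of the hyperparameter dict this class expects. The keys
# match what train() reads above; the values are placeholders, not tuned settings.
#
# params = {
#     'input_dropout': 0.1,
#     'hidden_layers': 3,
#     'hidden_units': 128,
#     'hidden_activation': 'prelu',               # or 'relu'
#     'hidden_dropout': 0.3,
#     'batch_norm': 'before_act',                 # any other value skips BatchNormalization
#     'optimizer': {'type': 'adam', 'lr': 0.001}, # or {'type': 'sgd', 'lr': ...}
#     'batch_size': 64,
#     'nb_epoch': 100,
# }
# model = ModelMLP('mlp-fold0', **params)
# model.train(tr_x, tr_y, va_x, va_y)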
|
the-stack_0_16067 | from typing import List, Dict, Any, Optional
import logging
from pytrec_eval import RelevanceEvaluator
from haystack import MultiLabel, Label
from farm.evaluation.squad_evaluation import compute_f1 as calculate_f1_str
from farm.evaluation.squad_evaluation import compute_exact as calculate_em_str
logger = logging.getLogger(__name__)
SUM_PREFIX = 'sum_'
class EvalRewriter:
"""
TODO
This is a pipeline node that should be placed after a node that returns a `query` and `original_query`, e.g.
Rewriter, in order to assess its performance. Performance metrics are stored in this class and updated as each
sample passes through it. To view the results of the evaluation, call EvalRewriter.print().
"""
def __init__(self):
self.outgoing_edges = 1
self.init_counts()
def init_counts(self):
self.f1_micro = 0
self.query_count = 0
def run(self, **kwargs):
query = kwargs.get('query', None)
original_query = kwargs.get('original_query', None)
self.query_count += 1
if original_query is None or query is None:
raise KeyError(f'The previous component should provide both the `query` and `original_query`, but args '
f'given are: {kwargs}')
return {**kwargs}, "output_1"
class EvalTREC:
"""
This is a pipeline node that should be placed after a node that returns a List of Document, e.g., Retriever or
Ranker, in order to assess its performance. Performance metrics are stored in this class and updated as each
sample passes through it. To view the results of the evaluation, call EvalTREC.print().
"""
def __init__(self,
debug: bool = False,
top_k_eval_documents: int = 10,
metrics: set = None,
name="EvalTREC",
):
"""
@param metrics
Please provide which metrics to use. Please consult the trec_eval documentation
(https://github.com/usnistgov/trec_eval) for the available metrics.
:param debug:
When True, the results for each sample and its evaluation will be stored in self.log
:param top_k_eval_documents:
calculate eval metrics for top k results
"""
self.metrics = metrics if metrics else {'recall', 'ndcg', 'map', 'map_cut', 'recip_rank', 'ndcg_cut.1,3'}
self.outgoing_edges = 1
self.init_counts()
self.debug = debug
self.log: List = []
self.top_k_eval_documents = top_k_eval_documents
self.name = name
self.too_few_docs_warning = False
self.top_k_used = 0
def init_counts(self):
self.correct_retrieval_count = 0
self.query_count = 0
self.has_answer_count = 0
self.has_answer_correct = 0
self.has_answer_recall = 0
self.no_answer_count = 0
self.recall = 0.0
self.mean_reciprocal_rank = 0.0
self.has_answer_mean_reciprocal_rank = 0.0
self.reciprocal_rank_sum = 0.0
self.has_answer_reciprocal_rank_sum = 0.0
# For mean average precision
self.mean_average_precision = 0.0
self.average_precision_sum = 0.0
# Reset sum parameters
self.pytrec_eval_sums = {}
def run(self, documents, labels: dict, top_k_eval_documents: Optional[int] = None, **kwargs):
"""Run this node on one sample and its labels"""
self.query_count += 1
if not top_k_eval_documents:
top_k_eval_documents = self.top_k_eval_documents
if not self.top_k_used:
self.top_k_used = top_k_eval_documents
elif self.top_k_used != top_k_eval_documents:
logger.warning(f"EvalDocuments was last run with top_k_eval_documents={self.top_k_used} but is "
f"being run again with top_k_eval_documents={self.top_k_eval_documents}. "
f"The evaluation counter is being reset from this point so that the evaluation "
f"metrics are interpretable.")
self.init_counts()
if len(documents) < top_k_eval_documents and not self.too_few_docs_warning:
logger.warning(f"EvalDocuments is being provided less candidate documents than top_k_eval_documents "
f"(currently set to {top_k_eval_documents}).")
self.too_few_docs_warning = True
qrels = kwargs.get('qrels', None)
qrels = {k: int(rank) for k, rank in qrels.items()}
# The RelevanceEvaluator wants a dictionary with query id keys. What the ID is, is irrelevant. It is just
# used to retrieve the results.
query_id = 'q1'
evaluator = RelevanceEvaluator({query_id: qrels}, self.metrics)
# The run should have the format {query_id: {doc_id: rank_score}}
run = {query_id: {d.id: d.score for d in documents}}
pytrec_results = evaluator.evaluate(run)[query_id]
retrieved_reciprocal_rank = pytrec_results['recip_rank']
# TODO MAP computed by pytrec_eval differs from Haystack's self.average_precision_retrieved...
average_precision = pytrec_results['map']
for k, score in pytrec_results.items():
sum_key = f"{SUM_PREFIX}{k}"
if sum_key not in self.pytrec_eval_sums:
self.pytrec_eval_sums[sum_key] = 0
self.pytrec_eval_sums[sum_key] += score
self.reciprocal_rank_sum += retrieved_reciprocal_rank
self.average_precision_sum += average_precision
correct_retrieval = True if retrieved_reciprocal_rank > 0 else False
self.has_answer_count += 1
self.has_answer_correct += int(correct_retrieval)
self.has_answer_reciprocal_rank_sum += retrieved_reciprocal_rank
self.has_answer_recall = self.has_answer_correct / self.has_answer_count
self.has_answer_mean_reciprocal_rank = self.has_answer_reciprocal_rank_sum / self.has_answer_count
self.correct_retrieval_count += correct_retrieval
self.recall = self.correct_retrieval_count / self.query_count
self.mean_reciprocal_rank = self.reciprocal_rank_sum / self.query_count
self.mean_average_precision = self.average_precision_sum / self.query_count
self.top_k_used = top_k_eval_documents
return_dict = {"documents": documents,
"labels": labels,
"correct_retrieval": correct_retrieval,
"retrieved_reciprocal_rank": retrieved_reciprocal_rank,
"average_precision": average_precision,
"pytrec_eval_results": pytrec_results,
**kwargs}
if self.debug:
self.log.append(return_dict)
return return_dict, "output_1"
def print(self):
"""Print the evaluation results"""
print(self.name)
print("-----------------")
for key, sum_score in self.pytrec_eval_sums.items():
print(f"{key.replace(SUM_PREFIX, '')}: {(sum_score / self.query_count):.4f}")
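# For reference, a minimal standalone sketch of the pytrec_eval contract that
# EvalTREC.run() relies on (doc ids and scores below are made up):
#
#   from pytrec_eval import RelevanceEvaluator
#   qrels = {'q1': {'doc_a': 1, 'doc_b': 0}}      # graded relevance per doc id
#   run = {'q1': {'doc_a': 12.3, 'doc_b': 7.1}}   # retrieval score per doc id
#   results = RelevanceEvaluator(qrels, {'map', 'recip_rank'}).evaluate(run)['q1']
#   # e.g. results['recip_rank'] == 1.0, since the only relevant doc is ranked first
#
# EvalTREC.run() builds exactly these two dictionaries (under the single query id 'q1')
# from the incoming qrels kwarg and the retrieved documents.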
class EvalDocuments:
"""
This is a pipeline node that should be placed after a node that returns a List of Document, e.g., Retriever or
Ranker, in order to assess its performance. Performance metrics are stored in this class and updated as each
sample passes through it. To view the results of the evaluation, call EvalDocuments.print(). Note that results
from this Node may differ from that when calling Retriever.eval() since that is a closed domain evaluation. Have
a look at our evaluation tutorial for more info about open vs closed domain eval (
https://haystack.deepset.ai/docs/latest/tutorial5md).
"""
def __init__(self,
debug: bool=False,
open_domain: bool=True,
top_k_eval_documents: int = 10,
name="EvalDocuments",
):
"""
:param open_domain: When True, a document is considered correctly retrieved so long as the answer string can be found within it.
When False, correct retrieval is evaluated based on document_id.
:param debug: When True, a record of each sample and its evaluation will be stored in EvalDocuments.log
:param top_k: calculate eval metrics for top k results, e.g., recall@k
"""
self.outgoing_edges = 1
self.init_counts()
self.no_answer_warning = False
self.debug = debug
self.log: List = []
self.open_domain = open_domain
self.top_k_eval_documents = top_k_eval_documents
self.name = name
self.too_few_docs_warning = False
self.top_k_used = 0
def init_counts(self):
self.correct_retrieval_count = 0
self.query_count = 0
self.has_answer_count = 0
self.has_answer_correct = 0
self.has_answer_recall = 0
self.no_answer_count = 0
self.recall = 0.0
self.mean_reciprocal_rank = 0.0
self.has_answer_mean_reciprocal_rank = 0.0
self.reciprocal_rank_sum = 0.0
self.has_answer_reciprocal_rank_sum = 0.0
# For mean average precision
self.mean_average_precision = 0.0
self.average_precision_sum = 0.0
def run(self, documents, labels: dict, top_k_eval_documents: Optional[int]=None, **kwargs):
"""Run this node on one sample and its labels"""
self.query_count += 1
if not top_k_eval_documents:
top_k_eval_documents = self.top_k_eval_documents
if not self.top_k_used:
self.top_k_used = top_k_eval_documents
elif self.top_k_used != top_k_eval_documents:
logger.warning(f"EvalDocuments was last run with top_k_eval_documents={self.top_k_used} but is "
f"being run again with top_k_eval_documents={self.top_k_eval_documents}. "
f"The evaluation counter is being reset from this point so that the evaluation "
f"metrics are interpretable.")
self.init_counts()
if len(documents) < top_k_eval_documents and not self.too_few_docs_warning:
logger.warning(f"EvalDocuments is being provided less candidate documents than top_k_eval_documents "
f"(currently set to {top_k_eval_documents}).")
self.too_few_docs_warning = True
# TODO retriever_labels is currently a Multilabel object but should eventually be a RetrieverLabel object
retriever_labels = get_label(labels, kwargs["node_id"])
# Haystack native way: If there are answer span annotations in the labels
if retriever_labels.no_answer: # If this sample is impossible to answer and expects a no_answer response
self.no_answer_count += 1
correct_retrieval = 1
retrieved_reciprocal_rank = 1
self.reciprocal_rank_sum += 1
average_precision = 1
self.average_precision_sum += average_precision
if not self.no_answer_warning:
self.no_answer_warning = True
logger.warning("There seem to be empty string labels in the dataset suggesting that there "
"are samples with is_impossible=True. "
"Retrieval of these samples is always treated as correct.")
else:
self.has_answer_count += 1
retrieved_reciprocal_rank = self.reciprocal_rank_retrieved(retriever_labels, documents,
top_k_eval_documents)
self.reciprocal_rank_sum += retrieved_reciprocal_rank
correct_retrieval = True if retrieved_reciprocal_rank > 0 else False
self.has_answer_correct += int(correct_retrieval)
self.has_answer_reciprocal_rank_sum += retrieved_reciprocal_rank
self.has_answer_recall = self.has_answer_correct / self.has_answer_count
self.has_answer_mean_reciprocal_rank = self.has_answer_reciprocal_rank_sum / self.has_answer_count
# For computing MAP
average_precision = self.average_precision_retrieved(retriever_labels, documents, top_k_eval_documents)
self.average_precision_sum += average_precision
self.correct_retrieval_count += correct_retrieval
self.recall = self.correct_retrieval_count / self.query_count
self.mean_reciprocal_rank = self.reciprocal_rank_sum / self.query_count
self.mean_average_precision = self.average_precision_sum / self.query_count
self.top_k_used = top_k_eval_documents
return_dict = {"documents": documents,
"labels": labels,
"correct_retrieval": correct_retrieval,
"retrieved_reciprocal_rank": retrieved_reciprocal_rank,
"average_precision": average_precision,
**kwargs}
if self.debug:
self.log.append(return_dict)
return return_dict, "output_1"
def is_correctly_retrieved(self, retriever_labels, predictions, top_k_eval_documents):
return self.reciprocal_rank_retrieved(retriever_labels, predictions, top_k_eval_documents) > 0
def reciprocal_rank_retrieved(self, retriever_labels, predictions, top_k_eval_documents):
if self.open_domain:
for label in retriever_labels.multiple_answers:
for rank, p in enumerate(predictions[:top_k_eval_documents]):
if label.lower() in p.text.lower():
return 1/(rank+1)
return 0
else:
prediction_ids = [p.id for p in predictions[:top_k_eval_documents]]
label_ids = retriever_labels.multiple_document_ids
for rank, p in enumerate(prediction_ids):
if p in label_ids:
return 1/(rank+1)
return 0
def average_precision_retrieved(self, retriever_labels, predictions, top_k_eval_documents):
prediction_ids = [p.id for p in predictions[:top_k_eval_documents]]
label_ids = set(retriever_labels.multiple_document_ids)
correct = 0
total = 0
for rank, p in enumerate(prediction_ids):
if p in label_ids:
correct += 1
total += correct / (rank + 1)
return total / correct if correct > 0 else 0
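# Worked example for average_precision_retrieved above: with label_ids = {d2, d5}
# and predictions [d1, d2, d3, d5], the hits sit at ranks 2 and 4, so
# total = 1/2 + 2/4 = 1.0 and the returned AP is total / correct = 1.0 / 2 = 0.5.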
def print(self):
"""Print the evaluation results"""
print(self.name)
print("-----------------")
if self.no_answer_count:
print(
f"has_answer recall@{self.top_k_used}: {self.has_answer_recall:.4f} ({self.has_answer_correct}/{self.has_answer_count})")
print(
f"no_answer recall@{self.top_k_used}: 1.00 ({self.no_answer_count}/{self.no_answer_count}) (no_answer samples are always treated as correctly retrieved)")
print(
f"has_answer mean_reciprocal_rank@{self.top_k_used}: {self.has_answer_mean_reciprocal_rank:.4f}")
print(
f"no_answer mean_reciprocal_rank@{self.top_k_used}: 1.0000 (no_answer samples are always treated as correctly retrieved at rank 1)")
print(f"recall@{self.top_k_used}: {self.recall:.4f} ({self.correct_retrieval_count} / {self.query_count})")
print(f"mean_reciprocal_rank@{self.top_k_used}: {self.mean_reciprocal_rank:.4f}")
print(f"mean_average_precision@{self.top_k_used}: {self.mean_average_precision:.4f}")
class EvalAnswers:
"""
This is a pipeline node that should be placed after a Reader in order to assess the performance of the Reader
individually or to assess the extractive QA performance of the whole pipeline. Performance metrics are stored in
this class and updated as each sample passes through it. To view the results of the evaluation, call EvalAnswers.print().
Note that results from this Node may differ from that when calling Reader.eval()
since that is a closed domain evaluation. Have a look at our evaluation tutorial for more info about
open vs closed domain eval (https://haystack.deepset.ai/docs/latest/tutorial5md).
"""
def __init__(self, skip_incorrect_retrieval: bool=True, open_domain: bool=True, debug: bool=False):
"""
:param skip_incorrect_retrieval: When set to True, this eval will ignore the cases where the retriever returned no correct documents
:param open_domain: When True, extracted answers are evaluated purely on string similarity rather than the position of the extracted answer
:param debug: When True, a record of each sample and its evaluation will be stored in EvalAnswers.log
"""
self.outgoing_edges = 1
self.init_counts()
self.log: List = []
self.debug = debug
self.skip_incorrect_retrieval = skip_incorrect_retrieval
self.open_domain = open_domain
def init_counts(self):
self.query_count = 0
self.correct_retrieval_count = 0
self.no_answer_count = 0
self.has_answer_count = 0
self.top_1_no_answer_count = 0
self.top_1_em_count = 0
self.top_k_em_count = 0
self.top_1_f1_sum = 0
self.top_k_f1_sum = 0
self.top_1_no_answer = 0
self.top_1_em = 0.0
self.top_k_em = 0.0
self.top_1_f1 = 0.0
self.top_k_f1 = 0.0
def run(self, labels, answers, **kwargs):
"""Run this node on one sample and its labels"""
self.query_count += 1
predictions = answers
skip = self.skip_incorrect_retrieval and not kwargs.get("correct_retrieval")
if predictions and not skip:
self.correct_retrieval_count += 1
multi_labels = get_label(labels, kwargs["node_id"])
# If this sample is impossible to answer and expects a no_answer response
if multi_labels.no_answer:
self.no_answer_count += 1
if predictions[0]["answer"] is None:
self.top_1_no_answer_count += 1
if self.debug:
self.log.append({"predictions": predictions,
"gold_labels": multi_labels,
"top_1_no_answer": int(predictions[0]["answer"] is None),
})
self.update_no_answer_metrics()
# If there are answer span annotations in the labels
else:
self.has_answer_count += 1
predictions = [p for p in predictions if p["answer"]]
top_1_em, top_1_f1, top_k_em, top_k_f1 = self.evaluate_extraction(multi_labels, predictions)
if self.debug:
self.log.append({"predictions": predictions,
"gold_labels": multi_labels,
"top_k_f1": top_k_f1,
"top_k_em": top_k_em
})
self.top_1_em_count += top_1_em
self.top_1_f1_sum += top_1_f1
self.top_k_em_count += top_k_em
self.top_k_f1_sum += top_k_f1
self.update_has_answer_metrics()
return {**kwargs}, "output_1"
def evaluate_extraction(self, gold_labels, predictions):
if self.open_domain:
gold_labels_list = gold_labels.multiple_answers
predictions_str = [p["answer"] for p in predictions]
top_1_em = calculate_em_str_multi(gold_labels_list, predictions_str[0])
top_1_f1 = calculate_f1_str_multi(gold_labels_list, predictions_str[0])
top_k_em = max([calculate_em_str_multi(gold_labels_list, p) for p in predictions_str])
top_k_f1 = max([calculate_f1_str_multi(gold_labels_list, p) for p in predictions_str])
else:
logger.error("Closed Domain Reader Evaluation not yet implemented")
return 0,0,0,0
return top_1_em, top_1_f1, top_k_em, top_k_f1
def update_has_answer_metrics(self):
self.top_1_em = self.top_1_em_count / self.has_answer_count
self.top_k_em = self.top_k_em_count / self.has_answer_count
self.top_1_f1 = self.top_1_f1_sum / self.has_answer_count
self.top_k_f1 = self.top_k_f1_sum / self.has_answer_count
def update_no_answer_metrics(self):
self.top_1_no_answer = self.top_1_no_answer_count / self.no_answer_count
def print(self, mode):
"""Print the evaluation results"""
if mode == "reader":
print("Reader")
print("-----------------")
# print(f"answer in retrieved docs: {correct_retrieval}")
print(f"has answer queries: {self.has_answer_count}")
print(f"top 1 EM: {self.top_1_em:.4f}")
print(f"top k EM: {self.top_k_em:.4f}")
print(f"top 1 F1: {self.top_1_f1:.4f}")
print(f"top k F1: {self.top_k_f1:.4f}")
if self.no_answer_count:
print()
print(f"no_answer queries: {self.no_answer_count}")
print(f"top 1 no_answer accuracy: {self.top_1_no_answer:.4f}")
elif mode == "pipeline":
print("Pipeline")
print("-----------------")
pipeline_top_1_em = (self.top_1_em_count + self.top_1_no_answer_count) / self.query_count
pipeline_top_k_em = (self.top_k_em_count + self.no_answer_count) / self.query_count
pipeline_top_1_f1 = (self.top_1_f1_sum + self.top_1_no_answer_count) / self.query_count
pipeline_top_k_f1 = (self.top_k_f1_sum + self.no_answer_count) / self.query_count
print(f"queries: {self.query_count}")
print(f"top 1 EM: {pipeline_top_1_em:.4f}")
print(f"top k EM: {pipeline_top_k_em:.4f}")
print(f"top 1 F1: {pipeline_top_1_f1:.4f}")
print(f"top k F1: {pipeline_top_k_f1:.4f}")
if self.no_answer_count:
print(
"(top k results are likely inflated since the Reader always returns a no_answer prediction in its top k)"
)
def get_label(labels, node_id):
if type(labels) in [Label, MultiLabel]:
ret = labels
# If labels is a dict, then fetch the value using node_id (e.g. "EvalRetriever") as the key
else:
ret = labels[node_id]
return ret
def calculate_em_str_multi(gold_labels, prediction):
for gold_label in gold_labels:
result = calculate_em_str(gold_label, prediction)
if result == 1.0:
return 1.0
return 0.0
def calculate_f1_str_multi(gold_labels, prediction):
results = []
for gold_label in gold_labels:
result = calculate_f1_str(gold_label, prediction)
results.append(result)
return max(results)
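# Example (assuming the SQuAD-style normalization and token-level scoring implemented
# by compute_exact/compute_f1): for gold_labels = ["black cat"] and prediction = "cat",
# exact match is 0.0 while F1 = 2 * (1 * 0.5) / (1 + 0.5) = 2/3 (precision 1/1,
# recall 1/2). Both *_multi helpers take the best score over all gold labels.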
def calculate_reader_metrics(metric_counts: Dict[str, float], correct_retrievals: int):
number_of_has_answer = correct_retrievals - metric_counts["number_of_no_answer"]
metrics = {
"reader_top1_accuracy" : metric_counts["correct_readings_top1"] / correct_retrievals,
"reader_top1_accuracy_has_answer" : metric_counts["correct_readings_top1_has_answer"] / number_of_has_answer,
"reader_topk_accuracy" : metric_counts["correct_readings_topk"] / correct_retrievals,
"reader_topk_accuracy_has_answer" : metric_counts["correct_readings_topk_has_answer"] / number_of_has_answer,
"reader_top1_em" : metric_counts["exact_matches_top1"] / correct_retrievals,
"reader_top1_em_has_answer" : metric_counts["exact_matches_top1_has_answer"] / number_of_has_answer,
"reader_topk_em" : metric_counts["exact_matches_topk"] / correct_retrievals,
"reader_topk_em_has_answer" : metric_counts["exact_matches_topk_has_answer"] / number_of_has_answer,
"reader_top1_f1" : metric_counts["summed_f1_top1"] / correct_retrievals,
"reader_top1_f1_has_answer" : metric_counts["summed_f1_top1_has_answer"] / number_of_has_answer,
"reader_topk_f1" : metric_counts["summed_f1_topk"] / correct_retrievals,
"reader_topk_f1_has_answer" : metric_counts["summed_f1_topk_has_answer"] / number_of_has_answer,
}
if metric_counts["number_of_no_answer"]:
metrics["reader_top1_no_answer_accuracy"] = metric_counts["correct_no_answers_top1"] / metric_counts[
"number_of_no_answer"]
metrics["reader_topk_no_answer_accuracy"] = metric_counts["correct_no_answers_topk"] / metric_counts[
"number_of_no_answer"]
else:
metrics["reader_top1_no_answer_accuracy"] = None # type: ignore
metrics["reader_topk_no_answer_accuracy"] = None # type: ignore
return metrics
def calculate_average_precision_and_reciprocal_rank(questions_with_docs: List[dict]):
questions_with_correct_doc = []
summed_avg_precision_retriever = 0.0
summed_reciprocal_rank_retriever = 0.0
for question in questions_with_docs:
number_relevant_docs = len(set(question["question"].multiple_document_ids))
found_relevant_doc = False
relevant_docs_found = 0
current_avg_precision = 0.0
for doc_idx, doc in enumerate(question["docs"]):
# check if correct doc among retrieved docs
if doc.id in question["question"].multiple_document_ids:
if not found_relevant_doc:
summed_reciprocal_rank_retriever += 1 / (doc_idx + 1)
relevant_docs_found += 1
found_relevant_doc = True
current_avg_precision += relevant_docs_found / (doc_idx + 1)
if relevant_docs_found == number_relevant_docs:
break
if found_relevant_doc:
all_relevant_docs = len(set(question["question"].multiple_document_ids))
summed_avg_precision_retriever += current_avg_precision / all_relevant_docs
if found_relevant_doc:
questions_with_correct_doc.append({
"question": question["question"],
"docs": question["docs"]
})
return questions_with_correct_doc, summed_avg_precision_retriever, summed_reciprocal_rank_retriever
def eval_counts_reader(question: MultiLabel, predicted_answers: Dict[str, Any], metric_counts: Dict[str, float]):
# Calculates evaluation metrics for one question and adds results to counter.
# check if question is answerable
if not question.no_answer:
found_answer = False
found_em = False
best_f1 = 0
for answer_idx, answer in enumerate(predicted_answers["answers"]):
if answer["document_id"] in question.multiple_document_ids:
gold_spans = [{"offset_start": question.multiple_offset_start_in_docs[i],
"offset_end": question.multiple_offset_start_in_docs[i] + len(question.multiple_answers[i]),
"doc_id": question.multiple_document_ids[i]} for i in range(len(question.multiple_answers))] # type: ignore
predicted_span = {"offset_start": answer["offset_start_in_doc"],
"offset_end": answer["offset_end_in_doc"],
"doc_id": answer["document_id"]}
best_f1_in_gold_spans = 0
for gold_span in gold_spans:
if gold_span["doc_id"] == predicted_span["doc_id"]:
# check if overlap between gold answer and predicted answer
if not found_answer:
metric_counts, found_answer = _count_overlap(gold_span, predicted_span, metric_counts, answer_idx) # type: ignore
# check for exact match
if not found_em:
metric_counts, found_em = _count_exact_match(gold_span, predicted_span, metric_counts, answer_idx) # type: ignore
# calculate f1
current_f1 = _calculate_f1(gold_span, predicted_span) # type: ignore
if current_f1 > best_f1_in_gold_spans:
best_f1_in_gold_spans = current_f1
# top-1 f1
if answer_idx == 0:
metric_counts["summed_f1_top1"] += best_f1_in_gold_spans
metric_counts["summed_f1_top1_has_answer"] += best_f1_in_gold_spans
if best_f1_in_gold_spans > best_f1:
best_f1 = best_f1_in_gold_spans
if found_em:
break
# top-k answers: use best f1-score
metric_counts["summed_f1_topk"] += best_f1
metric_counts["summed_f1_topk_has_answer"] += best_f1
# question not answerable
else:
metric_counts["number_of_no_answer"] += 1
metric_counts = _count_no_answer(predicted_answers["answers"], metric_counts)
return metric_counts
def eval_counts_reader_batch(pred: Dict[str, Any], metric_counts: Dict[str, float]):
# Calculates evaluation metrics for one question and adds results to counter.
# check if question is answerable
if not pred["label"].no_answer:
found_answer = False
found_em = False
best_f1 = 0
for answer_idx, answer in enumerate(pred["answers"]):
# check if correct document:
if answer["document_id"] in pred["label"].multiple_document_ids:
gold_spans = [{"offset_start": pred["label"].multiple_offset_start_in_docs[i],
"offset_end": pred["label"].multiple_offset_start_in_docs[i] + len(pred["label"].multiple_answers[i]),
"doc_id": pred["label"].multiple_document_ids[i]}
for i in range(len(pred["label"].multiple_answers))] # type: ignore
predicted_span = {"offset_start": answer["offset_start_in_doc"],
"offset_end": answer["offset_end_in_doc"],
"doc_id": answer["document_id"]}
best_f1_in_gold_spans = 0
for gold_span in gold_spans:
if gold_span["doc_id"] == predicted_span["doc_id"]:
# check if overlap between gold answer and predicted answer
if not found_answer:
metric_counts, found_answer = _count_overlap(
gold_span, predicted_span, metric_counts, answer_idx
)
# check for exact match
if not found_em:
metric_counts, found_em = _count_exact_match(
gold_span, predicted_span, metric_counts, answer_idx
)
# calculate f1
current_f1 = _calculate_f1(gold_span, predicted_span)
if current_f1 > best_f1_in_gold_spans:
best_f1_in_gold_spans = current_f1
# top-1 f1
if answer_idx == 0:
metric_counts["summed_f1_top1"] += best_f1_in_gold_spans
metric_counts["summed_f1_top1_has_answer"] += best_f1_in_gold_spans
if best_f1_in_gold_spans > best_f1:
best_f1 = best_f1_in_gold_spans
if found_em:
break
# top-k answers: use best f1-score
metric_counts["summed_f1_topk"] += best_f1
metric_counts["summed_f1_topk_has_answer"] += best_f1
# question not answerable
else:
metric_counts["number_of_no_answer"] += 1
metric_counts = _count_no_answer(pred["answers"], metric_counts)
return metric_counts
def _count_overlap(
gold_span: Dict[str, Any],
predicted_span: Dict[str, Any],
metric_counts: Dict[str, float],
answer_idx: int
):
# Checks if overlap between prediction and real answer.
found_answer = False
if (gold_span["offset_start"] <= predicted_span["offset_end"]) and \
(predicted_span["offset_start"] <= gold_span["offset_end"]):
# top-1 answer
if answer_idx == 0:
metric_counts["correct_readings_top1"] += 1
metric_counts["correct_readings_top1_has_answer"] += 1
# top-k answers
metric_counts["correct_readings_topk"] += 1
metric_counts["correct_readings_topk_has_answer"] += 1
found_answer = True
return metric_counts, found_answer
def _count_exact_match(
gold_span: Dict[str, Any],
predicted_span: Dict[str, Any],
metric_counts: Dict[str, float],
answer_idx: int
):
# Check if exact match between prediction and real answer.
# As evaluation needs to be framework independent, we cannot use the farm.evaluation.metrics.py functions.
found_em = False
if (gold_span["offset_start"] == predicted_span["offset_start"]) and \
(gold_span["offset_end"] == predicted_span["offset_end"]):
if metric_counts:
# top-1 answer
if answer_idx == 0:
metric_counts["exact_matches_top1"] += 1
metric_counts["exact_matches_top1_has_answer"] += 1
# top-k answers
metric_counts["exact_matches_topk"] += 1
metric_counts["exact_matches_topk_has_answer"] += 1
found_em = True
return metric_counts, found_em
def _calculate_f1(gold_span: Dict[str, Any], predicted_span: Dict[str, Any]):
# Calculates F1-Score for prediction based on real answer using character offsets.
# As evaluation needs to be framework independent, we cannot use the farm.evaluation.metrics.py functions.
pred_indices = list(range(predicted_span["offset_start"], predicted_span["offset_end"]))
gold_indices = list(range(gold_span["offset_start"], gold_span["offset_end"]))
n_overlap = len([x for x in pred_indices if x in gold_indices])
if pred_indices and gold_indices and n_overlap:
precision = n_overlap / len(pred_indices)
recall = n_overlap / len(gold_indices)
f1 = (2 * precision * recall) / (precision + recall)
return f1
else:
return 0
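# Worked example for _calculate_f1: a gold span over character offsets [10, 20) and a
# predicted span over [15, 25) overlap on 5 characters, giving precision = 5/10,
# recall = 5/10 and F1 = 0.5.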
def _count_no_answer(answers: List[dict], metric_counts: Dict[str, float]):
# Checks if one of the answers is 'no answer'.
for answer_idx, answer in enumerate(answers):
# check if 'no answer'
if answer["answer"] is None:
# top-1 answer
if answer_idx == 0:
metric_counts["correct_no_answers_top1"] += 1
metric_counts["correct_readings_top1"] += 1
metric_counts["exact_matches_top1"] += 1
metric_counts["summed_f1_top1"] += 1
# top-k answers
metric_counts["correct_no_answers_topk"] += 1
metric_counts["correct_readings_topk"] += 1
metric_counts["exact_matches_topk"] += 1
metric_counts["summed_f1_topk"] += 1
break
return metric_counts
|
the-stack_0_16068 | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Validates V2 proto messages.
Internally, this module is a bit magical. It keeps a stack of fields currently
being validated per thread. It is used to construct a path to an invalid field
value.
"""
import contextlib
import logging
import re
import threading
from components import cipd
from go.chromium.org.luci.buildbucket.proto import common_pb2
import buildtags
import config
import errors
import model
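# The per-thread field stack mentioned in the module docstring can be pictured with a
# minimal sketch like the following (illustrative only; the actual _enter/_err helpers
# used throughout this file are not shown in this excerpt and may differ in detail):
#
#   _CONTEXT = threading.local()  # _CONTEXT.field_stack: list of field names
#
#   @contextlib.contextmanager
#   def _enter(name):
#       _CONTEXT.field_stack.append(name)
#       try:
#           yield
#       finally:
#           _CONTEXT.field_stack.pop()
#
#   def _err(fmt, *args):
#       raise Error('%s: %s' % ('.'.join(_CONTEXT.field_stack), fmt % args))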
class Error(Exception):
"""Raised on validation errors."""
PUBSUB_USER_DATA_MAX_LENGTH = 4096
# Maximum size of Build.summary_markdown field. Defined in build.proto.
MAX_BUILD_SUMMARY_MARKDOWN_SIZE = 4000 # 4 KB
# swarming.py and api.py reserve these properties.
RESERVED_PROPERTY_PATHS = [
# Reserved for buildbucket internals.
['buildbucket'],
['$recipe_engine/buildbucket'],
# Deprecated in favor of api.buildbucket.builder.builder,
# https://chromium.googlesource.com/infra/luci/recipes-py/+/master/recipe_modules/buildbucket/api.py
# Prohibited.
['buildername'],
# Deprecated in favor of api.buildbucket.build_input.gitiles_commit,
# https://chromium.googlesource.com/infra/luci/recipes-py/+/master/recipe_modules/buildbucket/api.py
# Prohibited.
['branch'],
['repository'],
# Set to const true.
['$recipe_engine/runtime', 'is_luci'],
# Populated from Build.input.experimental.
['$recipe_engine/runtime', 'is_experimental'],
]
# Statuses with start time required.
START_TIME_REQUIRED_STATUSES = (
common_pb2.STARTED,
common_pb2.SUCCESS,
common_pb2.FAILURE,
)
# Step statuses, listed from best to worst and if applicable. See
# https://chromium.googlesource.com/infra/luci/luci-go/+/dffd1081b775979aa1c5a8046d9a65adead1cee8/buildbucket/proto/step.proto#75
STATUS_PRECEDENCE = (
common_pb2.SUCCESS, # best
common_pb2.FAILURE,
common_pb2.INFRA_FAILURE,
common_pb2.CANCELED, # worst
)
# Character separating parent from children steps.
STEP_SEP = '|'
################################################################################
# Validation of common.proto messages.
# The order of functions must match the order of messages in common.proto.
def validate_gerrit_change(change, require_project=False):
"""Validates common_pb2.GerritChange."""
# project is not required.
_check_truth(change, 'host', 'change', 'patchset')
if require_project and not change.project: # pragma: no branch
# TODO(nodir): escalate to an error.
logging.warning('gerrit_change.project is not specified')
def validate_gitiles_commit(commit, require_ref=True):
"""Validates common_pb2.GitilesCommit."""
_check_truth(commit, 'host', 'project')
if require_ref:
_check_truth(commit, 'ref')
if commit.ref:
if not commit.ref.startswith('refs/'):
_enter_err('ref', 'must start with "refs/"')
else:
if not commit.id:
_err('id or ref is required')
if commit.position:
_enter_err('position', 'requires ref')
if commit.id:
with _enter('id'):
_validate_hex_sha1(commit.id)
def validate_tags(string_pairs, mode):
"""Validates a list of common.StringPair tags.
For mode, see buildtags.validate_tags docstring.
"""
for p in string_pairs:
if ':' in p.key:
_err('tag key "%s" cannot have a colon', p.key)
with _handle_invalid_input_error():
tags = ['%s:%s' % (p.key, p.value) for p in string_pairs]
buildtags.validate_tags(tags, mode)
################################################################################
# Validation of build.proto messages.
# The order of functions must match the order of messages in common.proto.
def validate_builder_id(builder_id, require_bucket=True, require_builder=True):
"""Validates build_pb2.BuilderID."""
assert require_bucket or not require_builder
_check_truth(builder_id, 'project')
if require_bucket:
_check_truth(builder_id, 'bucket')
if require_builder:
_check_truth(builder_id, 'builder')
with _enter('project'), _handle_invalid_input_error():
config.validate_project_id(builder_id.project)
with _enter('bucket'), _handle_invalid_input_error():
if builder_id.bucket:
config.validate_bucket_name(builder_id.bucket)
parts = builder_id.bucket.split('.')
if len(parts) >= 3 and parts[0] == 'luci':
_err(
'invalid usage of v1 bucket format in v2 API; use %r instead',
parts[2]
)
elif builder_id.builder:
_err('required by .builder field')
with _enter('builder'), _handle_invalid_input_error():
if builder_id.builder:
errors.validate_builder_name(builder_id.builder)
################################################################################
# Validation of rpc.proto messages.
# The order of functions must match the order of messages in rpc.proto.
def validate_get_build_request(req):
"""Validates rpc_pb2.GetBuildRequest."""
if req.id:
if req.HasField('builder') or req.build_number:
_err('id is mutually exclusive with builder and build_number')
elif req.HasField('builder') and req.build_number:
validate_builder_id(req.builder)
else:
_err('id or (builder and build_number) are required')
def validate_search_builds_request(req):
"""Validates rpc_pb2.SearchBuildRequest."""
with _enter('predicate'):
validate_build_predicate(req.predicate)
_validate_paged_request(req)
def validate_requested_dimension(dim):
"""Validates common_pb2.RequestedDimension."""
_check_truth(dim, 'key', 'value')
with _enter('key'):
if dim.key == 'caches':
_err('"caches" is invalid; define caches instead')
if dim.key == 'pool':
_err('"pool" is not allowed')
with _enter('expiration'):
with _enter('seconds'):
if dim.expiration.seconds < 0:
_err('must not be negative')
if dim.expiration.seconds % 60 != 0:
_err('must be a multiple of 60')
if dim.expiration.nanos:
_enter_err('nanos', 'must be 0')
def validate_schedule_build_request(req, legacy=False):
if '/' in req.request_id: # pragma: no cover
_enter_err('request_id', 'must not contain /')
if not req.HasField('builder') and not req.template_build_id:
_err('builder or template_build_id is required')
if req.HasField('builder'):
with _enter('builder'):
validate_builder_id(req.builder, require_builder=not legacy)
with _enter('exe'):
_check_falsehood(req.exe, 'cipd_package')
if req.exe.cipd_version:
with _enter('cipd_version'):
_validate_cipd_version(req.exe.cipd_version)
with _enter('properties'):
validate_struct(req.properties)
if not legacy: # pragma: no branch
for path in RESERVED_PROPERTY_PATHS:
if _struct_has_path(req.properties, path):
_err('property path %r is reserved', path)
if req.HasField('gitiles_commit'):
with _enter('gitiles_commit'):
validate_gitiles_commit(req.gitiles_commit)
_check_repeated(
req,
'gerrit_changes',
lambda c: validate_gerrit_change(c, require_project=not legacy),
)
with _enter('tags'):
validate_tags(req.tags, 'new')
_check_repeated(req, 'dimensions', validate_requested_dimension)
key_exp = set()
with _enter('dimensions'):
for d in req.dimensions:
t = (d.key, d.expiration.seconds)
if t in key_exp:
_err(
'key "%s" and expiration %ds are not unique', d.key,
d.expiration.seconds
)
key_exp.add(t)
if req.priority < 0 or req.priority > 255:
_enter_err('priority', 'must be in [0, 255]')
if req.HasField('notify'): # pragma: no branch
with _enter('notify'):
validate_notification_config(req.notify)
def validate_cancel_build_request(req):
_check_truth(req, 'id', 'summary_markdown')
with _enter('summary_markdown'):
validate_build_summary_markdown(req.summary_markdown)
def validate_struct(struct):
for name, value in struct.fields.iteritems():
if not value.WhichOneof('kind'):
_enter_err(name, 'value is not set; for null, initialize null_value')
def validate_notification_config(notify):
_check_truth(notify, 'pubsub_topic')
if len(notify.user_data) > PUBSUB_USER_DATA_MAX_LENGTH:
_enter_err('user_data', 'must be <= %d bytes', PUBSUB_USER_DATA_MAX_LENGTH)
# Set of UpdateBuildRequest field paths updatable via UpdateBuild RPC.
UPDATE_BUILD_FIELD_PATHS = {
'build.status',
'build.status_details',
'build.summary_markdown',
'build.steps',
'build.output',
'build.output.properties',
'build.output.gitiles_commit',
'build.tags',
}
# Set of valid build statuses supported by UpdateBuild RPC.
UPDATE_BUILD_STATUSES = {
common_pb2.STARTED,
# kitchen does not actually use SUCCESS. It relies on swarming pubsub
# handler in Buildbucket because a task may fail after recipe succeeded.
common_pb2.SUCCESS,
common_pb2.FAILURE,
common_pb2.INFRA_FAILURE,
}
def validate_update_build_request(req, make_build_steps_func=None):
"""Validates rpc_pb2.UpdateBuildRequest.
  If make_build_steps_func is given, it will be called at the end to validate
  the size of its serialized representation. This allows the caller to save
  the BuildSteps locally and thus avoid re-doing the work later.
"""
update_paths = set(req.update_mask.paths)
with _enter('update_mask', 'paths'):
unsupported = update_paths - UPDATE_BUILD_FIELD_PATHS
if unsupported:
_err('unsupported path(s) %r', sorted(unsupported))
# Check build values, if present in the mask.
with _enter('build'):
_check_truth(req.build, 'id')
if 'build.status' in update_paths:
if req.build.status not in UPDATE_BUILD_STATUSES:
_enter_err(
'status', 'invalid status %s for UpdateBuild',
common_pb2.Status.Name(req.build.status)
)
if 'build.output.gitiles_commit' in update_paths:
with _enter('output', 'gitiles_commit'):
validate_gitiles_commit(req.build.output.gitiles_commit)
if 'build.summary_markdown' in update_paths:
with _enter('summary_markdown'):
validate_build_summary_markdown(req.build.summary_markdown)
if 'build.output.properties' in update_paths:
with _enter('output', 'properties'):
validate_struct(req.build.output.properties)
if 'build.tags' in update_paths:
with _enter('tags'):
validate_tags(req.build.tags, 'append')
if 'build.steps' in update_paths: # pragma: no branch
with _enter('steps'):
build_steps = (
make_build_steps_func()
if make_build_steps_func else model.BuildSteps.make(req.build)
)
limit = model.BuildSteps.MAX_STEPS_LEN
if len(build_steps.step_container_bytes) > limit:
_err('too big to accept')
validate_steps(req.build.steps)
def validate_build_summary_markdown(summary_markdown):
size = len(summary_markdown)
limit = MAX_BUILD_SUMMARY_MARKDOWN_SIZE
if size > limit:
_err('too big to accept (%d > %d bytes)', size, limit)
def validate_steps(steps):
seen_steps = dict()
for i, s in enumerate(steps):
with _enter('step[%d]' % i):
validate_step(s, seen_steps)
def validate_step(step, steps):
"""Validates build's step, internally and relative to (previous) steps."""
_check_truth(step, 'name')
if step.name in steps:
_enter_err('name', 'duplicate: %r', step.name)
validate_internal_timing_consistency(step)
log_names = set()
_check_repeated(step, 'logs', lambda log: validate_log(log, log_names))
name_path = step.name.split(STEP_SEP)
parent_name = STEP_SEP.join(name_path[:-1])
if parent_name:
if parent_name not in steps:
_err('parent to %r must precede', step.name)
parent = steps[parent_name]
validate_status_consistency(step, parent)
validate_timing_consistency(step, parent)
steps[step.name] = step
def validate_internal_timing_consistency(step):
"""Validates internal timing consistency of a step."""
if (step.status not in common_pb2.Status.values() or
step.status == common_pb2.STATUS_UNSPECIFIED):
_err('must have buildbucket.v2.Status that is not STATUS_UNSPECIFIED')
if step.status in START_TIME_REQUIRED_STATUSES and not step.HasField(
'start_time'):
_enter_err(
'start_time', 'required by status %s',
common_pb2.Status.Name(step.status)
)
elif step.status < common_pb2.STARTED and step.HasField('start_time'):
_enter_err(
'start_time', 'invalid for status %s',
common_pb2.Status.Name(step.status)
)
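  # ENDED_MASK is a bit present in every terminal status (SUCCESS, FAILURE,
  # INFRA_FAILURE, CANCELED); XOR-ing it with HasField('end_time') enforces
  # "both a terminal status and an end_time, or neither".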
if bool(step.status & common_pb2.ENDED_MASK) ^ step.HasField('end_time'):
_err('must have both or neither end_time and a terminal status')
if (step.HasField('end_time') and
step.start_time.ToDatetime() > step.end_time.ToDatetime()):
_err('start_time after end_time')
def validate_status_consistency(child, parent):
"""Validates inter-step status consistency."""
c, p = child.status, parent.status
c_name, p_name = common_pb2.Status.Name(c), common_pb2.Status.Name(p)
if p == common_pb2.SCHEDULED:
_enter_err('status', 'parent %r must be at least STARTED', parent.name)
if not bool(c & common_pb2.ENDED_MASK) and p != common_pb2.STARTED:
_enter_err(
'status', 'non-terminal (%s) %r must have STARTED parent %r (%s)',
c_name, child.name, parent.name, p_name
)
if (p in STATUS_PRECEDENCE and c in STATUS_PRECEDENCE and
STATUS_PRECEDENCE.index(p) < STATUS_PRECEDENCE.index(c)):
_enter_err(
'status', '%r\'s status %s is worse than parent %r\'s status %s',
child.name, c_name, parent.name, p_name
)
def validate_timing_consistency(child, parent):
"""Validates inter-step timing consistency."""
parent_start = parent.start_time.ToDatetime(
) if parent.HasField('start_time') else None
parent_end = parent.end_time.ToDatetime(
) if parent.HasField('end_time') else None
if child.HasField('start_time'):
child_start = child.start_time.ToDatetime()
with _enter('start_time'):
if parent_start and parent_start > child_start:
_err('cannot precede parent %r\'s start time', parent.name)
if parent_end and parent_end < child_start:
_err('cannot follow parent %r\'s end time', parent.name)
if child.HasField('end_time'):
child_end = child.end_time.ToDatetime()
with _enter('end_time'):
if parent_start and parent_start > child_end:
_err('cannot precede parent %r\'s start time', parent.name)
if parent_end and parent_end < child_end:
_err('cannot follow parent %r\'s end time', parent.name)
def validate_log(log, names):
"""Validates a log within a build step; checks uniqueness against names param.
"""
_check_truth(log, 'name', 'url', 'view_url')
if log.name in names:
_enter_err('name', 'duplicate: %r', log.name)
names.add(log.name)
def validate_build_predicate(predicate):
"""Validates rpc_pb2.BuildPredicate."""
if predicate.HasField('builder'):
with _enter('builder'):
validate_builder_id(
predicate.builder, require_bucket=False, require_builder=False
)
_check_repeated(predicate, 'gerrit_changes', validate_gerrit_change)
if predicate.HasField('output_gitiles_commit'):
with _enter('output_gitiles_commit'):
_validate_predicate_output_gitiles_commit(predicate.output_gitiles_commit)
if predicate.HasField('create_time') and predicate.HasField('build'):
_err('create_time and build are mutually exclusive')
with _enter('tags'):
validate_tags(predicate.tags, 'search')
# List of supported BuildPredicate.output_gitiles_commit field sets.
# It is more restricted than the generic validate_gitiles_commit because the
# field sets by which builds are indexed are more restricted.
SUPPORTED_PREDICATE_OUTPUT_GITILES_COMMIT_FIELD_SET = {
tuple(sorted(s)) for s in [
('host', 'project', 'id'),
('host', 'project', 'ref'),
('host', 'project', 'ref', 'position'),
]
}
def _validate_predicate_output_gitiles_commit(commit):
"""Validates BuildsPredicate.output_gitiles_commit.
From rpc_pb2.SearchBuildsRequest.output_gitiles_commit comment:
  One of the following subfield sets must be specified:
- host, project, id
- host, project, ref
- host, project, ref, position
"""
field_set = tuple(sorted(f.name for f, _ in commit.ListFields()))
if field_set not in SUPPORTED_PREDICATE_OUTPUT_GITILES_COMMIT_FIELD_SET:
_err(
'unsupported set of fields %r. Supported field sets: %r', field_set,
SUPPORTED_PREDICATE_OUTPUT_GITILES_COMMIT_FIELD_SET
)
validate_gitiles_commit(commit, require_ref=False)
################################################################################
# Internals.
def _validate_cipd_version(version):
if not cipd.is_valid_version(version):
_err('invalid version "%s"', version)
def _struct_has_path(struct, path):
"""Returns True if struct has a value at field path."""
for p in path:
f = struct.fields.get(p)
if f is None:
return False
struct = f.struct_value
return True
def _validate_hex_sha1(sha1):
  pattern = r'[a-f0-9]{40}'
if not re.match(pattern, sha1):
_err('does not match r"%s"', pattern)
def _validate_paged_request(req):
"""Validates req.page_size."""
if req.page_size < 0:
    _enter_err('page_size', 'must not be negative')
def _check_truth(msg, *field_names):
"""Validates that the field values are truish."""
assert field_names, 'at least 1 field is required'
for f in field_names:
if not getattr(msg, f):
_enter_err(f, 'required')
def _check_falsehood(msg, *field_names):
"""Validates that the field values are falsish."""
for f in field_names:
if getattr(msg, f):
_enter_err(f, 'disallowed')
def _check_repeated(msg, field_name, validator):
"""Validates each element of a repeated field."""
for i, c in enumerate(getattr(msg, field_name)):
with _enter('%s[%d]' % (field_name, i)):
validator(c)
@contextlib.contextmanager
def _enter(*names):
_field_stack().extend(names)
try:
yield
finally:
_field_stack()[-len(names):] = []
def _err(fmt, *args):
field_path = '.'.join(_field_stack())
raise Error('%s: %s' % (field_path, fmt % args))
@contextlib.contextmanager
def _handle_invalid_input_error():
try:
yield
except errors.InvalidInputError as ex:
_err('%s', ex.message)
def _enter_err(name, fmt, *args):
with _enter(name):
_err(fmt, *args)
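# Example (illustrative): with _enter('builder'): _enter_err('project',
# 'required') raises Error('builder.project: required').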
def _field_stack():
if not hasattr(_CONTEXT, 'field_stack'): # pragma: no cover
_CONTEXT.field_stack = []
return _CONTEXT.field_stack
# Validation context of the current thread.
_CONTEXT = threading.local()
|
the-stack_0_16069 | import sys
import random
import re
import asyncio
import aiohttp
import discord
from discord.ext import commands
import xml.etree.ElementTree as ET
import loadconfig
class anime(commands.Cog):
    '''Everything to do with anime'''
def __init__(self, bot):
self.bot = bot
async def cog_command_error(self, ctx, error):
print('Error in {0.command.qualified_name}: {1}'.format(ctx, error))
def checkRole(self, user, roleRec):
ok = False
for all in list(user.roles):
if all.name == roleRec:
ok = True
return ok
@commands.command()
async def kawaii(self, ctx):
        '''Posts a random kawaii image'''
if loadconfig.__kawaiichannel__:
pins = await self.bot.get_channel(loadconfig.__kawaiichannel__).pins()
rnd = random.choice(pins)
img = rnd.attachments[0].url
emojis = [':blush:', ':flushed:', ':heart_eyes:', ':heart_eyes_cat:', ':heart:']
await ctx.send(f'{random.choice(emojis)} Von: {rnd.author.display_name}: {img}')
else:
await ctx.send('**:no_entry:** Es wurde kein Channel für den Bot eingestellt! Wende dich bitte an den Bot Admin')
@commands.command(pass_context=True, hidden=True)
async def nsfw(self, ctx):
        '''Assigns the role needed to access the NSFW channels'''
if ctx.guild.id == loadconfig.__botserverid__:
if loadconfig.__selfassignrole__:
role = discord.utils.get(ctx.guild.roles, name=loadconfig.__selfassignrole__)
if role in ctx.author.roles:
try:
await ctx.author.remove_roles(role)
except:
pass
tmp = await ctx.send(f':x: Rolle **{role}** wurde entfernt')
else:
try:
await ctx.author.add_roles(role)
except:
pass
tmp = await ctx.send(f':white_check_mark: Rolle **{role}** wurde hinzugefügt')
else:
tmp = await ctx.send('**:no_entry:** Es wurde keine Rolle für den Bot eingestellt! Wende dich bitte an den Bot Admin')
else:
tmp = await ctx.send(f'**:no_entry:** This command don\'t work on this server!')
await asyncio.sleep(2 * 60)
await tmp.delete()
await ctx.message.delete()
@commands.command(aliases=['wave', 'hi', 'ohaiyo'])
async def hello(self, ctx):
        '''Nonsense gifs for saying hello'''
gifs = ['https://cdn.discordapp.com/attachments/102817255661772800/219512763607678976/large_1.gif',
'https://cdn.discordapp.com/attachments/102817255661772800/219512898563735552/large.gif',
'https://cdn.discordapp.com/attachments/102817255661772800/219518948251664384/WgQWD.gif',
'https://cdn.discordapp.com/attachments/102817255661772800/219518717426532352/tumblr_lnttzfSUM41qgcvsy.gif',
'https://cdn.discordapp.com/attachments/102817255661772800/219519191290478592/tumblr_mf76erIF6s1qj96p1o1_500.gif',
'https://cdn.discordapp.com/attachments/102817255661772800/219519729604231168/giphy_3.gif',
'https://cdn.discordapp.com/attachments/102817255661772800/219519737971867649/63953d32c650703cded875ac601e765778ce90d0_hq.gif',
'https://cdn.discordapp.com/attachments/102817255661772800/219519738781368321/17201a4342e901e5f1bc2a03ad487219c0434c22_hq.gif']
msg = f':wave: {random.choice(gifs)}'
await ctx.send(msg)
@commands.command(aliases=['nepu', 'topnep'])
async def nep(self, ctx):
'''Can't stop the Nep'''
neps = ['https://cdn.discordapp.com/attachments/102817255661772800/219530759881359360/community_image_1421846157.gif',
'https://cdn.discordapp.com/attachments/102817255661772800/219535598187184128/tumblr_nv25gtvX911ubsb68o1_500.gif',
'https://cdn.discordapp.com/attachments/102817255661772800/219535698309545984/tumblr_mpub9tTuZl1rvrw2eo2_r1_500.gif',
'https://cdn.discordapp.com/attachments/102817255661772800/219535820430770176/dd9f3cc873f3e13fe098429388fc24242a545a21_hq.gif',
'https://cdn.discordapp.com/attachments/102817255661772800/219535828773371904/tumblr_nl62nrrPar1u0bcbmo1_500.gif',
'https://cdn.discordapp.com/attachments/102817255661772800/219535828995538944/dUBNqIH.gif',
'https://cdn.discordapp.com/attachments/102817255661772800/219535906942615553/b3886374588ec93849e1210449c4561fa699ff0d_hq.gif',
'https://cdn.discordapp.com/attachments/102817255661772800/219536353841381376/tumblr_nl9wb2qMFD1u3qei8o1_500.gif',
'https://cdn.discordapp.com/attachments/102817255661772800/219536345176080384/tumblr_njhahjh1DB1t0co30o1_500.gif',
'https://cdn.discordapp.com/attachments/102817255661772800/219536356223877120/tumblr_njkq53Roep1t0co30o1_500.gif',
'https://cdn.discordapp.com/attachments/102817255661772800/219536424121139210/tumblr_oalathnmFC1uskgfro1_400.gif',
'https://cdn.discordapp.com/attachments/102817255661772800/219536451807739904/tumblr_nfg22lqmZ31rjwa86o1_500.gif',
'https://cdn.discordapp.com/attachments/102817255661772800/219536686529380362/tumblr_o98bm76djb1vv3oz0o1_500.gif',
'https://cdn.discordapp.com/attachments/102817255661772800/219537181440475146/tumblr_mya4mdVhDv1rmk3cyo1_500.gif',
'https://i.imgur.com/4xnJN9x.png',
'https://i.imgur.com/bunWIWD.jpg']
nepnep = ['topnep',
'Can\'t pep the nep',
'Flat is justice',
'nep nep nep nep nep nep nep nep nep nep nep',
'Nepgear > your waifu']
msg = f'{random.choice(nepnep)} {random.choice(neps)}'
await ctx.send(msg)
@commands.command(aliases=['headpat'])
async def pat(self, ctx, member: discord.Member = None):
'''/r/headpats Pat Pat Pat :3
        Example:
-----------
:pat @Der-Eddy#6508
'''
gifs = ['https://gfycat.com/PoisedWindingCaecilian',
'https://cdn.awwni.me/sou1.jpg',
'https://i.imgur.com/Nzxa95W.gifv',
'https://cdn.awwni.me/sk0x.png',
'https://i.imgur.com/N0UIRkk.png',
'https://cdn.awwni.me/r915.jpg',
'https://i.imgur.com/VRViMGf.gifv',
'https://i.imgur.com/73dNfOk.gifv',
'https://i.imgur.com/UXAKjRc.jpg',
'https://i.imgur.com/dzlDuNs.jpg',
'https://i.imgur.com/hPR7SOt.gif',
'https://i.imgur.com/IqGRUu4.gif',
'https://68.media.tumblr.com/f95f14437809dfec8057b2bd525e6b4a/tumblr_omvkl2SzeK1ql0375o1_500.gif',
'https://i.redd.it/0ffv8i3p1vrz.jpg',
'http://i.imgur.com/3dzA6OU.png',
'http://i.imgur.com/vkFKabZ.jpg',
'https://i.imgur.com/Lb4p20s.jpg',
'https://cdn.awwni.me/snot.jpg',
'https://i.imgur.com/5yEOa6u.jpg',
'https://i.redd.it/dc7oebkfsetz.jpg']
if member == ctx.me:
msg = f'Arigato {ctx.author.mention} <:Hiding:322410632517517324> \n{random.choice(gifs)}'
await ctx.send(msg)
elif member is not None:
msg = f'{ctx.author.mention} tätschelt dich {member.mention} :3 \n{random.choice(gifs)}'
await ctx.send(msg)
@commands.command(aliases=['rate', 'waifu'])
async def ratewaifu(self, ctx, *, waifuName: str):
'''Rate my waifu
        Example:
-----------
:ratewaifu Sagiri
'''
waifu = waifuName.lower()
bestWaifus = ['kobeni', 'emilia', 'shinobu', 'karen', 'shouko', 'shoko',
'minori', 'chidori', 'sagiri', 'mashiro', 'last order',
'saki', 'makoto', 'yui', 'nep', 'nepgear', 'taiga']
trashWaifus = ['shino', 'rikka']
        # these lists are highly biased, but who cares ¯\_(ツ)_/¯
if waifu in bestWaifus:
rating = 10
elif waifu in trashWaifus:
rating = 0
else:
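            # Note: Python 3 salts str hashes per process, so this pseudo-random
            # rating can change between bot restarts unless PYTHONHASHSEED is fixed.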
rating = hash(waifu) % 10
if waifu == 'emilia':
emoji = '<:Emilia:230684388084416512>'
elif waifu == 'shinobu':
emoji = '<:Shinobu:303302053688770561>'
elif waifu == 'mashiro':
emoji = '<:mashiro:266233568626343936>'
elif waifu == 'sagiri':
emoji = '<:Sagiri:407630432319045634>'
elif waifu == 'nep' or waifu == 'neptunia' or waifu == 'nepgear':
emoji = '<:nep:261230988758220822>'
elif rating < 2:
emoji = ':put_litter_in_its_place:'
elif rating < 5:
emoji = '<:k3llyLUL:341946977266827264>'
elif rating < 7:
emoji = '<:k3llyTHINK:341946932639432704>'
elif rating < 9:
emojis = ['<:faeGasm:298772756412104704>', '<:naroGasm:341200647741243393>']
emoji = random.choice(emojis)
elif rating < 10:
emojis = ['<:kanoLewd:230662559458525185>', '<:fowShy:230662561580843008>', '<:mendoLewd:230662561169801216>']
emoji = random.choice(emojis)
elif rating == 10:
emojis = ['<:okhand:335170448666918923>', '<:nepnep:314906910061101057>', '<:gaku:249970768786489345>', '<:faeWant:313430419661914113>']
emoji = random.choice(emojis)
msg = f'{emoji} Ich bewerte **{waifuName}** als **{rating}/10**'
await ctx.send(msg)
@commands.command(aliases=['anilist'])
async def anime(self, ctx, *, animeName: str):
        '''Searches AniList.co for an anime and returns the basic information
        Example:
-----------
:anime Mushishi
'''
api = 'https://graphql.anilist.co'
query = '''
query ($name: String){
Media(search: $name, type: ANIME) {
id
idMal
description
title {
romaji
english
}
coverImage {
large
}
startDate {
year
month
day
}
endDate {
year
month
day
}
synonyms
format
status
episodes
duration
nextAiringEpisode {
episode
}
averageScore
meanScore
source
genres
tags {
name
}
studios(isMain: true) {
nodes {
name
}
}
siteUrl
}
}
'''
variables = {
'name': animeName
}
async with aiohttp.ClientSession() as session:
async with session.post(api, json={'query': query, 'variables': variables}, headers = self.bot.userAgentHeaders) as r:
if r.status == 200:
json = await r.json()
data = json['data']['Media']
embed = discord.Embed(color=ctx.author.top_role.colour)
embed.set_footer(text='API provided by AniList.co | ID: {}'.format(str(data['id'])))
embed.set_thumbnail(url=data['coverImage']['large'])
if data['title']['english'] == None or data['title']['english'] == data['title']['romaji']:
embed.add_field(name='Titel', value=data['title']['romaji'], inline=False)
else:
embed.add_field(name='Titel', value='{} ({})'.format(data['title']['english'], data['title']['romaji']), inline=False)
#embed.add_field(name='Beschreibung', value=data['description'], inline=False)
if data['synonyms'] != []:
embed.add_field(name='Synonyme', value=', '.join(data['synonyms']), inline=True)
embed.add_field(name='Typ', value=data['format'].replace('_', ' ').title().replace('Tv', 'TV'), inline=True)
if data['episodes'] > 1:
embed.add_field(name='Folgen', value='{} à {} min'.format(data['episodes'], data['duration']), inline=True)
else:
embed.add_field(name='Dauer', value=str(data['duration']) + ' min', inline=True)
embed.add_field(name='Gestartet', value='{}.{}.{}'.format(data['startDate']['day'], data['startDate']['month'], data['startDate']['year']), inline=True)
if data['endDate']['day'] == None:
embed.add_field(name='Released Folgen', value=data['nextAiringEpisode']['episode'] - 1, inline=True)
elif data['episodes'] > 1:
embed.add_field(name='Beendet', value='{}.{}.{}'.format(data['endDate']['day'], data['endDate']['month'], data['endDate']['year']), inline=True)
embed.add_field(name='Status', value=data['status'].replace('_', ' ').title(), inline=True)
try:
embed.add_field(name='Haupt-Studio', value=data['studios']['nodes'][0]['name'], inline=True)
except IndexError:
pass
embed.add_field(name='Ø Score', value=data['averageScore'], inline=True)
embed.add_field(name='Genres', value=', '.join(data['genres']), inline=False)
tags = ''
for tag in data['tags']:
tags += tag['name'] + ', '
embed.add_field(name='Tags', value=tags[:-2], inline=False)
try:
embed.add_field(name='Adaptiert von', value=data['source'].replace('_', ' ').title(), inline=True)
except AttributeError:
pass
embed.add_field(name='AniList Link', value=data['siteUrl'], inline=False)
embed.add_field(name='MyAnimeList Link', value='https://myanimelist.net/anime/' + str(data['idMal']), inline=False)
await ctx.send(embed=embed)
else:
await ctx.send(':x: Konnte keinen passenden Anime finden!')
@commands.command()
async def manga(self, ctx, *, mangaName: str):
        '''Searches AniList.co for a manga and returns the basic information
        Example:
-----------
:manga Air Gear
'''
api = 'https://graphql.anilist.co'
query = '''
query ($name: String){
Media(search: $name, type: MANGA) {
id
idMal
description
title {
romaji
english
}
coverImage {
large
}
startDate {
year
month
day
}
endDate {
year
month
day
}
status
chapters
volumes
averageScore
meanScore
genres
tags {
name
}
siteUrl
}
}
'''
variables = {
'name': mangaName
}
async with aiohttp.ClientSession() as session:
async with session.post(api, json={'query': query, 'variables': variables}, headers = self.bot.userAgentHeaders) as r:
if r.status == 200:
json = await r.json()
data = json['data']['Media']
embed = discord.Embed(color=ctx.author.top_role.colour)
embed.set_footer(text='API provided by AniList.co | ID: {}'.format(str(data['id'])))
embed.set_thumbnail(url=data['coverImage']['large'])
if data['title']['english'] == None or data['title']['english'] == data['title']['romaji']:
embed.add_field(name='Titel', value=data['title']['romaji'], inline=False)
else:
embed.add_field(name='Titel', value='{} ({})'.format(data['title']['english'], data['title']['romaji']), inline=False)
#embed.add_field(name='Beschreibung', value=data['description'], inline=False)
if data['chapters'] != None:
# https://github.com/AniList/ApiV2-GraphQL-Docs/issues/47
embed.add_field(name='Kapitel', value=data['chapters'], inline=True)
embed.add_field(name='Bände', value=data['volumes'], inline=True)
embed.add_field(name='Gestartet', value='{}.{}.{}'.format(data['startDate']['day'], data['startDate']['month'], data['startDate']['year']), inline=True)
if data['endDate']['day'] != None:
embed.add_field(name='Beendet', value='{}.{}.{}'.format(data['endDate']['day'], data['endDate']['month'], data['endDate']['year']), inline=True)
embed.add_field(name='Status', value=data['status'].replace('_', ' ').title(), inline=True)
embed.add_field(name='Ø Score', value=data['averageScore'], inline=True)
embed.add_field(name='Genres', value=', '.join(data['genres']), inline=False)
tags = ''
for tag in data['tags']:
tags += tag['name'] + ', '
embed.add_field(name='Tags', value=tags[:-2], inline=False)
embed.add_field(name='AniList Link', value=data['siteUrl'], inline=False)
embed.add_field(name='MyAnimeList Link', value='https://myanimelist.net/anime/' + str(data['idMal']), inline=False)
await ctx.send(embed=embed)
else:
await ctx.send(':x: Konnte keinen passenden Manga finden!')
@commands.command(aliases=['sauce', 'iqdb'])
async def saucenao(self, ctx, url: str = None):
        '''Tries to find the source of an anime image
        Example:
-----------
:saucenao
:saucenao https://i.imgur.com/nmnVtgs.jpg
'''
if url == None:
async for message in ctx.channel.history(before=ctx.message):
try:
url = message.attachments[0].url
continue
except IndexError:
pass
elif not url.endswith(('.jpg', '.png', '.bmp', '.gif', '.jpeg')):
await ctx.send(':x: Keine korrekte URL angegeben!')
return
tmp = await ctx.send(f'Versuche die Quelle des Bildes <{url}> zu finden ...')
saucenao = f'http://saucenao.com/search.php?db=999&url={url}'
async with aiohttp.ClientSession(headers = self.bot.userAgentHeaders) as cs:
async with cs.get(saucenao) as r:
#Thanks to https://github.com/MistressMamiya/hsauce_bot/blob/master/get_source.py
content = await r.text()
content = content.split('Low similarity results')[0] # Get rid of the low similarity results
artist = re.search(r'<strong>Creator: <\/strong>(.*?)<br', content)
anime = re.search(r'<strong>Material: <\/strong>(.*?)<br', content)
characters = re.search(r'<strong>Characters: <\/strong><br \/>(.*?)<br \/></div>', content)
pixiv = re.search(r'<strong>Pixiv ID: </strong><a href=\"(.*?)\" class', content)
danbooru = re.search(r'<a href=\"https://danbooru\.donmai\.us/post/show/(\d+)\">', content)
gelbooru = re.search(r'<a href=\"https://gelbooru\.com/index\.php\?page=post&s=view&id=(\d+)\">', content)
yandere = re.search(r'<a href=\"https://yande\.re/post/show/(\d+)\">', content)
konachan = re.search(r'<a href=\"http://konachan\.com/post/show/(\d+)\">', content)
sankaku = re.search(r'<a href=\"https://chan\.sankakucomplex\.com/post/show/(\d+)\">', content)
embed = discord.Embed()
embed.set_footer(text='Provided by https://saucenao.com')
embed.set_thumbnail(url=url)
if anime:
embed.add_field(name='Anime', value=anime.group(1), inline=True)
if artist:
embed.add_field(name='Artist', value=artist.group(1), inline=True)
if characters:
embed.add_field(name='Charaktere', value=str(characters.group(1)).replace('<br />', ', '), inline=True)
if pixiv:
embed.add_field(name='Pixiv Link', value=pixiv.group(1), inline=False)
if danbooru:
embed.add_field(name='Danbooru Link', value='https://danbooru.donmai.us/post/show/' + danbooru.group(1), inline=False)
if gelbooru:
embed.add_field(name='Gelbooru Link', value='https://gelbooru.com/index.php?page=post&s=view&id=' + gelbooru.group(1), inline=False)
if yandere:
embed.add_field(name='Yande.re Link', value='https://yande.re/post/show/' + yandere.group(1), inline=False)
if konachan:
embed.add_field(name='Konachan Link', value='http://konachan.com/post/show/' + konachan.group(1), inline=False)
if sankaku:
embed.add_field(name='Sankaku Link', value='https://chan.sankakucomplex.com/post/show/' + sankaku.group(1), inline=False)
if anime or artist or characters or pixiv or danbooru or gelbooru or yandere or konachan or sankaku:
await tmp.edit(content='', embed=embed)
else:
await tmp.edit(content=':x: Konnte nichts finden!')
# @commands.command(pass_context=True, hidden=True)
# async def imgur(self, ctx, amount: int = None):
# '''Lädt eine bestimmte Anzahl der letzten hochgeladenen Bilder im Channel bei Imgur hoch'''
# await ctx.send(':new: Befehl in Arbeit!')
#
# @commands.command(pass_context=True, alias=['ani'], hidden=True)
# async def anisearch(self, ctx, url: str = None):
# '''Gibt Informationen über einen AniSearch.de User zurück'''
# async with aiohttp.get(url) as r:
# if r.status == 200:
# content = await r.text()
# animeRE = r"<td class=\"rtype2\">\w+</td><td>(\d+)</td>"
# watchedAnimes = re.search(content, animeRE)
# await ctx.send(str(watchedAnimes.group(0)))
# else:
# await ctx.send(':x: Konnte den Benutzer nicht finden (falsche URL?)')
def setup(bot):
bot.add_cog(anime(bot))
|
the-stack_0_16070 | _base_ = "finetune-eval-base.py"
# dataset settings
data_source_cfg = dict(
type="ImageNet",
memcached=False,
mclient_path='/no/matter',
# this will be ignored if type != ImageListMultihead
)
data_train_list = "data/flowers/meta/train-1000.txt"
data_train_root = 'data/flowers'
data_val_list = "data/flowers/meta/val.txt"
data_val_root = 'data/flowers'
data_test_list = "data/flowers/meta/test.txt"
data_test_root = 'data/flowers'
dataset_type = "ClassificationDataset"
img_norm_cfg = dict(mean=[0.4355,0.3777,0.2880], std=[0.2970, 0.2459, 0.2705])
train_pipeline = [
dict(type='RandomResizedCrop', size=224),
dict(type='RandomHorizontalFlip'),
dict(type='ToTensor'),
dict(type='Normalize', **img_norm_cfg),
]
test_pipeline = [
dict(type='Resize', size=256),
dict(type='CenterCrop', size=224),
dict(type='ToTensor'),
dict(type='Normalize', **img_norm_cfg),
]
data = dict(
batch_size=64,
workers_per_gpu=6,
train=dict(
type=dataset_type,
data_source=dict(
list_file=data_train_list, root=data_train_root,
**data_source_cfg),
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_source=dict(
list_file=data_val_list, root=data_val_root, **data_source_cfg),
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_source=dict(
list_file=data_test_list, root=data_test_root, **data_source_cfg),
pipeline=test_pipeline))
custom_hooks = [
dict(
name="val",
type='ValidateHook',
dataset=data['val'],
by_epoch=False,
initial=False,
interval=100,
imgs_per_gpu=32,
workers_per_gpu=6,
eval_param=dict(topk=(1,5))),
dict(
name="test",
type='ValidateHook',
dataset=data['test'],
by_epoch=False,
initial=False,
interval=100,
imgs_per_gpu=32,
workers_per_gpu=6,
eval_param=dict(topk=(1,5))),
]
by_iter =True
# learning policy
lr_config = dict(
by_epoch=False,
policy='step',
step=[833,1667],
gamma=0.1 # multiply LR by this number at each step
)
# momentum and weight decay from VTAB and IDRL
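# Note: paramwise_options keys are regex patterns matched against parameter
# names; parameters under 'head.' get lr_mult=100, i.e. an effective LR of 0.1
# versus the 0.001 base LR used for the rest of the network.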
optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.,
paramwise_options={'\Ahead.': dict(lr_mult=100)})
# runtime settings
# total iters or total epochs
total_iters=2500
checkpoint_config = dict(interval=10000)
log_config = dict(
interval=1,
by_epoch=False,
hooks=[
dict(type='TextLoggerHook', by_epoch=False),
dict(type='TensorboardLoggerHook', by_epoch=False)
])
optimizer_config = dict(update_interval=4)
|
the-stack_0_16071 | #!/usr/bin/python
# This file is part of python-registry.
#
# Copyright 2011 Will Ballenthin <[email protected]>
# while at Mandiant <http://www.mandiant.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Find all Registry paths, value names, and values that
# contain the given string.
#
# python findkey.py <registry file> <needle>
#
from __future__ import print_function
from __future__ import unicode_literals
import sys
import argparse
from Registry import Registry
def main():
parser = argparse.ArgumentParser(
description="Search for a string in a Windows Registry hive")
parser.add_argument("registry_hive", type=str,
help="Path to the Windows Registry hive to process")
parser.add_argument("query", type=str,
help="Query for which to search")
parser.add_argument("-i", action="store_true", dest="case_insensitive",
help="Query for which to search")
args = parser.parse_args()
paths = []
value_names = []
values = []
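    # rec() walks the hive depth-first and prints one progress character per
    # hit: 'p' for a matching key path, 'n' for a value name, 'v' for value data.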
def rec(key, depth, needle):
for value in key.values():
if (args.case_insensitive and needle in value.name().lower()) or needle in value.name():
value_names.append((key.path(), value.name()))
sys.stdout.write("n")
sys.stdout.flush()
try:
if (args.case_insensitive and needle in str(value.value()).lower()) or needle in str(value.value()):
values.append((key.path(), value.name()))
sys.stdout.write("v")
sys.stdout.flush()
except UnicodeEncodeError:
pass
except UnicodeDecodeError:
pass
for subkey in key.subkeys():
if needle in subkey.name():
paths.append(subkey.path())
sys.stdout.write("p")
sys.stdout.flush()
rec(subkey, depth + 1, needle)
reg = Registry.Registry(args.registry_hive)
needle = args.query
if args.case_insensitive:
needle = needle.lower()
rec(reg.root(), 0, needle)
print("")
print("[Paths]")
for path in paths:
print(" - %s" % (path))
if len(paths) == 0:
print(" (none)")
print("")
print("[Value Names]")
for pair in value_names:
print(" - %s : %s" % (pair[0], pair[1]))
if len(value_names) == 0:
print(" (none)")
print("")
print("[Values]")
for pair in values:
print(" - %s : %s" % (pair[0], pair[1]))
if len(values) == 0:
print(" (none)")
print("")
if __name__ == "__main__":
main()
|
the-stack_0_16072 | # -*- coding: utf-8 -*-
from django.conf.urls import url
from django.urls import path
from . import views
urlpatterns = [
path('', views.AIPicsPageView.as_view(), name="list"),
path('<int:pk>', views.AIPicsDetailView.as_view(), name="detail"),
path('api/set-api-pic-state', views.ApiSetAiPicStateView.as_view(), name='set-api-pic-state'),
path('api/delete-api-pic', views.ApiDeleteAiPicsView.as_view(), name='delete-api-pic'),
path('api/delete-attachment', views.ApiDeleteAttachmentView.as_view(), name='delete-attachment'),
]
|
the-stack_0_16073 | import altair as alt
from altair_transform import extract_data, transform_chart
import numpy as np
import pandas as pd
import pytest
@pytest.fixture
def data():
rand = np.random.RandomState(42)
return pd.DataFrame(
{
"x": rand.randint(0, 100, 12),
"y": rand.randint(0, 100, 12),
"t": pd.date_range("2012-01-15", freq="M", periods=12),
"i": range(12),
"c": list("AAABBBCCCDDD"),
}
)
@pytest.fixture
def chart(data):
return (
alt.Chart(data)
.transform_calculate(xpy="datum.x + datum.y", xmy="datum.x - datum.y")
.mark_point()
.encode(x="xpy:Q", y="xmy:Q")
)
def test_extract_data(data, chart):
out1 = extract_data(chart)
out2 = data.copy()
out2["xpy"] = data.x + data.y
out2["xmy"] = data.x - data.y
assert out1.equals(out2)
def test_transform_chart(data, chart):
original_chart = chart.copy()
data_out = extract_data(chart)
chart_out = transform_chart(chart)
# Original chart not modified
assert original_chart == chart
# Transform applied to output chart
assert chart_out.data.equals(data_out)
assert chart_out.transform is alt.Undefined
assert chart.mark == chart_out.mark
assert chart.encoding == chart_out.encoding
def test_transform_chart_with_aggregate():
data = pd.DataFrame({"x": list("AABBBCCCC")})
chart = alt.Chart(data).mark_bar().encode(x="x:N", y="count():Q")
chart_out = transform_chart(chart)
assert chart_out.data.equals(pd.DataFrame({"x": list("ABC"), "__count": [2, 3, 4]}))
assert chart_out.encoding.to_dict() == {
"x": {"field": "x", "type": "nominal"},
"y": {"field": "__count", "type": "quantitative", "title": "Count of Records"},
}
|
the-stack_0_16074 | import itertools
import re
from cytoolz import (
compose,
curry,
)
from eth_utils import (
remove_0x_prefix,
to_dict,
)
from .filesystem import (
is_under_path,
)
from .hexadecimal import (
hexbytes_to_hexstr,
)
from .string import (
normalize_class_name,
)
def is_project_contract(contracts_source_dirs, contract_data):
return any(
is_under_path(source_dir, contract_data['source_path'])
for source_dir
in contracts_source_dirs
)
def is_test_contract(tests_dir, contract_data):
return is_under_path(tests_dir, contract_data['source_path'])
def package_contracts(contract_factories):
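    # Build a lightweight namespace type exposing the factories both via
    # indexing (contracts['MyContract']) and as attributes (contracts.MyContract).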
_dict = {
'__len__': lambda s: len(contract_factories),
'__iter__': lambda s: iter(contract_factories.items()),
'__contains__': lambda s, k: contract_factories.__contains__(k),
'__getitem__': lambda s, k: contract_factories.__getitem__(k),
'__setitem__': lambda s, k, v: contract_factories.__setitem__(k, v),
'keys': lambda s: contract_factories.keys(),
'values': lambda s: contract_factories.values(),
}
_dict.update(contract_factories)
return type('contracts', (object,), _dict)()
CONTRACT_FACTORY_FIELDS = {
'abi',
'asm',
'ast',
'bytecode',
'bytecode_runtime',
'clone_bin',
'dev_doc',
'interface',
'metadata',
'opcodes',
'src_map',
'src_map_runtime',
'user_doc',
}
def create_contract_factory(web3, contract_name, contract_data):
factory_kwargs = {
key: contract_data[key]
for key
in CONTRACT_FACTORY_FIELDS
if key in contract_data
}
return web3.eth.contract(
contract_name=normalize_class_name(contract_name),
**factory_kwargs
)
def construct_contract_factories(web3, compiled_contracts):
contract_classes = {
contract_name: create_contract_factory(
web3,
contract_name,
contract_data,
)
for contract_name, contract_data
in compiled_contracts.items()
}
return package_contracts(contract_classes)
@to_dict
def compute_direct_dependency_graph(compiled_contracts):
"""
Given a dictionary or mapping of compiled contract data, this returns a *shallow*
dependency graph of each contracts explicit link dependencies.
"""
for contract_data in compiled_contracts:
yield (
contract_data['name'],
contract_data['direct_dependencies'],
)
def compute_recursive_contract_dependencies(contract_name, dependency_graph):
"""
Recursive computation of the linker dependencies for a specific contract
within a contract dependency graph.
"""
direct_dependencies = dependency_graph.get(contract_name, set())
sub_dependencies = itertools.chain.from_iterable((
compute_recursive_contract_dependencies(dep, dependency_graph)
for dep in direct_dependencies
))
return set(itertools.chain(direct_dependencies, sub_dependencies))
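# Example (illustrative): given graph = {'A': {'B'}, 'B': {'C'}, 'C': set()},
# compute_recursive_contract_dependencies('A', graph) returns {'B', 'C'}.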
CONTRACT_NAME_REGEX = '^[_a-zA-Z][_a-zA-Z0-9]*$'
def is_contract_name(value):
return bool(re.match(CONTRACT_NAME_REGEX, value))
EMPTY_BYTECODE_VALUES = {None, "0x"}
SWARM_HASH_PREFIX = "a165627a7a72305820"
SWARM_HASH_SUFFIX = "0029"
EMBEDDED_SWARM_HASH_REGEX = (
SWARM_HASH_PREFIX +
"[0-9a-zA-Z]{64}" +
SWARM_HASH_SUFFIX +
"$"
)
SWARM_HASH_REPLACEMENT = (
SWARM_HASH_PREFIX +
"<" +
"-" * 20 +
"swarm-hash-placeholder" +
"-" * 20 +
">" +
SWARM_HASH_SUFFIX
)
PUSH20_OPCODE = "73"
ADDRESS_OPCODE = "30"
EQ_OPCODE = "14"
EMBEDDED_ADDRESS_REGEX = (
'^' +
PUSH20_OPCODE +
"[0-9a-f]{40}" +
ADDRESS_OPCODE +
EQ_OPCODE
)
ADDRESS_REPLACEMENT = (
PUSH20_OPCODE +
"<" +
"-" * 9 +
"address-place-holder" +
"-" * 9 +
">" +
ADDRESS_OPCODE +
EQ_OPCODE
)
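# Both placeholder strings are exactly as long as the data they replace, so the
# length invariant checked in compare_bytecode() still holds after substitution.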
def compare_bytecode(left, right):
unprefixed_left = remove_0x_prefix(left)
unprefixed_right = remove_0x_prefix(right)
sub = curry(re.sub)
norm_pipeline = compose(
sub(EMBEDDED_SWARM_HASH_REGEX, SWARM_HASH_REPLACEMENT),
sub(EMBEDDED_ADDRESS_REGEX, ADDRESS_REPLACEMENT)
)
norm_left = norm_pipeline(unprefixed_left)
norm_right = norm_pipeline(unprefixed_right)
if len(norm_left) != len(unprefixed_left) or len(norm_right) != len(unprefixed_right):
raise ValueError(
"Invariant. Normalized bytecodes are not the correct lengths:" +
"\n- left (original) :" +
left +
"\n- left (unprefixed):" +
unprefixed_left +
"\n- left (normalized):" +
norm_left +
"\n- right (original) :" +
right +
"\n- right (unprefixed):" +
unprefixed_right +
"\n- right (normalized):" +
norm_right
)
return norm_left == norm_right
def verify_contract_bytecode(web3, expected_bytecode, address):
"""
TODO: write tests for this.
"""
from populus.contracts.exceptions import BytecodeMismatch
expected_bytecode = hexbytes_to_hexstr(expected_bytecode)
# Check that the contract has bytecode
if expected_bytecode in EMPTY_BYTECODE_VALUES:
raise ValueError(
"Contract instances which contain an address cannot have empty "
"runtime bytecode"
)
chain_bytecode = hexbytes_to_hexstr(web3.eth.getCode(address))
if chain_bytecode in EMPTY_BYTECODE_VALUES:
raise BytecodeMismatch(
"No bytecode found at address: {0}".format(address)
)
elif not compare_bytecode(chain_bytecode, expected_bytecode):
raise BytecodeMismatch(
"Bytecode found at {0} does not match compiled bytecode:\n"
" - chain_bytecode: {1}\n"
" - compiled_bytecode: {2}".format(
address,
chain_bytecode,
expected_bytecode,
)
)
|
the-stack_0_16075 | # Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# Copyright 2019 The OSArchiver Authors. All rights reserved.
"""
OSArchiver's Source class that implement a db backend
"""
import re
import time
import logging
import pymysql
import arrow
from numpy import array_split
from osarchiver.source import Source
from osarchiver.common.db import DbBase
NOT_OS_DB = ['mysql', 'performance_schema', 'information_schema']
class Db(Source, DbBase):
"""
Database backend of OSArchiver's Source
"""
def __init__(self,
databases=None,
tables=None,
delete_data=0,
excluded_databases='',
excluded_tables='',
where='1=1 LIMIT 0',
archive_data=None,
name=None,
destination=None,
**kwargs):
"""
Create a Source instance with relevant configuration parameters given
in arguments
"""
self.databases = databases
self.tables = tables
self.excluded_databases = NOT_OS_DB
self.excluded_databases.extend([
d for d in re.split(r',|;|\n', excluded_databases)
if d not in NOT_OS_DB
])
self.excluded_tables = excluded_tables
self.archive_data = archive_data
self.delete_data = delete_data
self.destination = destination
self._databases_to_archive = []
self._tables_to_archive = {}
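        # Tables with a foreign key onto themselves are recorded here so that
        # delete_set() can disable the foreign key check when deleting from them.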
self.tables_with_circular_fk = []
        # When selecting data, be sure to use the same date to prevent
        # selecting parent data newer than children data. It is the
        # responsibility of the operator to use the {now} formatting value in
        # the where option of the configuration file. If {now} is omitted, it
        # is possible to get foreign key check errors because of parent data
        # being newer than children data.
self.now = arrow.utcnow().format(fmt='YYYY-MM-DD HH:mm:ss')
self.where = where.format(now=self.now)
Source.__init__(self, backend='db', name=name)
DbBase.__init__(self, **kwargs)
def __repr__(self):
return "Source {name} [Backend:{backend} Host:{host} - DB:{db} - "\
"Tables:{tables}]".format(backend=self.backend, db=self.databases,
name=self.name, tables=self.tables,
host=self.host)
def databases_to_archive(self):
"""
        Return a list of databases that are eligible for archiving. If no
        databases are provided or the * character is used, the method
        basically does a SHOW DATABASES to get the available databases.
        The method excludes the databases that are explicitly excluded.
"""
if self._databases_to_archive:
return self._databases_to_archive
if self.databases is None or self.databases == '*':
self._databases_to_archive = self.get_os_databases()
else:
self._databases_to_archive = [
d for d in re.split(r',|;|\n', self.databases)
]
excluded_databases_regex = \
"^(" + "|".join(self.excluded_databases) + ")$"
self._databases_to_archive = [
d for d in self._databases_to_archive
if not re.match(excluded_databases_regex, d)
]
return self._databases_to_archive
def tables_to_archive(self, database=None):
"""
        For a given database, return the list of tables that are eligible for
        archiving.
        - Retrieve tables if needed (*, or empty)
        - Check that tables have the 'deleted_at' column (deleted_column
          parameter)
        - Exclude tables in excluded_tables
        - Reorder tables depending on foreign key dependencies
"""
if database is None:
logging.warning("Can not call tables_to_archive on None database")
return []
if database in self._tables_to_archive:
return self._tables_to_archive[database]
database_tables = [
v[0] for (i, v) in enumerate(self.get_database_tables(database))
]
logging.info("Tables list of database '%s': %s", database,
database_tables)
        # Step 1: get all the tables we want to archive.
        # No tables specified or the * wildcard means we want all tables,
        # else we filter against the tables specified.
if self.tables is None or self.tables == '*':
self._tables_to_archive[database] = database_tables
else:
self._tables_to_archive[database] = \
[t for t in re.split(r',|;|\n', self.tables)
if t in database_tables]
# Step 2: verify that all tables have the deleted column 'deleted_at'
logging.debug("Verifying that tables have the '%s' column",
self.deleted_column)
tables = []
for table in self._tables_to_archive[database]:
if not self.table_has_deleted_column(table=table,
database=database):
logging.debug(
"Table '%s' has no column named '%s',"
" ignoring it", table, self.deleted_column)
continue
tables.append(table)
# update self._tables_to_archive with the filtered tables
self._tables_to_archive[database] = tables
        # Step 3: then exclude the ones explicitly given
excluded_tables_regex = "^(" + "|".join(
re.split(r',|;|\n', self.excluded_tables)) + ")$"
logging.debug("Ignoring tables matching '%s'", excluded_tables_regex)
self._tables_to_archive[database] = [
t for t in self._tables_to_archive[database]
if not re.match(excluded_tables_regex, t)
]
        # Step 4: for each table, retrieve child tables referencing the parent
        # table and order them children first, parents last
ordered_tables = []
for table in self._tables_to_archive[database]:
children = self.get_linked_tables(database=database, table=table)
for child in children:
# never do same things twice
if child['table_name'] in ordered_tables:
ordered_tables.remove(child['table_name'])
# check if table was already checked for deleted column
if not child['table_name'] in \
self._tables_to_archive[database]:
if not self.table_has_deleted_column(
table=child['table_name'],
database=child['table_schema']):
logging.debug(
"Child table '%s' has not column named "
"'%s', can not handle it", child['table_name'],
self.deleted_column)
continue
ordered_tables.append(child['table_name'])
self._tables_to_archive[database] = ordered_tables
logging.debug(
"Tables ordered depending foreign key dependencies: "
"'%s'", self._tables_to_archive[database])
return self._tables_to_archive[database]
def get_linked_tables(self, database=None, table=None):
"""
        For a given database.table, return tables that have a foreign key
        dependency on the current table
"""
children = self.get_tables_with_fk(database=database, table=table)
logging.debug("Ordered tables to archive from %s.%s: %s", database,
table, children)
children_tables = []
for child in children:
if child['table_schema'] == database and \
child['table_name'] == table:
self.tables_with_circular_fk.append('{db}.{table}'.format(
db=database, table=table))
continue
grandchildren = self.get_linked_tables(database=database,
table=child['table_name'])
for grandchild in grandchildren:
if grandchild in children_tables:
children_tables.remove(grandchild)
children_tables.append(grandchild)
children_tables.append({'table_name': table, 'table_schema': database})
logging.debug("Returned child tables of %s.%s: %s", database, table,
children_tables)
return children_tables
def select(self, limit=None, database=None, table=None):
"""
        Select data from a database.table, applying the given limit or the
        default one. The per-set select depends on the primary key type
        (int vs uuid).
        In case of int:
         SELECT * FROM <db>.<table> WHERE <pk> > <last_selected_id> AND ...
        In case of uuid (uuids are not naturally ordered, so we sort them):
         SELECT * FROM <db>.<table> WHERE <pk> > "<last_selected_id>" AND...
            ORDER BY <pk>
"""
offset = 0
last_selected_id = 0
# Use primary key column to improve performance on large
# dataset vs using OFFSET
primary_key = self.get_table_primary_key(database=database,
table=table)
if limit is None:
limit = self.select_limit
sql = "SELECT * FROM `{database}`.`{table}` WHERE {pk} > "\
"'{last_id}' AND {where} LIMIT {limit}"
pk_type_checked = False
while True:
formatted_sql = sql.format(database=database,
table=table,
where=self.where,
limit=limit,
last_id=last_selected_id,
pk=primary_key,
offset=offset)
result = self.db_request(sql=formatted_sql,
cursor_type=pymysql.cursors.DictCursor,
database=database,
table=table,
fetch_method='fetchall')
logging.info("Fetched %s result in %s.%s", len(result), database,
table)
if not result:
break
last_selected_id = result[-1][primary_key]
yield result
offset += len(result)
if pk_type_checked is False:
                # If the primary key is a digit, remove the single quotes from
                # the last_id variable for performance purposes
if str(last_selected_id).isdigit():
                    # remove the single quotes around the id
sql = "SELECT * FROM `{database}`.`{table}` WHERE {pk} >"\
" {last_id} AND {where} LIMIT {limit}"
else:
                    # else this is a string and we force ordering by that
                    # string to simulate an integer primary key
sql = "SELECT * FROM `{database}`.`{table}` WHERE {pk} >"\
" '{last_id}' AND {where} ORDER BY {pk} LIMIT {limit}"
pk_type_checked = True
def read(self, limit=None):
"""
The read method that has to be implemented (Source abstract class)
"""
databases_to_archive = self.databases_to_archive()
logging.info("Database elected for archiving: %s",
databases_to_archive)
for database in databases_to_archive:
tables_to_archive = self.tables_to_archive(database=database)
logging.info("Tables elected for archiving: %s", tables_to_archive)
for table in tables_to_archive:
logging.info("%s.%s is to archive", database, table)
yield {
'database':
database,
'table':
table,
'data':
self.select(limit=limit, database=database, table=table)
}
def delete_set(self, database=None, table=None, limit=None, data=None):
"""
Delete a set of data using the primary_key of table
"""
if not self.delete_data:
logging.info(
"Ignoring delete step because delete_data is set to"
" %s", self.delete_data)
return
if limit is None:
limit = self.delete_limit
primary_key = self.get_table_primary_key(database=database,
table=table)
# Check if primary key is a digit to prevent casting by MySQL and
# optimize the request, store the value in metadata for caching
pk_is_digit = self.get_metadata(database=database,
table=table,
key='pk_is_digit')
if pk_is_digit is None:
pk_is_digit = str(data[0][primary_key]).isdigit()
self.add_metadata(database=database,
table=table,
key='pk_is_digit',
value=pk_is_digit)
def create_array_chunks(array, chunk_size):
for i in range(0, len(array), chunk_size):
yield array[i:i + chunk_size]
        # For performance purposes, split data into subsets of length=limit
for subdata in list(create_array_chunks(data, limit)):
if pk_is_digit:
ids = ', '.join([str(d[primary_key]) for d in subdata])
else:
ids = '"' + '", "'.join([str(d['id']) for d in subdata]) + '"'
total_deleted_count = 0
# equivalent to a while True but we know why we are looping
while "there are rows to delete":
if total_deleted_count > 0:
logging.debug(
"Waiting %s seconds before deleting next"
"subset of data ", self.delete_loop_delay)
time.sleep(int(self.delete_loop_delay))
sql = "DELETE FROM `{database}`.`{table}` WHERE "\
"`{pk}` IN ({ids}) LIMIT {limit}".format(
database=database,
table=table,
ids=ids,
pk=primary_key,
limit=limit)
foreign_key_check = \
'{db}.{table}'.format(db=database, table=table) \
not in self.tables_with_circular_fk
count = self.db_request(sql=sql,
foreign_key_check=foreign_key_check,
database=database,
table=table)
logging.info("%s rows deleted from %s.%s", count, database,
table)
total_deleted_count += count
if int(count) < int(limit) or \
total_deleted_count == len(subdata):
logging.debug("No more row to delete in this data set")
break
logging.debug("Waiting %s seconds after a deletion",
self.delete_loop_delay)
time.sleep(int(self.delete_loop_delay))
def delete(self, database=None, table=None, limit=None, data=None):
"""
The delete method that has to be implemented (Source abstract class)
"""
try:
self.delete_set(database=database,
table=table,
limit=limit,
data=data)
except pymysql.err.IntegrityError as integrity_error:
            # A foreign key constraint failure (error 1451) usually happens
            # because of errors while processing OpenStack tasks. To avoid
            # never deleting the rest of a set, we re-run the delete with half
            # of the data each time we catch an integrity error, until we have
            # isolated the offending row.
if integrity_error.args[0] != 1451:
raise integrity_error
# we caught the row causing integrity error
if len(data) == 1:
logging.error("OSArchiver hit a row that will never be deleted"
" unless you fix remaining chlidren data")
logging.error("Parent row that can not be deleted: %s", data)
logging.error("To get children items:")
logging.error(
self.integrity_exception_select_statement(
error=integrity_error.args[1], row=data[0]))
logging.error("Here a POTENTIAL fix, ensure BEFORE that data "
"should be effectively deleted, then run "
"osarchiver again:")
logging.error(
self.integrity_exception_potential_fix(
error=integrity_error.args[1], row=data[0]))
else:
logging.error("Integrity error caught, deleting with "
"dichotomy")
for subdata in array_split(data, 2):
logging.debug(
"Dichotomy delete with a set of %s data "
"length", len(subdata))
self.delete(database=database,
table=table,
data=subdata,
limit=len(subdata))
def clean_exit(self):
"""
Tasks to be executed to exit cleanly:
- Disconnect from the database
"""
logging.info("Closing source DB connection")
self.disconnect()
|
the-stack_0_16077 | # Original source: https://github.com/pytorch/examples/blob/master/fast_neural_style/neural_style/neural_style.py
import argparse
import os
import sys
import re
from PIL import Image
import torch
from torchvision import transforms
def load_image(filename, size=None, scale=None):
img = Image.open(filename)
if size is not None:
img = img.resize((size, size), Image.ANTIALIAS)
elif scale is not None:
img = img.resize((int(img.size[0] / scale), int(img.size[1] / scale)), Image.ANTIALIAS)
return img
def save_image(filename, data):
img = data.clone().clamp(0, 255).numpy()
img = img.transpose(1, 2, 0).astype("uint8")
img = Image.fromarray(img)
img.save(filename)
class TransformerNet(torch.nn.Module):
def __init__(self):
super(TransformerNet, self).__init__()
# Initial convolution layers
self.conv1 = ConvLayer(3, 32, kernel_size=9, stride=1)
self.in1 = torch.nn.InstanceNorm2d(32, affine=True)
self.conv2 = ConvLayer(32, 64, kernel_size=3, stride=2)
self.in2 = torch.nn.InstanceNorm2d(64, affine=True)
self.conv3 = ConvLayer(64, 128, kernel_size=3, stride=2)
self.in3 = torch.nn.InstanceNorm2d(128, affine=True)
# Residual layers
self.res1 = ResidualBlock(128)
self.res2 = ResidualBlock(128)
self.res3 = ResidualBlock(128)
self.res4 = ResidualBlock(128)
self.res5 = ResidualBlock(128)
# Upsampling Layers
self.deconv1 = UpsampleConvLayer(128, 64, kernel_size=3, stride=1, upsample=2)
self.in4 = torch.nn.InstanceNorm2d(64, affine=True)
self.deconv2 = UpsampleConvLayer(64, 32, kernel_size=3, stride=1, upsample=2)
self.in5 = torch.nn.InstanceNorm2d(32, affine=True)
self.deconv3 = ConvLayer(32, 3, kernel_size=9, stride=1)
# Non-linearities
self.relu = torch.nn.ReLU()
def forward(self, X):
y = self.relu(self.in1(self.conv1(X)))
y = self.relu(self.in2(self.conv2(y)))
y = self.relu(self.in3(self.conv3(y)))
y = self.res1(y)
y = self.res2(y)
y = self.res3(y)
y = self.res4(y)
y = self.res5(y)
y = self.relu(self.in4(self.deconv1(y)))
y = self.relu(self.in5(self.deconv2(y)))
y = self.deconv3(y)
return y
class ConvLayer(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride):
super(ConvLayer, self).__init__()
reflection_padding = kernel_size // 2
self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)
self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride)
def forward(self, x):
out = self.reflection_pad(x)
out = self.conv2d(out)
return out
class ResidualBlock(torch.nn.Module):
"""ResidualBlock
introduced in: https://arxiv.org/abs/1512.03385
recommended architecture: http://torch.ch/blog/2016/02/04/resnets.html
"""
def __init__(self, channels):
super(ResidualBlock, self).__init__()
self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in1 = torch.nn.InstanceNorm2d(channels, affine=True)
self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in2 = torch.nn.InstanceNorm2d(channels, affine=True)
self.relu = torch.nn.ReLU()
def forward(self, x):
residual = x
out = self.relu(self.in1(self.conv1(x)))
out = self.in2(self.conv2(out))
out = out + residual
return out
class UpsampleConvLayer(torch.nn.Module):
"""UpsampleConvLayer
Upsamples the input and then does a convolution. This method gives better results
compared to ConvTranspose2d.
ref: http://distill.pub/2016/deconv-checkerboard/
"""
def __init__(self, in_channels, out_channels, kernel_size, stride, upsample=None):
super(UpsampleConvLayer, self).__init__()
self.upsample = upsample
if upsample:
self.upsample_layer = torch.nn.Upsample(mode='nearest', scale_factor=upsample)
reflection_padding = kernel_size // 2
self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)
self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride)
def forward(self, x):
x_in = x
if self.upsample:
x_in = self.upsample_layer(x_in)
out = self.reflection_pad(x_in)
out = self.conv2d(out)
return out
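# Illustration only: a hedged sanity check of the decoder building blocks above.
# With upsample=2 and stride=1, UpsampleConvLayer doubles the spatial size while the
# reflection padding keeps the convolution shape-preserving, which is why it replaces
# ConvTranspose2d (see the distill.pub reference). The tensor sizes are arbitrary.
def _shape_sketch():
    x = torch.randn(1, 128, 32, 32)                                      # dummy feature map
    up = UpsampleConvLayer(128, 64, kernel_size=3, stride=1, upsample=2)
    assert up(x).shape == (1, 64, 64, 64)                                # spatial dims doubled
    conv = ConvLayer(3, 32, kernel_size=9, stride=1)
    assert conv(torch.randn(1, 3, 256, 256)).shape == (1, 32, 256, 256)  # shape-preserving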
def stylize(args):
device = torch.device("cuda" if args.cuda else "cpu")
with torch.no_grad():
style_model = TransformerNet()
state_dict = torch.load(os.path.join(args.model_dir, args.style+".pth"))
# remove saved deprecated running_* keys in InstanceNorm from the checkpoint
for k in list(state_dict.keys()):
if re.search(r'in\d+\.running_(mean|var)$', k):
del state_dict[k]
style_model.load_state_dict(state_dict)
style_model.to(device)
filenames = os.listdir(args.content_dir)
for filename in filenames:
print("Processing {}".format(filename))
full_path = os.path.join(args.content_dir, filename)
content_image = load_image(full_path, scale=args.content_scale)
content_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Lambda(lambda x: x.mul(255))
])
content_image = content_transform(content_image)
content_image = content_image.unsqueeze(0).to(device)
output = style_model(content_image).cpu()
output_path = os.path.join(args.output_dir, filename)
save_image(output_path, output[0])
def main():
arg_parser = argparse.ArgumentParser(description="parser for fast-neural-style")
arg_parser.add_argument("--content-scale", type=float, default=None,
help="factor for scaling down the content image")
arg_parser.add_argument("--model-dir", type=str, required=True,
help="saved model to be used for stylizing the image.")
arg_parser.add_argument("--cuda", type=int, required=True,
help="set it to 1 for running on GPU, 0 for CPU")
arg_parser.add_argument("--style", type=str,
help="style name")
arg_parser.add_argument("--content-dir", type=str, required=True,
help="directory holding the images")
arg_parser.add_argument("--output-dir", type=str, required=True,
help="directory holding the output images")
args = arg_parser.parse_args()
if args.cuda and not torch.cuda.is_available():
print("ERROR: cuda is not available, try running on CPU")
sys.exit(1)
os.makedirs(args.output_dir, exist_ok=True)
stylize(args)
if __name__ == "__main__":
main()
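# Illustration only: a hedged example invocation of this script; the script name and
# the model/image directories are hypothetical. --style must match a <style>.pth
# checkpoint inside --model-dir.
# python neural_style_batch.py --model-dir saved_models --style mosaic \
#     --content-dir content_images --output-dir stylized --cuda 1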
|
the-stack_0_16078 | from django.shortcuts import render, get_object_or_404
from django.contrib.auth.decorators import login_required
from .forms import Createform
from django.contrib import messages
@login_required
def editpost(request, id):
    obj = get_object_or_404(Post, id=id)
    form = Createform(request.POST or None, instance=obj)
    if form.is_valid():
        obj = form.save(commit=False)
        obj.save()
        messages.success(request, "You successfully updated the post")
        context = {'form': form}
        return render(request, 'posts/edit.html', context)
    else:
        context = {'form': form,
                   'error': 'The form was not updated successfully. Please enter a title and content'}
        return render(request, 'posts/edit.html', context)
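# Illustration only: a hedged sketch of the urls.py entry that would route to the
# view above; the URL prefix and pattern name are hypothetical.
# from django.urls import path
# from . import views
# urlpatterns = [path('posts/<int:id>/edit/', views.editpost, name='editpost')]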
|
the-stack_0_16079 | import numpy
import pandas
import xarray as xr
import numpy as np
from dolo.numeric.optimize.ncpsolve import ncpsolve
from dolo.numeric.optimize.newton import newton as newton_solver
from dolo.numeric.optimize.newton import SerialDifferentiableFunction
## TODO: extend for mc process
def response(model, dr, varname, T=40, impulse:float=None):
i_exo = model.symbols["exogenous"].index(varname)
if impulse is None:
try:
impulse = numpy.sqrt( model.exogenous.Σ[i_exo, i_exo] ) # works only for IID/AR1
except:
impulse = numpy.sqrt( model.exogenous.σ ) # works only for IID/AR1
e1 = numpy.zeros(len(model.symbols["exogenous"]))
e1[i_exo] = impulse
m_simul = model.exogenous.response(T, e1)
m_simul = m_simul[:,None,:]
sim = simulate(model, dr, N=1, T=T, driving_process=m_simul, stochastic=False)
irf = sim.sel(N=0)
return irf
def find_index(sim, values):
sh = sim.shape
N = sh[0]
T = sh[1]
indices = np.zeros((N,T), dtype=int)
for n in range(N):
for t in range(T):
v = sim[n,t,:]
ind = np.where((values == v[None,:]).all(axis=1))[0][0]
indices[n,t] = ind
return indices
from dolo.numeric.grids import CartesianGrid, UnstructuredGrid
from dolo.algos.results import AlgoResult
def simulate(model, dr, process=None, N=1, T=40, s0=None, i0=None, m0=None,
driving_process=None, seed=42, stochastic=True):
'''
Simulate a model using the specified decision rule.
Parameters
----------
model: Model
dr: decision rule
process:
s0: ndarray
initial state where all simulations start
driving_process: ndarray
realization of exogenous driving process (drawn randomly if None)
N: int
number of simulations
T: int
horizon for the simulations
seed: int
used to initialize the random number generator. Use it to replicate
exact same results among simulations
discard: boolean (False)
if True, then all simulations containing at least one non finite value
are discarded
Returns
-------
xarray.DataArray:
returns a ``T x N x n_v`` array where ``n_v``
is the number of variables.
'''
if isinstance(dr, AlgoResult):
dr = dr.dr
calib = model.calibration
parms = numpy.array(calib['parameters'])
if s0 is None:
s0 = calib['states']
n_x = len(model.symbols["controls"])
n_s = len(model.symbols["states"])
s_simul = numpy.zeros((T, N, n_s))
x_simul = numpy.zeros((T, N, n_x))
s_simul[0, :, :] = s0[None, :]
# are we simulating a markov chain or a continuous process ?
if driving_process is not None:
if len(driving_process.shape)==3:
m_simul = driving_process
sim_type = 'continuous'
if m0 is None:
m0 = model.calibration["exogenous"]
x_simul[0,:,:] = dr.eval_ms(m0[None,:], s0[None,:])[0,:]
elif len(driving_process.shape)==2:
i_simul = driving_process
nodes = dr.exo_grid.nodes
m_simul = nodes[i_simul]
# inds = i_simul.ravel()
# m_simul = np.reshape( np.concatenate( [nodes[i,:][None,:] for i in inds.ravel()], axis=0 ), inds.shape + (-1,) )
sim_type = 'discrete'
x_simul[0,:,:] = dr.eval_is(i0, s0[None,:])[0,:]
else:
raise Exception("Incorrect specification of driving values.")
m0 = m_simul[0,:,:]
else:
from dolo.numeric.processes import ContinuousProcess
if process is None:
if hasattr(dr,'dprocess') and hasattr(dr.dprocess, 'simulate'):
process = dr.dprocess
else:
process = model.exogenous
# detect type of simulation
if isinstance(process, ContinuousProcess):
sim_type = 'continuous'
else:
sim_type = 'discrete'
if sim_type =='discrete':
if i0 is None:
i0 = 0
dp = process
m_simul = dp.simulate(N, T, i0=i0, stochastic=stochastic)
i_simul = find_index(m_simul, dp.values)
m0 = dp.node(i0)
x0 = dr.eval_is(i0, s0[None,:])[0,:]
else:
m_simul = process.simulate(N, T, m0=m0, stochastic=stochastic)
sim_type = 'continuous'
if m0 is None:
m0 = model.calibration["exogenous"]
x0 = dr.eval_ms(m0[None,:], s0[None,:])[0,:]
x_simul[0, :, :] = x0[None, :]
f = model.functions['arbitrage']
g = model.functions['transition']
numpy.random.seed(seed)
mp = m0
for i in range(T):
m = m_simul[i,:,:]
s = s_simul[i,:,:]
if sim_type=='discrete':
i_m = i_simul[i,:]
xx = [dr.eval_is(i_m[ii], s[ii,:][None,:])[0,:] for ii in range(s.shape[0])]
x = np.row_stack(xx)
else:
x = dr.eval_ms(m, s)
x_simul[i,:,:] = x
ss = g(mp, s, x, m, parms)
if i < T-1:
s_simul[i + 1, :, :] = ss
mp = m
if 'auxiliary' not in model.functions: # TODO: find a better test than this
l = [s_simul, x_simul]
varnames = model.symbols['states'] + model.symbols['controls']
else:
aux = model.functions['auxiliary']
a_simul = aux(
m_simul.reshape((N * T, -1)),
s_simul.reshape((N * T, -1)),
x_simul.reshape((N * T, -1)), parms)
a_simul = a_simul.reshape(T, N, -1)
l = [m_simul, s_simul, x_simul, a_simul]
varnames = model.symbols['exogenous'] + model.symbols['states'] + model.symbols[
'controls'] + model.symbols['auxiliaries']
simul = numpy.concatenate(l, axis=2)
if sim_type=='discrete':
varnames = ['_i_m'] + varnames
simul = np.concatenate([i_simul[:,:,None], simul], axis=2)
data = xr.DataArray(
simul,
dims=['T','N','V'],
coords={'T': range(T), 'N': range(N), 'V': varnames}
)
return data
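# Illustration only: a hedged sketch of a typical simulate() call. The model and
# decision rule are assumed to come from the usual dolo workflow (yaml_import +
# time_iteration); the yaml path is hypothetical.
# from dolo import yaml_import, time_iteration
# model = yaml_import("examples/models/rbc.yaml")
# dr = time_iteration(model)
# sim = simulate(model, dr, N=10, T=40)                     # DataArray with dims (T, N, V)
# first_control = sim.sel(V=model.symbols["controls"][0])   # one variable, all paths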
def tabulate(model, dr, state, bounds=None, n_steps=100, s0=None, i0=None, m0=None, **kwargs):
import numpy
if isinstance(dr, AlgoResult):
dr = dr.dr
states_names = model.symbols['states']
controls_names = model.symbols['controls']
index = states_names.index(str(state))
if bounds is None:
try:
endo_grid = dr.endo_grid
bounds = [endo_grid.min[index], endo_grid.max[index]]
except:
domain = model.domain
bounds = [domain.min[index], domain.max[index]]
if bounds is None:
raise Exception("No bounds provided for simulation or by model.")
values = numpy.linspace(bounds[0], bounds[1], n_steps)
if s0 is None:
s0 = model.calibration['states']
svec = numpy.row_stack([s0]*n_steps)
svec[:,index] = values
try:
dp = dr.dprocess
except:
dp = model.exogenous.discretize()
if (i0 is None) and (m0 is None):
from dolo.numeric.grids import UnstructuredGrid
if isinstance(dp.grid, UnstructuredGrid):
n_ms = dp.n_nodes
[q,r] = divmod(n_ms,2)
i0 = q-1+r
else:
m0 = model.calibration["exogenous"]
if i0 is not None:
m = dp.node(i0)
xvec = dr.eval_is(i0,svec)
elif m0 is not None:
m = m0
xvec = dr.eval_ms(m0,svec)
mm = numpy.row_stack([m]*n_steps)
l = [mm, svec, xvec]
series = model.symbols['exogenous'] + model.symbols['states'] + model.symbols['controls']
if 'auxiliary' in model.functions:
p = model.calibration['parameters']
pp = numpy.row_stack([p]*n_steps)
avec = model.functions['auxiliary'](mm, svec,xvec,pp)
l.append(avec)
series.extend(model.symbols['auxiliaries'])
import pandas
tb = numpy.concatenate(l, axis=1)
df = pandas.DataFrame(tb, columns=series)
return df
def tabulate_2d(model, dr, states=None, i0=0, s0=None, n=[12,13]):
import numpy
import xarray as xr
if isinstance(dr, AlgoResult):
dr = dr.dr
if s0 is None:
s0 = model.calibration["states"]
if states is None:
states = model.symbols["states"]
assert(len(states)==2)
domain = model.get_domain()
lps = [numpy.linspace(*domain[s], n[i]) for i,s in enumerate(states)]
i_x = model.symbols["states"].index(states[0])
i_y = model.symbols["states"].index(states[1])
vals = []
vstates = []
s = s0.copy()
for xx in lps[0]:
vv = []
s[i_x] = xx
for yy in lps[1]:
s[i_y] = yy
x = dr.eval_is(i0, s)
vv.append(numpy.concatenate([s,x]))
vals.append(vv)
vv = numpy.array(vals)
controls = model.symbols["states"] + model.symbols["controls"]
# tab = xr.DataArray(vv, dims=[states[0], states[1], 'V'], coords=[lps[0], lps[1], 'V'])
tab = xr.DataArray(vv, dims=[states[0], states[1], 'V'], coords={states[0]:lps[0], states[1]:lps[1], 'V':controls})
return tab
def plot3d(tab, varname):
    # plotly is imported lazily here; it is only needed for this 3d helper
    import plotly.graph_objs as go
    from plotly.offline import iplot
    X = numpy.array( tab[tab.dims[0]] )
    Y = numpy.array( tab[tab.dims[1]] )
    Z = numpy.array( tab.loc[:,:,varname] )
data = [
go.Surface(
x=X,
y=Y,
z=Z
)
]
layout = go.Layout(
title='Equity',
autosize=False,
width=500,
height=500,
# xaxis=go.XAxis(title=tab.dims[0]),
# yaxis={'title':tab.dims[1]},
# zaxis={'title':varname},
xaxis=dict(
title='x Axis',
nticks=7,
titlefont=dict(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
),
margin=dict(
l=65,
r=50,
b=65,
t=90
)
)
fig = go.Figure(data=data, layout=layout)
return iplot(fig, filename='graph_'+varname)
def plot_decision_rule(model, dr, state, plot_controls=None, i0=None, **kwargs):
    # model, dr and state must be passed explicitly: tabulate() needs them, and the
    # x-axis values are read back from the tabulated state column
    if isinstance(dr, AlgoResult):
        dr = dr.dr
    df = tabulate(model, dr, state, bounds=None, n_steps=100, s0=None, i0=i0, m0=None)
    values = df[str(state)]
    from matplotlib import pyplot
    if isinstance(plot_controls, str):
        cn = plot_controls
        pyplot.plot(values, df[cn], **kwargs)
    else:
        for cn in plot_controls:
            pyplot.plot(values, df[cn], label=cn, **kwargs)
        pyplot.legend()
    pyplot.xlabel('state = {} | mstate = {}'.format(state, i0))
|
the-stack_0_16081 | import tensorflow as tf
import numpy as np
import cv2
# from .base_model import BaseModel
# from .utils import box_nms
def classical_detector_descriptor(im, **config):
im = np.uint8(im)
if config['method'] == 'sift':
sift = cv2.xfeatures2d.SIFT_create(nfeatures=1500)
keypoints, desc = sift.detectAndCompute(im, None)
responses = np.array([k.response for k in keypoints])
keypoints = np.array([k.pt for k in keypoints]).astype(int)
desc = np.array(desc)
        detections = np.zeros(im.shape[:2], float)  # np.float was removed from NumPy; use the builtin float
        detections[keypoints[:, 1], keypoints[:, 0]] = responses
        descriptors = np.zeros((im.shape[0], im.shape[1], 128), float)
        descriptors[keypoints[:, 1], keypoints[:, 0]] = desc
elif config['method'] == 'orb':
orb = cv2.ORB_create(nfeatures=1500)
keypoints, desc = orb.detectAndCompute(im, None)
responses = np.array([k.response for k in keypoints])
keypoints = np.array([k.pt for k in keypoints]).astype(int)
desc = np.array(desc)
        detections = np.zeros(im.shape[:2], float)
        detections[keypoints[:, 1], keypoints[:, 0]] = responses
        descriptors = np.zeros((im.shape[0], im.shape[1], 32), float)
        descriptors[keypoints[:, 1], keypoints[:, 0]] = desc
detections = detections.astype(np.float32)
descriptors = descriptors.astype(np.float32)
return (detections, descriptors)
# from models.classical_detector_descriptors import SIFT_det
def SIFT_det(img, img_rgb, visualize=False, nfeatures=2000):
"""
return:
x_all: np [N, 2] (x, y)
des: np [N, 128] (descriptors)
"""
# Initiate SIFT detector
# pip install opencv-python==3.4.2.16, opencv-contrib-python==3.4.2.16
# https://www.pyimagesearch.com/2015/07/16/where-did-sift-and-surf-go-in-opencv-3/
img = np.uint8(img)
# print("img: ", img)
sift = cv2.xfeatures2d.SIFT_create(contrastThreshold=1e-5)
# find the keypoints and descriptors with SIFT
kp, des = sift.detectAndCompute(img, None)
# print("# kps: {}, descriptors: {}".format(len(kp), des.shape))
x_all = np.array([p.pt for p in kp])
    if visualize:
        import matplotlib.pyplot as plt  # imported lazily; only needed when visualizing
        plt.figure(figsize=(30, 4))
        plt.imshow(img_rgb)
plt.scatter(x_all[:, 0], x_all[:, 1], s=10, marker='o', c='y')
plt.show()
# return x_all, kp, des
return x_all, des
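# Illustration only: a hedged usage sketch for SIFT_det(); the image path is
# hypothetical and SIFT requires an opencv-contrib build (see the comment above).
# gray = cv2.imread("example.png", cv2.IMREAD_GRAYSCALE)
# rgb = cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)
# pts, desc = SIFT_det(gray, rgb, visualize=False)
# # pts: (N, 2) array of (x, y) keypoints; desc: (N, 128) SIFT descriptors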
'''
class ClassicalDetectorsDescriptors(BaseModel):
input_spec = {
'image': {'shape': [None, None, None, 1], 'type': tf.float32}
}
default_config = {
'method': 'sift', # 'orb'
'threshold': 0.5,
'nms': 4,
'top_k': 300,
}
trainable = False
def _model(self, inputs, mode, **config):
im = inputs['image']
with tf.device('/cpu:0'):
keypoints, descriptors = tf.map_fn(lambda i: tf.py_func(
lambda x: classical_detector_descriptor(x, **config),
[i],
(tf.float32, tf.float32)),
im, [tf.float32, tf.float32])
prob = keypoints
prob_nms = prob
if config['nms']:
prob_nms = tf.map_fn(lambda p: box_nms(p, config['nms'], min_prob=0.,
keep_top_k=config['top_k']), prob)
pred = tf.cast(tf.greater_equal(prob_nms, config['threshold']), tf.int32)
keypoints = {'prob': prob, 'prob_nms': prob_nms, 'pred': pred}
return {**keypoints, 'descriptors': descriptors}
def _loss(self, outputs, inputs, **config):
raise NotImplementedError
def _metrics(self, outputs, inputs, **config):
pred = outputs['pred']
labels = inputs['keypoint_map']
precision = tf.reduce_sum(pred*labels) / tf.reduce_sum(pred)
recall = tf.reduce_sum(pred*labels) / tf.reduce_sum(labels)
return {'precision': precision, 'recall': recall}
''' |
the-stack_0_16082 | # Copyright 2021 solo-learn development team.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies
# or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import argparse
from typing import Any, Dict, List, Sequence
import torch
import torch.nn as nn
from solo.losses.vicreg import vicreg_loss_func
from solo.methods.base import BaseMethod
from solo.utils.misc import gather, get_rank
import torch.nn.functional as F
from solo.losses.oursloss import ours_loss_func
from solo.utils.metrics import corrcoef, pearsonr_cor
class VICReg(BaseMethod):
def __init__(
self,
proj_output_dim: int,
proj_hidden_dim: int,
sim_loss_weight: float,
var_loss_weight: float,
cov_loss_weight: float,
lam: float,
tau_decor: float,
our_loss: str,
**kwargs
):
"""Implements VICReg (https://arxiv.org/abs/2105.04906)
Args:
proj_output_dim (int): number of dimensions of the projected features.
proj_hidden_dim (int): number of neurons in the hidden layers of the projector.
sim_loss_weight (float): weight of the invariance term.
var_loss_weight (float): weight of the variance term.
cov_loss_weight (float): weight of the covariance term.
"""
super().__init__(**kwargs)
self.lam = lam
self.tau_decor = tau_decor
self.our_loss = our_loss
self.sim_loss_weight = sim_loss_weight
self.var_loss_weight = var_loss_weight
self.cov_loss_weight = cov_loss_weight
# projector
self.projector = nn.Sequential(
nn.Linear(self.features_dim, proj_hidden_dim),
nn.BatchNorm1d(proj_hidden_dim),
nn.ReLU(),
nn.Linear(proj_hidden_dim, proj_hidden_dim),
nn.BatchNorm1d(proj_hidden_dim),
nn.ReLU(),
nn.Linear(proj_hidden_dim, proj_output_dim),
)
@staticmethod
def add_model_specific_args(parent_parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
parent_parser = super(VICReg, VICReg).add_model_specific_args(parent_parser)
parser = parent_parser.add_argument_group("vicreg")
# projector
parser.add_argument("--proj_output_dim", type=int, default=2048)
parser.add_argument("--proj_hidden_dim", type=int, default=2048)
# parameters
parser.add_argument("--sim_loss_weight", default=25, type=float)
parser.add_argument("--var_loss_weight", default=25, type=float)
parser.add_argument("--cov_loss_weight", default=1.0, type=float)
# our loss
parser.add_argument("--lam", type=float, default=0.1)
parser.add_argument("--tau_decor", type=float, default=0.1)
parser.add_argument("--our_loss", type=str, default='False')
return parent_parser
@property
def learnable_params(self) -> List[dict]:
"""Adds projector parameters to the parent's learnable parameters.
Returns:
List[dict]: list of learnable parameters.
"""
extra_learnable_params = [{"params": self.projector.parameters()}]
return super().learnable_params + extra_learnable_params
def forward(self, X: torch.Tensor, *args, **kwargs) -> Dict[str, Any]:
"""Performs the forward pass of the backbone and the projector.
Args:
X (torch.Tensor): a batch of images in the tensor format.
Returns:
Dict[str, Any]: a dict containing the outputs of the parent and the projected features.
"""
out = super().forward(X, *args, **kwargs)
z = self.projector(out["feats"])
return {**out, "z": z}
def training_step(self, batch: Sequence[Any], batch_idx: int) -> torch.Tensor:
"""Training step for VICReg reusing BaseMethod training step.
Args:
batch (Sequence[Any]): a batch of data in the format of [img_indexes, [X], Y], where
[X] is a list of size num_crops containing batches of images.
batch_idx (int): index of the batch.
Returns:
torch.Tensor: total loss composed of VICReg loss and classification loss.
"""
out = super().training_step(batch, batch_idx)
class_loss = out["loss"]
feats1, feats2 = out["feats"]
z1 = self.projector(feats1)
z2 = self.projector(feats2)
# ------- vicreg loss -------
total_loss = vicreg_loss_func(
z1,
z2,
sim_loss_weight=self.sim_loss_weight,
var_loss_weight=self.var_loss_weight,
cov_loss_weight=self.cov_loss_weight,
)
self.log("train_vicreg_loss", total_loss, on_epoch=True, sync_dist=True)
with torch.no_grad():
z_std = F.normalize(torch.stack((z1,z2)), dim=-1).std(dim=1).mean()
corr_z = (torch.abs(corrcoef(z1, z2).triu(1)) + torch.abs(corrcoef(z1, z2).tril(-1))).mean()
pear_z = pearsonr_cor(z1, z2).mean()
corr_feats = (torch.abs(corrcoef(feats1, feats2).triu(1)) + torch.abs(corrcoef(feats1, feats2).tril(-1)) ).mean()
pear_feats = pearsonr_cor(feats1, feats2).mean()
### new metrics
metrics = {
"Logits/avg_sum_logits_Z": (torch.stack((z1,z2))).sum(-1).mean(),
"Logits/avg_sum_logits_Z_normalized": F.normalize(torch.stack((z1,z2)), dim=-1).sum(-1).mean(),
"Logits/logits_Z_max": (torch.stack((z1,z2))).max(),
"Logits/logits_Z_min": (torch.stack((z1,z2))).min(),
"Logits/var_Z": (torch.stack((z1,z2))).var(-1).mean(),
"Logits/logits_Z_normalized_max": F.normalize(torch.stack((z1,z2)), dim=-1).max(),
"Logits/logits_Z_normalized_min": F.normalize(torch.stack((z1,z2)), dim=-1).min(),
"MeanVector/mean_vector_Z_max": (torch.stack((z1,z2))).mean(1).max(),
"MeanVector/mean_vector_Z_min": (torch.stack((z1,z2))).mean(1).min(),
"MeanVector/mean_vector_Z_normalized_max": F.normalize(torch.stack((z1,z2)), dim=-1).mean(1).max(),
"MeanVector/mean_vector_Z_normalized_min": F.normalize(torch.stack((z1,z2)), dim=-1).mean(1).min(),
"MeanVector/norm_vector_Z": (torch.stack((z1,z2))).mean(1).mean(0).norm(),
"MeanVector/norm_vector_Z_normalized": F.normalize(torch.stack((z1,z2)), dim=-1).mean(1).mean(0).norm(),
"Backbone/var": (torch.stack((feats1,feats2))).var(-1).mean(),
"Backbone/max": (torch.stack((feats1,feats2))).max(),
"train_z_std": z_std,
"Corr/corr_z": corr_z,
"Corr/pear_z": pear_z,
"Corr/corr_feats": corr_feats,
"Corr/pear_feats": pear_feats,
}
self.log_dict(metrics, on_epoch=True, sync_dist=True)
### new metrics
return total_loss + class_loss
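# Illustration only, not the solo-learn implementation: a minimal, hedged sketch of
# the three terms that vicreg_loss_func combines (invariance, variance, covariance),
# following the VICReg paper. It only shows what sim/var/cov_loss_weight multiply.
def _vicreg_terms_sketch(z1: torch.Tensor, z2: torch.Tensor, eps: float = 1e-4):
    n, d = z1.shape
    sim = F.mse_loss(z1, z2)                                   # invariance term
    std1 = torch.sqrt(z1.var(dim=0) + eps)
    std2 = torch.sqrt(z2.var(dim=0) + eps)
    var = F.relu(1 - std1).mean() + F.relu(1 - std2).mean()    # variance (hinge) term
    z1c, z2c = z1 - z1.mean(dim=0), z2 - z2.mean(dim=0)
    cov1 = (z1c.T @ z1c) / (n - 1)
    cov2 = (z2c.T @ z2c) / (n - 1)
    off_diag = lambda m: m - torch.diag(torch.diag(m))
    cov = off_diag(cov1).pow(2).sum() / d + off_diag(cov2).pow(2).sum() / d
    return sim, var, cov                                       # weighted and summed upstream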
|
the-stack_0_16086 | #!/usr/bin/python3
from simulation import *
from integrators import *
import utils
import schemas
from pyspark.sql.session import SparkSession
import os
"""arguments"""
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("dt", help="delta t for calculating steps",
type=float)
parser.add_argument("target", help="target time to reach in the simulation",
type=float)
parser.add_argument("integrator", help="integrator to use for running the simulation",
choices=['eul1', 'eul2', 'rk4', 'vlf'])
parser.add_argument("input", help="path(s) to input data")
parser.add_argument("--dtout", help="time interval between cluster snapshots",
default=None, type=float)
parser.add_argument("--dtdiag", help="time interval between diagnostic outputs",
default=None, type=float)
parser.add_argument("--saveDiag", help="should diagnostic be saved to disk instead of printed",
nargs="?", const=True, default=False, type=bool)
parser.add_argument("--addT", help="should t be added to cluster snapshots",
nargs="?", const=True, default=False, type=bool)
parser.add_argument("-l", "--limit", help="limit the number of input rows to read",
nargs="?", const=1000, type=int)
parser.add_argument("-o", "--outputDir", help="output path",
default="../output/")
parser.add_argument("-f", help="format to save output in",
choices=['parquet', 'csv'], default="parquet")
parser.add_argument("--comp", help="compression codec to use for the output",
type=str, default="none")
parser.add_argument("-G", help="gravitational constant for the simulation",
default=1, type=float)
args = parser.parse_args()
"""/arguments"""
"""adjust spark settings"""
spark = SparkSession.builder.getOrCreate()
spark.conf.set("spark.sql.caseSensitive", "true")
"""load data"""
df_t0 = utils.load_df(args.input,
schema=schemas.clust, part="id", limit=args.limit)
"""setup simulation"""
methods = {
"eul1": IntergratorEuler(args.dt, args.G),
"eul2": IntergratorEuler2(args.dt, args.G),
"rk4": IntegratorRungeKutta4(args.dt, args.G),
"vlf": IntegratorLeapfrog(args.dt, args.G),
}
nameStr = utils.clean_str(spark.conf.get("spark.app.name")) + "-" + spark.conf.get("spark.app.id")
sopts = utils.SaveOptions(os.path.join(args.outputDir, nameStr), fformat=args.f,
compression=args.comp, header="true")
sim = Simulation(df_t0, methods[args.integrator], args.target, sopts,
add_t_snap=args.addT, dt_out=args.dtout, dt_diag=args.dtdiag, saveDiag=args.saveDiag)
"""run"""
sim.run()
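# Illustration only: a hedged example invocation (script name and input path are
# hypothetical; positional arguments are dt, target, integrator, input).
# spark-submit run_simulation.py 0.05 10.0 rk4 ../input/cluster.parquet --dtout 1.0 -f parquet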
|
the-stack_0_16088 | #!/usr/bin/env python
#
# run phmmer against comma separated list of Uniprot IDs.
# produce csv of pairwise match alignment.
#
#
#
#
#
import argparse
import os
import sys
import logging
import traceback
import pandas as pd
gitpath=os.path.expanduser("~/git/cshlwork")
sys.path.append(gitpath)
from protlib import uniprot
from protlib import phmmer
def indexbypacc(lod):
logging.debug(f"indexing uniprot list of dicts len: {len(lod)}")
upbypacc = {}
for p in lod:
pacc = p['proteinacc']
#if pacc == "A0A0J9YTW6":
# logging.debug("Indexing later missing pacc! A0A0R4J0X7")
seq = p['sequence']
upbypacc[pacc] = p
logging.debug(f"produced indexed dict len: {len(upbypacc)}")
return upbypacc
def parse_pairfile(filename):
f = open(filename)
lines = f.readlines()
dupelist = []
lnum = 0
knum = 0
for line in lines:
(p1, p2) = line.split(',')
p1 = p1.strip()
p2 = p2.strip()
if p2 != "NA":
dupelist.append( (p1, p2) )
else:
knum += 1
#logging.debug("skipping NA target. ")
lnum += 1
logging.debug(f" processed {lnum} lines. skipped {knum} NAs. produced {len(dupelist)} items in dupelist[0] = {dupelist[0]}")
#logging.debug(f"dupelist: {dupelist}")
return dupelist
def add_altcodes(upbypacc, infile):
'''
upbypacc { <pacc> : { 'proteinacc' : <pacc>,
'sequence' : <seq> }
,
,
,
}
altcodes:
cat <uniprot>.dat | grep "^AC" > <altcodes>.txt
AC Q9CQV8; O70455; Q3TY33; Q3UAN6;
AC P35213;
AC P62259; P29360; P42655; Q63631;
'''
logging.debug(f"len upbypacc before: {len(upbypacc)}")
nadded = 0
nmissing = 0
try:
f = open(infile)
lines = f.readlines()
for line in lines:
# remove leading AC
fields = line.split()[1:]
#logging.debug(f"fields: {fields}")
if len(fields) > 1:
#logging.debug("more than one field.")
ecode = fields[0].replace(';','')
try:
entry = upbypacc[ecode]
for alt in fields[1:]:
alt = alt.replace(';','')
upbypacc[alt] = entry
#logging.debug(f"added alt {alt} for entry code {ecode}")
nadded += 1
except KeyError:
#logging.warn(f"entry {ecode} not found in upbypacc.")
nmissing += 1
except IOError:
logging.error(f"could not read file {infile}")
traceback.print_exc(file=sys.stdout)
finally:
f.close()
logging.debug(f"len ubypacc after: {len(upbypacc)} {nadded} alts added. {nmissing} missing.")
def parse_filebase(filepath):
'''
gives back filepath minus the last dot extension, or the
same filepath if there is not extension.
'''
return os.path.splitext(filepath)[0]
def run_phmmer(pairlist, uniprot_fasta, uniprot_altcodes, pairtfa, targettfa):
config = get_default_config()
up = parse_uniprot_fasta(uniprot_fasta)
logging.debug(f"up len: {len(up)}")
upbypacc = indexbypacc(up)
add_altcodes(upbypacc, uniprot_altcodes)
logging.debug(f"upbypacc len: {len(upbypacc)}")
write_sequences( pairlist, upbypacc, pairtfa, targettfa )
outfile, exclude_list, cidgidmap = execute_phmmer(config, pairtfa, version='current')
logging.info(f"wrote phmmer output to {outfile}")
df = get_phmmer_df(config, pairtfa)
logging.debug(f"df: {df}")
return df
def get_match(query, target, df):
logging.debug(f"query={query} target={target}")
qdf = df[df['query'] == query]
row = qdf[qdf['target'] == target]
if len(row) > 1 :
logging.warning(f'multiple matches for query={query} target={target} ')
return None
elif len(row) == 1:
r = row.iloc[0]
eval = r['eval']
score =r['score']
bias = r['bias']
return (eval, score, bias)
else:
logging.warning(f'no matches for query={query} target={target} ')
return None
def make_evaltable(pdf, pairlist, evalfile ):
#config = get_default_config()
#pdf = pd.read_csv(phmmerdf, index_col=0)
pdf.drop_duplicates(inplace=True,ignore_index=True)
#dupelist = parse_dupepairs()
lod = []
for tup in pairlist:
(p1, p2) = tup
logging.debug(f"looking for {p1} -> {p2}")
rv = get_match(p1, p2, pdf)
if rv is not None:
(eval, score, bias ) = rv
lod.append( { 'query' : p1,
'target' : p2,
'eval' : eval,
'score' : score,
'bias' : bias,
}
)
logging.debug(f"dupelist length: {len(pairlist)}")
logging.debug(f"matchlist length: {len(lod)}")
edf = pd.DataFrame(lod)
edf.to_csv(evalfile)
logging.debug(f"wrote match df to {evalfile}")
def split_pairlist(pairlist):
qlist = []
tlist = []
for (q, t) in pairlist:
qlist.append(q)
tlist.append(t)
return (qlist, tlist)
if __name__=='__main__':
FORMAT='%(asctime)s (UTC) [ %(levelname)s ] %(filename)s:%(lineno)d %(name)s.%(funcName)s(): %(message)s'
logging.basicConfig(format=FORMAT)
logging.getLogger().setLevel(logging.WARNING)
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--debug',
action="store_true",
dest='debug',
help='debug logging')
parser.add_argument('-v', '--verbose',
action="store_true",
dest='verbose',
help='verbose logging')
parser.add_argument('pairfile',
metavar='pairfile',
type=str,
help='')
parser.add_argument('uniprotdat',
metavar='uniprotdat',
default=os.path.expanduser('~/data/uniprot/uniprot_all_vertebrates.dat'),
nargs="?",
type=str,
help='A uniprot .dat database with sequences for all queries.')
# alt codes now handled natively by uniprot.py
# any tfa files created will use whatever accession is in list.
#parser.add_argument('uniprotalt',
# metavar='uniprotalt',
# default=os.path.expanduser("~/project/hamsini2/uniprot_all_rodents_altcodes.txt"),
# type=str,
# help='')
args= parser.parse_args()
if args.debug:
logging.getLogger().setLevel(logging.DEBUG)
if args.verbose:
logging.getLogger().setLevel(logging.INFO)
fbase = parse_filebase(args.pairfile)
querytfa = f"{fbase}_query.tfa"
targettfa = f"{fbase}_targets.tfa"
phdf = f"{fbase}_phdf.csv"
evalfile = f"{fbase}_scores.csv"
logging.debug(f"fbase={fbase} querytfa={querytfa} targettffa={targettfa} phdf={phdf}")
logging.debug(f"uniprotdat={args.uniprotdat}")
pairlist = parse_pairfile(args.pairfile)
(querylist, targetlist) = split_pairlist(pairlist)
logging.debug(f"qlist[:2] = {querylist[:2]} tlist[:2] = {targetlist[:2]} ")
logging.info(f"Getting uniprot from {args.uniprotdat}...")
uc = uniprot.get_default_config()
upbypacc = uniprot.parse_uniprot_dat(uc, args.uniprotdat)
logging.info(f"Creating tfa files: query={querytfa} db={targettfa}")
uniprot.write_tfa_fromlist(querylist, upbypacc, querytfa)
uniprot.write_tfa_fromlist(targetlist, upbypacc, targettfa)
pc = phmmer.get_default_config()
logging.info(f"Running phmmer query={querytfa} db={targettfa}")
pdf = phmmer.get_phmmer_df(pc, querytfa, targettfa)
pdf.to_csv(phdf)
logging.debug(f"Wrote phmmer DF to {phdf}")
make_evaltable(pdf, pairlist, evalfile )
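# Illustration only: a hedged example invocation (script and file names hypothetical;
# the second argument defaults to ~/data/uniprot/uniprot_all_vertebrates.dat).
# python run_pairwise_phmmer.py pairs.csv ~/data/uniprot/uniprot_all_vertebrates.dat -v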
|
the-stack_0_16090 | from copy import copy
from mysql.connector import MySQLConnection, Error
from python_mysql_dbconfig import read_db_config
import sys
import csv
import boto3
import json
import socket
def query_with_fetchone(query2run,secret,region):
try:
# Grab MySQL connection and database settings. We areusing AWS Secrets Manager
# but you could use another service like Hashicorp Vault
# We cannot use Apache Airflow to store these as this script runs stand alone
secret_name = secret
region_name = region
session = boto3.session.Session()
client = session.client(
service_name='secretsmanager',
region_name=region_name
)
get_secret_value_response = client.get_secret_value(SecretId=secret_name)
info=json.loads(get_secret_value_response['SecretString'])
pw=info['password']
un=info['username']
hs=info['host']
db=info['database']
# Output to the log so we can see and confirm WHERE we are running and WHAT
# we are connecting to
print("Connecting to ",str(hs)," database ", str(db), " as user ", str(un))
print("Database host IP is :", socket.gethostbyname(hs))
print("Source IP is ", socket.gethostname())
conn = MySQLConnection(user=un, password=pw, host=hs, database=db)
cursor = conn.cursor()
query=query2run
print("Query is", str(query))
cursor.execute(query)
records = cursor.fetchall()
c = csv.writer(open("temp.csv","w"))
c.writerows(records)
print("Records exported:")
for row in records:
print(row[0],",",row[1],",",row[2],",",row[3],",",row[4],",",row[5], ",",row[6],",",row[7] )
except Error as e:
print(e)
sys.exit(1)
finally:
cursor.close()
conn.close()
def upload_to_s3(s3bucket,s3folder,region):
# We will upload the temp (temp.csv) file and copy it based on the input params of the script (bucket and dir/file)
try:
s3 = boto3.client('s3', region_name=region)
s3.upload_file('temp.csv',s3bucket,s3folder)
except FileNotFoundError:
print("The file was not found")
return False
except Error as e:
print(e)
sys.exit(1)
if __name__ == '__main__':
    # make sure all five required arguments were supplied
    if len(sys.argv) < 6:
        raise SystemExit(f"Usage: {sys.argv[0]} <s3 bucket> <s3 file> <query> <secret> <region>")
# The script needs the following arguments to run
# 1. Target S3 bucket where the output of the SQL script will be copied
# 2. Target S3 folder/filename
# 3. The query to execute
# 4. The parameter store (we use AWS Secrets) which holds the values on where to find the MySQL database
# 5. The AWS region
s3bucket=sys.argv[1]
s3folder=sys.argv[2]
query2run=sys.argv[3]
secret=sys.argv[4]
region=sys.argv[5]
query_with_fetchone(query2run,secret,region)
upload_to_s3(s3bucket,s3folder,region)
# demo command to test this from the cli
# for Cloud based MySQL
# python app/read-data-q.py ricsue-airflow-hybrid period1/temp.csv "select * from customers WHERE location = 'Poland' AND (date BETWEEN '2022-01-01 14:15:55' AND '2022-09-29 10:15:55')" rds-airflow-hybrid eu-west-2
# for local/remote based MySQL
# python app/read-data-q.py ricsue-airflow-hybrid period1/temp2.csv "select * from customers WHERE location = 'China' AND (date BETWEEN '2022-01-01 14:15:55' AND '2022-09-29 10:15:55')" localmysql-airflow-hybrid eu-west-2
# other queries you can try, for example
# "select * from customers WHERE location = '{country}' AND (date BETWEEN '{start}' AND '{end}')".format(country=country,start=start,end=end)
|
the-stack_0_16093 | import time
from typing import Optional, Union, List, Dict, Tuple
import uuid
import aiohttp
from blob import Context
from config import Config
from helpers import userHelper
from lib import logger
from objects.constants import Privileges, Countries
from objects.constants.BanchoRanks import BanchoRanks
from objects.constants.GameModes import GameModes
from objects.constants.IdleStatuses import Action
from objects.constants.KurikkuPrivileges import KurikkuPrivileges
from objects.constants.Modificators import Mods
from objects.constants.PresenceFilter import PresenceFilter
from packets.Builder.index import PacketBuilder
from objects.Channel import Channel
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from objects.TypedDicts import TypedStats
from objects.BanchoObjects import Message
from objects.Multiplayer import Match
# I wan't use construction in python like <class>.__dict__.update
# but i forgot if class has __slots__ __dict__ is unavailable, sadly ;-;
class StatsMode:
__slots__ = ("game_mode", "total_score", "ranked_score", "pp",
"accuracy", "total_plays", "playtime", "max_combo", "leaderboard_rank")
def __init__(self):
self.total_score: int = 0
self.ranked_score: int = 0
self.pp: int = 0
self.accuracy: float = 0.00
self.total_plays: int = 0
self.playtime: int = 0
self.leaderboard_rank: int = 0
def update(self, **kwargs: 'TypedStats'):
self.total_score = kwargs.get('total_score', 0)
self.ranked_score = kwargs.get('ranked_score', 0)
self.pp = kwargs.get('pp', 0)
self.accuracy = kwargs.get('accuracy', 0)
self.total_plays = kwargs.get('total_plays', 0)
self.playtime = kwargs.get('playtime', 0)
self.leaderboard_rank = kwargs.get('leaderboard_rank', 0)
class Status:
__slots__ = (
'action', 'action_text', 'map_md5',
'mods', 'mode', 'map_id'
)
def __init__(self):
self.action: Action = Action.Idle
self.action_text: str = ''
self.map_md5: str = ''
self.mode: GameModes = GameModes.STD
self.mods: Mods = Mods.NoMod
self.map_id: int = 0
def update(self, **kwargs):
self.action = Action(kwargs.get('action', 0))
self.action_text = kwargs.get('action_text', '')
self.map_md5 = kwargs.get('map_md5', '')
self.mode = GameModes(kwargs.get('mode', 0))
self.mods = Mods(kwargs.get('mods', 0))
self.map_id = kwargs.get('map_id', 0)
class Player:
def __init__(self, user_id: Union[int], user_name: Union[str],
privileges: Union[int], utc_offset: Optional[int] = 0,
pm_private: bool = False, silence_end: int = 0, is_tourneymode: bool = False,
is_bot: bool = False, ip: str = ''):
self.token: str = self.generate_token()
self.id: int = user_id
self.name: str = user_name
self.ip: str = ip
self.privileges: int = privileges
self.selected_game_mode: GameModes = GameModes.STD
self.stats: Dict[GameModes, StatsMode] = {mode: StatsMode() for mode in GameModes} # setup dictionary with stats
self.pr_status: Status = Status()
self.spectators: List[Player] = []
self.spectating: Optional[Player] = None
self.country: Tuple[int, str] = (0, 'XX')
self.location: Tuple[float, float] = (0.0, 0.0)
self.timezone: int = 24 + utc_offset
self.timezone_offset: int = utc_offset
        self.pm_private: bool = pm_private  # if True, only friends can send this player PMs
self.friends: Union[List[int]] = []
self.away_msg: Optional[str] = None
self.silence_end: int = silence_end
self.presence_filter: PresenceFilter = PresenceFilter(1)
self.bot_np: Optional[dict] = None # TODO: Beatmap
self._match: Optional['Match'] = None
self.queue: bytearray = bytearray() # main thing
self.login_time: int = int(time.time())
self.last_packet_unix: int = int(time.time())
self.is_tourneymode: bool = is_tourneymode
self.id_tourney: int = -1
self.is_in_lobby: bool = False
self.is_bot: bool = is_bot
self.tillerino: List[Union[int, Mods]] = [0, Mods(0), -1.0] # 1 - map id, 2 - current_mods, 3 - acc <- legacy code
self.user_chat_log: List['Message'] = []
@property
def match(self):
return self._match
@property
def get_formatted_chatlog(self):
return "\n".join(
f"{time.strftime('%H:%M', time.localtime(message.when))} - {self.name}@{message.to}: {message.body[:50]}"
for message in self.user_chat_log
)
@property
def silenced(self) -> bool:
return self.silence_end > 0
@property
def safe_name(self) -> str:
return self.name.lower().strip().replace(" ", "_")
@property
def irc_name(self) -> str:
return self.name.replace(" ", "_")
@property
def is_restricted(self) -> bool:
# return (self.privileges & Privileges.USER_NORMAL) and not (self.privileges & Privileges.USER_PUBLIC)
return (self.privileges & KurikkuPrivileges.Normal) != KurikkuPrivileges.Normal
@property
def bancho_privs(self) -> BanchoRanks:
privs = BanchoRanks(0)
if (self.privileges & KurikkuPrivileges.Normal.value) == KurikkuPrivileges.Normal.value:
privs |= (BanchoRanks.PLAYER | BanchoRanks.SUPPORTER)
if (self.privileges & KurikkuPrivileges.Bat.value) == KurikkuPrivileges.Bat.value:
privs |= BanchoRanks.BAT
if (self.privileges & KurikkuPrivileges.ChatMod.value) == KurikkuPrivileges.ChatMod.value or \
(self.privileges & KurikkuPrivileges.ReplayModerator.value) == KurikkuPrivileges.ReplayModerator.value:
privs |= BanchoRanks.MOD
if (self.privileges & KurikkuPrivileges.CM.value) == KurikkuPrivileges.CM.value:
privs |= BanchoRanks.ADMIN
if (self.privileges & KurikkuPrivileges.Owner.value) == KurikkuPrivileges.Owner.value:
privs |= BanchoRanks.PEPPY
return privs
@property
def is_admin(self) -> bool:
if (self.privileges & KurikkuPrivileges.Developer) == KurikkuPrivileges.Developer or \
(self.privileges & KurikkuPrivileges.ChatMod) == KurikkuPrivileges.ChatMod or \
(self.privileges & KurikkuPrivileges.CM) == KurikkuPrivileges.CM:
return True
return False
@property
def current_stats(self) -> StatsMode:
return self.stats[self.selected_game_mode]
@classmethod
def generate_token(cls) -> str:
return str(uuid.uuid4())
async def parse_friends(self) -> bool:
async for friend in Context.mysql.iterall(
# why in my db, exists user2 with id = -1?
'select user2 from users_relationships where user1 = %s and user2 > 0',
[self.id]
):
self.friends.append(friend['user2'])
return True
async def parse_country(self, ip: str) -> bool:
if self.privileges & Privileges.USER_DONOR:
            # remember that donors have a locked (manually chosen) location
donor_location: str = (await Context.mysql.fetch(
'select country from users_stats where id = %s',
[self.id]
))['country'].upper()
self.country = (Countries.get_country_id(donor_location), donor_location)
else:
if Context.geoip_db:
# You have local geoip2 database, nice!
try:
data = Context.geoip_db.city(ip)
except:
logger.elog(f"[Player/{self.name}] Can't parse location for {ip}")
return False
self.country = (Countries.get_country_id(data.country.iso_code), data.country.iso_code)
self.location = (data.location.latitude, data.location.longitude)
return True
data = None
async with aiohttp.ClientSession() as sess:
async with sess.get(Config.config['geoloc_ip'] + ip) as resp:
try:
data = await resp.json()
finally:
pass
if not data:
logger.elog(f"[Player/{self.name}] Can't parse geoloc")
return False
self.country = (Countries.get_country_id(data['country']), data['country'])
loc = data['loc'].split(",")
self.location = (float(loc[0]), float(loc[1]))
return True
async def update_stats(self, selected_mode: GameModes = None) -> bool:
for mode in GameModes if not selected_mode else [selected_mode]:
res = await Context.mysql.fetch(
'select total_score_{0} as total_score, ranked_score_{0} as ranked_score, '
'pp_{0} as pp, playcount_{0} as total_plays, avg_accuracy_{0} as accuracy, playtime_{0} as playtime '
'from users_stats where id = %s'.format(GameModes.resolve_to_str(mode)),
[self.id]
)
if not res:
logger.elog(f"[Player/{self.name}] Can't parse stats for {GameModes.resolve_to_str(mode)}")
return False
position = await Context.redis.zrevrank(
f"ripple:leaderboard:{GameModes.resolve_to_str(mode)}",
str(self.id)
)
res['leaderboard_rank'] = int(position) + 1 if position else 0
self.stats[mode].update(**res)
async def logout(self) -> None:
if not self.is_tourneymode:
await Context.redis.set("ripple:online_users", len(Context.players.get_all_tokens(True)))
if self.ip:
await userHelper.deleteBanchoSession(self.id, self.ip)
# logic
# leave multiplayer
if self.match:
await self.match.leave_player(self)
        # stop spectating
if self.spectating:
await self.spectating.remove_spectator(self)
# leave channels
for (_, chan) in Context.channels.items():
if self.id in chan.users:
await chan.leave_channel(self)
if not self.is_tourneymode:
for p in Context.players.get_all_tokens():
p.enqueue(await PacketBuilder.Logout(self.id))
Context.players.delete_token(self)
return
async def kick(self, message: str = "You have been kicked from the server. Please login again.",
reason: str = "kick") -> bool:
if self.is_bot:
return False
logger.wlog(f"[Player/{self.name}] has been disconnected. {reason}")
if message:
self.enqueue(await PacketBuilder.Notification(message))
self.enqueue(await PacketBuilder.UserID(-1)) # login failed
await self.logout()
return True
# legacy code
async def silence(self, seconds: int = None, reason: str = "", author: int = 999) -> bool:
if seconds is None:
# Get silence expire from db if needed
seconds = max(0, await userHelper.getSilenceEnd(self.id) - int(time.time()))
else:
# Silence in db and token
await userHelper.silence(self.id, seconds, reason, author)
# Silence token
self.silence_end = int(time.time()) + seconds
# Send silence packet to user
self.enqueue(await PacketBuilder.SilenceEnd(seconds))
# Send silenced packet to everyone else
user_silenced = await PacketBuilder.UserSilenced(self.id)
for user in Context.players.get_all_tokens():
user.enqueue(user_silenced)
return True
async def send_message(self, message: 'Message') -> bool:
message.body = f'{message.body[:2045]}...' if message.body[2048:] else message.body
chan: str = message.to
if chan.startswith("#"):
# this is channel object
if chan.startswith("#multi"):
if self.is_tourneymode:
if self.id_tourney > 0:
chan = f"#multi_{self.id_tourney}"
else:
return False
else:
chan = f"#multi_{self.match.id}"
elif chan.startswith("#spec"):
if self.spectating:
chan = f"#spec_{self.spectating.id}"
else:
chan = f"#spec_{self.id}"
channel: 'Channel' = Context.channels.get(chan, None)
if not channel:
logger.klog(f"[{self.name}] Tried to send message in unknown channel. Ignoring it...")
return False
self.user_chat_log.append(message)
logger.klog(
f"{self.name}({self.id}) -> {channel.server_name}: {bytes(message.body, 'latin_1').decode()}"
)
await channel.send_message(self.id, message)
return True
# DM
receiver = Context.players.get_token(name=message.to.lower().strip().replace(" ", "_"))
if not receiver:
            logger.klog(f"[{self.name}] Tried to message an offline user. Ignoring it...")
return False
if receiver.pm_private and self.id not in receiver.friends:
self.enqueue(await PacketBuilder.PMBlocked(message.to))
            logger.klog(f"[{self.name}] Tried to message {message.to}, who has private PMs enabled.")
return False
if self.pm_private and receiver.id not in self.friends:
self.pm_private = False
            logger.klog(f"[{self.name}] has private PMs enabled but messaged a non-friend user. PM lock removed")
if receiver.silenced:
self.enqueue(await PacketBuilder.TargetSilenced(message.to))
            logger.klog(f'[{self.name}] Tried to message {message.to}, but they are silenced.')
return False
self.user_chat_log.append(message)
logger.klog(
f"#DM {self.name}({self.id}) -> {message.to}({receiver.id}): {bytes(message.body, 'latin_1').decode()}"
)
receiver.enqueue(
await PacketBuilder.BuildMessage(self.id, message)
)
return True
async def add_spectator(self, new_spec: 'Player') -> bool:
spec_chan_name = f"#spec_{self.id}"
if not Context.channels.get(spec_chan_name):
# in this case, we need to create channel for our spectator in temp mode
spec = Channel(
server_name=spec_chan_name,
description=f"Spectator channel for {self.name}",
public_read=True,
public_write=True,
temp_channel=True
)
Context.channels[spec_chan_name] = spec
await spec.join_channel(self)
c: 'Channel' = Context.channels.get(spec_chan_name)
if not await c.join_channel(new_spec):
logger.elog(f"{self.name} failed to join in {spec_chan_name} spectator channel!")
return False
fellow_packet = await PacketBuilder.FellowSpectatorJoined(new_spec.id)
for spectator in self.spectators:
spectator.enqueue(fellow_packet)
new_spec.enqueue(await PacketBuilder.FellowSpectatorJoined(spectator.id))
self.spectators.append(new_spec)
new_spec.spectating = self
self.enqueue(await PacketBuilder.SpectatorJoined(new_spec.id))
logger.slog(f"{new_spec.name} started to spectating {self.name}!")
return True
async def remove_spectator(self, old_spec: 'Player') -> bool:
spec_chan_name = f"#spec_{self.id}"
self.spectators.remove(old_spec) # attempt to remove old player from array
old_spec.spectating = None
spec_chan: Channel = Context.channels.get(spec_chan_name)
await spec_chan.leave_channel(old_spec) # remove our spectator from channel
fellow_packet = await PacketBuilder.FellowSpectatorLeft(old_spec.id)
if not self.spectators:
await spec_chan.leave_channel(self)
else:
for spectator in self.spectators:
spectator.enqueue(fellow_packet)
self.enqueue(await PacketBuilder.SpectatorLeft(old_spec.id))
logger.slog(f"{old_spec.name} has stopped spectating for {self.name}")
return True
async def remove_hidden_spectator(self, old_spec: 'Player') -> bool:
self.spectators.remove(old_spec) # attempt to remove old player from array
old_spec.spectating = None
self.enqueue(await PacketBuilder.SpectatorLeft(old_spec.id))
logger.slog(f"{old_spec.name} has stopped hidden spectating for {self.name}")
return True
async def say_bancho_restarting(self, delay: int = 20) -> bool:
self.enqueue(
await PacketBuilder.BanchoRestarting(delay * 1000)
)
return True
def enqueue(self, b: bytes) -> None:
self.queue += b
def dequeue(self) -> Optional[bytes]:
if self.queue:
data = bytes(self.queue)
self.queue.clear()
return data
return b''
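# Illustration only: a hedged sketch of how the byte queue above is typically drained
# by a bancho-style request handler (names are hypothetical). Packet handlers call
# player.enqueue(...); the next client poll gets whatever dequeue() returns.
# async def handle_poll(player, client_packets):
#     for packet in client_packets:
#         await dispatch(player, packet)     # handlers enqueue response packets
#     return player.dequeue()                # body of the HTTP response to the client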
|
the-stack_0_16094 | from eth_utils import (
is_bytes,
)
from ssz.sedes import (
Serializable,
infer_sedes,
sedes_by_name,
)
from ssz.sedes.base import (
BaseSedes,
)
def encode(value, sedes=None, cache=True):
"""
Encode object in SSZ format.
`sedes` needs to be explicitly mentioned for encode/decode
of integers(as of now).
`sedes` parameter could be given as a string or as the
actual sedes object itself.
If `value` has an attribute :attr:`_cached_ssz` (as, notably,
:class:`ssz.sedes.Serializable`) and its value is not `None`, this value is
returned bypassing serialization and encoding, unless `sedes` is given (as
the cache is assumed to refer to the standard serialization which can be
replaced by specifying `sedes`).
If `value` is a :class:`ssz.sedes.Serializable` and `cache` is true, the result of
the encoding will be stored in :attr:`_cached_ssz` if it is empty.
"""
if isinstance(value, Serializable):
cached_ssz = value._cached_ssz
if sedes is None and cached_ssz is not None:
return cached_ssz
else:
really_cache = cache and sedes is None
else:
really_cache = False
if sedes is not None:
if sedes in sedes_by_name:
# Get the actual sedes object from string representation
sedes_obj = sedes_by_name[sedes]
else:
sedes_obj = sedes
if not isinstance(sedes_obj, BaseSedes):
raise TypeError("Invalid sedes object")
else:
sedes_obj = infer_sedes(value)
serialized_obj = sedes_obj.serialize(value)
if really_cache:
value._cached_ssz = serialized_obj
return serialized_obj
def decode(ssz, sedes):
"""
Decode a SSZ encoded object.
"""
if not is_bytes(ssz):
raise TypeError(f"Can only decode SSZ bytes, got type {type(ssz).__name__}")
value = sedes.deserialize(ssz)
return value
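# Illustration only: a hedged round-trip sketch. uint32 is assumed to be one of the
# sedes exported by ssz.sedes; for bare integers the sedes must be given explicitly,
# as the encode() docstring notes.
# from ssz.sedes import uint32
# data = encode(7, sedes=uint32)        # 4 little-endian bytes: b'\x07\x00\x00\x00'
# assert decode(data, uint32) == 7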
|
the-stack_0_16095 | # A OpenTraced server for a Python service that implements the store interface.
from __future__ import print_function
import time
import argparse
from collections import defaultdict
from six import iteritems
import grpc
from concurrent import futures
from jaeger_client import Config
from grpc_opentracing import open_tracing_server_interceptor, \
SpanDecorator
from grpc_opentracing.grpcext import intercept_server
import store_pb2
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
class Store(store_pb2.StoreServicer):
def __init__(self):
self._inventory = defaultdict(int)
def AddItem(self, request, context):
self._inventory[request.name] += 1
return store_pb2.Empty()
def AddItems(self, request_iter, context):
for request in request_iter:
self._inventory[request.name] += 1
return store_pb2.Empty()
def RemoveItem(self, request, context):
new_quantity = self._inventory[request.name] - 1
if new_quantity < 0:
return store_pb2.RemoveItemResponse(was_successful=False)
self._inventory[request.name] = new_quantity
return store_pb2.RemoveItemResponse(was_successful=True)
def RemoveItems(self, request_iter, context):
response = store_pb2.RemoveItemResponse(was_successful=True)
for request in request_iter:
response = self.RemoveItem(request, context)
if not response.was_successful:
break
return response
def ListInventory(self, request, context):
for name, count in iteritems(self._inventory):
if not count:
continue
else:
yield store_pb2.QuantityResponse(name=name, count=count)
def QueryQuantity(self, request, context):
count = self._inventory[request.name]
return store_pb2.QuantityResponse(name=request.name, count=count)
def QueryQuantities(self, request_iter, context):
for request in request_iter:
count = self._inventory[request.name]
yield store_pb2.QuantityResponse(name=request.name, count=count)
class StoreSpanDecorator(SpanDecorator):
def __call__(self, span, rpc_info):
span.set_tag('grpc.method', rpc_info.full_method)
span.set_tag('grpc.headers', str(rpc_info.metadata))
span.set_tag('grpc.deadline', str(rpc_info.timeout))
def serve():
parser = argparse.ArgumentParser()
parser.add_argument(
'--log_payloads',
action='store_true',
help='log request/response objects to open-tracing spans')
parser.add_argument(
'--include_grpc_tags',
action='store_true',
help='set gRPC-specific tags on spans')
args = parser.parse_args()
config = Config(
config={
'sampler': {
'type': 'const',
'param': 1,
},
'logging': True,
},
service_name='store-server')
tracer = config.initialize_tracer()
span_decorator = None
if args.include_grpc_tags:
span_decorator = StoreSpanDecorator()
tracer_interceptor = open_tracing_server_interceptor(
tracer, log_payloads=args.log_payloads, span_decorator=span_decorator)
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
server = intercept_server(server, tracer_interceptor)
store_pb2.add_StoreServicer_to_server(Store(), server)
server.add_insecure_port('[::]:50051')
server.start()
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
time.sleep(2)
tracer.close()
time.sleep(2)
if __name__ == '__main__':
serve()
|
the-stack_0_16096 | #!/usr/bin/env python
from pvaccess import Channel
from pvaccess import PvBoolean
from pvaccess import PvByte
from pvaccess import PvUByte
from pvaccess import PvShort
from pvaccess import PvUShort
from pvaccess import PvInt
from pvaccess import PvUInt
from pvaccess import PvLong
from pvaccess import PvULong
from pvaccess import PvFloat
from pvaccess import PvDouble
from pvaccess import PvString
from testUtility import TestUtility
class TestChannelPutGet:
#
# Boolean PutGet
#
def testPutGet_PvBoolean(self):
value = TestUtility.getRandomBoolean()
c = TestUtility.getBooleanChannel()
value2 = c.putGet(PvBoolean(value)).getPyObject()
assert(value == value2)
# put() must be done using strings 'true'/'false'
def testPutGet_Boolean(self):
value = TestUtility.getRandomBooleanString()
c = TestUtility.getBooleanChannel()
value2 = c.putGet(value).getPyObject()
TestUtility.assertBooleanEquality(value,value2)
def testPutGetBoolean_Boolean(self):
value = TestUtility.getRandomBoolean()
c = TestUtility.getBooleanChannel()
value2 = c.putGetBoolean(value).getPyObject()
assert(value == value2)
#
# Byte PutGet
#
# python chars are unsigned
def testPutGet_PvByte(self):
value = chr(TestUtility.getRandomUByte())
c = TestUtility.getByteChannel()
value2 = c.putGet(PvByte(value)).getPyObject()
assert(value == value2)
# put(byte) must be done using integers
# we need to compare result in python chars, which are unsigned
def testPutGet_Byte(self):
value = TestUtility.getRandomByte()
c = TestUtility.getByteChannel()
value2 = c.putGet(value).getPyObject()
TestUtility.assertCharEquality(value,value2)
def testPutGetByte_Byte(self):
value = chr(TestUtility.getRandomUByte())
c = TestUtility.getByteChannel()
value2 = c.putGetByte(value).getPyObject()
assert(value == value2)
#
# UByte PutGet
#
def testPutGet_PvUByte(self):
value = TestUtility.getRandomUByte()
c = TestUtility.getUByteChannel()
value2 = c.putGet(PvUByte(value)).getPyObject()
assert(value == value2)
def testPutGet_UByte(self):
value = TestUtility.getRandomUByte()
c = TestUtility.getUByteChannel()
value2 = c.putGet(value).getPyObject()
assert(value == value2)
def testPutGetUByte_UByte(self):
value = TestUtility.getRandomUByte()
c = TestUtility.getUByteChannel()
value2 = c.putGetUByte(value).getPyObject()
assert(value == value2)
#
# Short PutGet
#
def testPutGet_PvShort(self):
value = TestUtility.getRandomShort()
c = TestUtility.getShortChannel()
value2 = c.putGet(PvShort(value)).getPyObject()
assert(value == value2)
def testPutGet_Short(self):
value = TestUtility.getRandomShort()
c = TestUtility.getShortChannel()
value2 = c.putGet(value).getPyObject()
assert(value == value2)
def testPutGetShort_Short(self):
value = TestUtility.getRandomShort()
c = TestUtility.getShortChannel()
value2 = c.putGetShort(value).getPyObject()
assert(value == value2)
#
# UShort PutGet
#
def testPutGet_PvUShort(self):
value = TestUtility.getRandomUShort()
c = TestUtility.getUShortChannel()
value2 = c.putGet(PvUShort(value)).getPyObject()
assert(value == value2)
def testPutGet_UShort(self):
value = TestUtility.getRandomUShort()
c = TestUtility.getUShortChannel()
value2 = c.putGet(value).getPyObject()
assert(value == value2)
def testPutGetUShort_UShort(self):
value = TestUtility.getRandomUShort()
c = TestUtility.getUShortChannel()
value2 = c.putGetUShort(value).getPyObject()
assert(value == value2)
#
# Int PutGet
#
def testPutGet_PvInt(self):
value = TestUtility.getRandomInt()
c = TestUtility.getIntChannel()
value2 = c.putGet(PvInt(value)).getPyObject()
assert(value == value2)
def testPutGet_Int(self):
value = TestUtility.getRandomInt()
c = TestUtility.getIntChannel()
value2 = c.putGet(value).getPyObject()
assert(value == value2)
def testPutGetInt_Int(self):
value = TestUtility.getRandomInt()
c = TestUtility.getIntChannel()
value2 = c.putGetInt(value).getPyObject()
assert(value == value2)
#
# UInt PutGet
#
def testPutGet_PvUInt(self):
value = TestUtility.getRandomUInt()
c = TestUtility.getUIntChannel()
value2 = c.putGet(PvUInt(value)).getPyObject()
assert(value == value2)
def testPutGet_UInt(self):
value = TestUtility.getRandomUInt()
c = TestUtility.getUIntChannel()
value2 = c.putGet(value).getPyObject()
assert(value == value2)
def testPutGetUInt_UInt(self):
value = TestUtility.getRandomUInt()
c = TestUtility.getUIntChannel()
value2 = c.putGetUInt(value).getPyObject()
assert(value == value2)
#
# Long PutGet
#
def testPutGet_PvLong(self):
value = TestUtility.getRandomLong()
c = TestUtility.getLongChannel()
value2 = c.putGet(PvLong(value)).getPyObject()
assert(value == value2)
def testPutGet_Long(self):
value = TestUtility.getRandomLong()
c = TestUtility.getLongChannel()
value2 = c.putGet(value).getPyObject()
assert(value == value2)
def testPutGetLong_Long(self):
value = TestUtility.getRandomLong()
c = TestUtility.getLongChannel()
value2 = c.putGetLong(value).getPyObject()
assert(value == value2)
#
# ULong PutGet
#
def testPutGet_PvULong(self):
value = TestUtility.getRandomULong()
c = TestUtility.getULongChannel()
value2 = c.putGet(PvULong(value)).getPyObject()
assert(value == value2)
def testPutGet_ULong(self):
value = TestUtility.getRandomULong()
c = TestUtility.getULongChannel()
value2 = c.putGet(value).getPyObject()
assert(value == value2)
def testPutGetULong_ULong(self):
value = TestUtility.getRandomULong()
c = TestUtility.getULongChannel()
value2 = c.putGetULong(value).getPyObject()
assert(value == value2)
#
# Float PutGet
#
def testPutGet_PvFloat(self):
value = TestUtility.getRandomFloat()
c = TestUtility.getFloatChannel()
value2 = c.putGet(PvFloat(value)).getPyObject()
TestUtility.assertFloatEquality(value, value2)
def testPutGet_Float(self):
value = TestUtility.getRandomFloat()
c = TestUtility.getFloatChannel()
value2 = c.putGet(value).getPyObject()
TestUtility.assertFloatEquality(value, value2)
def testPutGetFloat_Float(self):
value = TestUtility.getRandomFloat()
c = TestUtility.getFloatChannel()
value2 = c.putGetFloat(value).getPyObject()
TestUtility.assertFloatEquality(value, value2)
#
# Double PutGet
#
def testPutGet_PvDouble(self):
value = TestUtility.getRandomDouble()
c = TestUtility.getDoubleChannel()
value2 = c.putGet(PvDouble(value)).getPyObject()
TestUtility.assertDoubleEquality(value, value2)
def testPutGet_Double(self):
value = TestUtility.getRandomDouble()
c = TestUtility.getDoubleChannel()
value2 = c.putGet(value).getPyObject()
TestUtility.assertDoubleEquality(value, value2)
def testPutGetDouble_Double(self):
value = TestUtility.getRandomDouble()
c = TestUtility.getDoubleChannel()
value2 = c.putGetDouble(value).getPyObject()
TestUtility.assertDoubleEquality(value, value2)
#
# String PutGet
#
def testPutGet_PvString(self):
value = TestUtility.getRandomString()
c = TestUtility.getStringChannel()
value2 = c.putGet(PvString(value)).getPyObject()
assert(value == value2)
def testPutGet_String(self):
value = TestUtility.getRandomString()
c = TestUtility.getStringChannel()
value2 = c.putGet(value).getPyObject()
assert(value == value2)
def testPutGetString_String(self):
value = TestUtility.getRandomString()
c = TestUtility.getStringChannel()
value2 = c.putGetString(value).getPyObject()
assert(value == value2)
|
the-stack_0_16097 | """Django settings for workbench project."""
import json
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
DJFS = {'type': 'osfs',
'directory_root': 'workbench/static/djpyfs',
'url_root': '/static/djpyfs'}
DEBUG = True
TEMPLATE_DEBUG = DEBUG
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'workbench', 'templates'),
            os.path.join(BASE_DIR, 'sample_xblocks', 'basic', 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
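# If set, the WORKBENCH_DATABASES environment variable must hold a JSON object in
# Django's DATABASES format; the value below is illustrative only (names, user and
# password are assumptions, not project defaults):
#   export WORKBENCH_DATABASES='{"default": {"ENGINE": "django.db.backends.postgresql_psycopg2",
#                                            "NAME": "workbench", "USER": "wb",
#                                            "PASSWORD": "secret", "HOST": "localhost"}}'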
if 'WORKBENCH_DATABASES' in os.environ:
DATABASES = json.loads(os.environ['WORKBENCH_DATABASES'])
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'var/workbench.db'
}
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake'
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '5ftdd9(@p)tg&bqv$(^d!63psz9+g+_i5om_e%!32%po2_+%l7'
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'workbench.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'workbench.wsgi.application'
TEMPLATE_DIRS = (
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'djpyfs',
'workbench',
'django_nose',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# Only use django-debug-toolbar if it has been installed.
# Installing django-debug-toolbar before running syncdb may cause a
# DatabaseError when trying to run syncdb.
try:
import debug_toolbar # pylint: disable=unused-import, import-error
INSTALLED_APPS += ('debug_toolbar',)
except ImportError:
pass
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'logging.NullHandler',
},
'logfile': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': 'var/workbench.log',
'maxBytes': 50000,
'backupCount': 2,
}
},
'loggers': {
'django.request': {
'handlers': ['logfile'],
'level': 'DEBUG',
'propagate': True,
},
'django': {
'level': 'DEBUG',
'handlers': ['logfile'],
}
}
}
WORKBENCH = {
'reset_state_on_restart': (
os.environ.get('WORKBENCH_RESET_STATE_ON_RESTART', "false").lower() == "true"
),
'services': {
'fs': 'xblock.reference.plugins.FSService'
}
}
|
the-stack_0_16098 | import os
import re
import logging
from airbrake.notifier import Airbrake
from .secrets import config
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class AirbrakeNotifier:
MAX_NOTIFICATIONS = 50
airbrake_notifier = Airbrake(project_id=config['airbrake_project_id'], api_key=config['airbrake_api_key'])
blacklisted_log_group_names = set(config['airbrake_blacklisted_log_group_names'])
blacklisted_log_message_strings_regex = re.compile('|'.join(config["airbrake_blacklisted_log_message_strings"]))
whitelisted_log_message_terms_regex_string = "|".join(config['airbrake_whitelisted_log_message_terms'])
whitelisted_log_message_terms_regexp = re.compile(whitelisted_log_message_terms_regex_string, re.IGNORECASE)
def __init__(self):
self._report = dict()
self._total_errors = 0
self._airbrake_rate_limited = False
def report(self):
results = []
for log_group, subcounts in self._report.items():
for message_type, count in subcounts.items():
results += [(log_group, message_type, count)]
return results
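    # Illustrative output of report() above (counts are assumed, not real logs):
    #   [("api-prod-log-group", "errors", 3), ("api-prod-log-group", "total", 120)]
    # i.e. one (log_group, counter_name, count) tuple per counter per log group.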
def notify_on_stream(self, log_event_stream):
for log_event in log_event_stream:
self.notify(log_event)
yield log_event
def notify(self, log_event):
message = log_event['@message']
log_group = log_event['@log_group']
log_stream = log_event['@log_stream']
error_str = None
if AirbrakeNotifier._is_message_appropriate_for_airbrake(message, log_group) and \
not AirbrakeNotifier._contains_blacklisted_string(message):
error_str = "'{0} {1} '@log_stream': {2}".format(log_group, message, log_stream)
try:
if not self._airbrake_rate_limited and self._total_errors < AirbrakeNotifier.MAX_NOTIFICATIONS:
AirbrakeNotifier.airbrake_notifier.notify(error_str)
except Exception as e:
message = str(e)
if message.startswith('420 Client Error'):
self._airbrake_rate_limited = True
else:
logger.error("Airbrake notification failed! {}".format(message))
self._observe(log_group, error_str)
def _observe(self, log_group, error_str):
if log_group not in self._report:
self._report[log_group] = {
'errors': 0,
'total': 0
}
if error_str:
self._report[log_group]['errors'] += 1
self._total_errors += 1
self._report[log_group]['total'] += 1
@staticmethod
def _is_message_appropriate_for_airbrake(message, log_group):
if log_group not in AirbrakeNotifier.blacklisted_log_group_names and \
AirbrakeNotifier.whitelisted_log_message_terms_regexp.search(message):
return True
return False
@staticmethod
def _contains_blacklisted_string(message):
if AirbrakeNotifier.blacklisted_log_message_strings_regex.search(message):
return True
return False
|
the-stack_0_16099 | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for MCMC diagnostic utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_util as tfp_test_util
from tensorflow_probability.python.mcmc.diagnostic import _reduce_variance
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
@test_util.run_all_in_graph_and_eager_modes
class _EffectiveSampleSizeTest(object):
@property
def use_static_shape(self):
raise NotImplementedError(
"Subclass failed to implement `use_static_shape`.")
def _check_versus_expected_effective_sample_size(
self,
x_,
expected_ess,
atol=1e-2,
rtol=1e-2,
filter_threshold=None,
filter_beyond_lag=None,
filter_beyond_positive_pairs=False):
x = tf1.placeholder_with_default(
x_, shape=x_.shape if self.use_static_shape else None)
ess = tfp.mcmc.effective_sample_size(
x,
filter_threshold=filter_threshold,
filter_beyond_lag=filter_beyond_lag,
filter_beyond_positive_pairs=filter_beyond_positive_pairs)
if self.use_static_shape:
self.assertAllEqual(x.shape[1:], ess.shape)
ess_ = self.evaluate(ess)
self.assertAllClose(
np.ones_like(ess_) * expected_ess, ess_, atol=atol, rtol=rtol)
def testIidRank1NormalHasFullEssMaxLags10(self):
# With a length 5000 iid normal sequence, and filter_beyond_lag = 10, we
# should have a good estimate of ESS, and it should be close to the full
# sequence length of 5000.
# The choice of filter_beyond_lag = 10 is a short cutoff, reasonable only
# since we know the correlation length should be zero right away.
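    # Rough sketch of why: effective_sample_size estimates
    #   ESS ~= N / (1 + 2 * sum_k R_k),
    # where R_k is the lag-k sample auto-correlation, so an iid sequence
    # (R_k ~= 0 for k >= 1) should give ESS close to N itself.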
self._check_versus_expected_effective_sample_size(
x_=np.random.randn(5000).astype(np.float32),
expected_ess=5000,
filter_beyond_lag=10,
filter_threshold=None,
rtol=0.3)
def testIidRank2NormalHasFullEssMaxLags10(self):
# See similar test for Rank1Normal for reasoning.
self._check_versus_expected_effective_sample_size(
x_=np.random.randn(5000, 2).astype(np.float32),
expected_ess=5000,
filter_beyond_lag=10,
filter_threshold=None,
rtol=0.3)
def testIidRank1NormalHasFullEssMaxLagThresholdZero(self):
# With a length 5000 iid normal sequence, and filter_threshold = 0,
# we should have a super-duper estimate of ESS, and it should be very close
# to the full sequence length of 5000.
    # The choice of filter_threshold = 0 means we cut off as soon as the
    # auto-corr drops below zero. This should happen very quickly, due to the
    # fact that the theoretical auto-corr is [1, 0, 0,...]
self._check_versus_expected_effective_sample_size(
x_=np.random.randn(5000).astype(np.float32),
expected_ess=5000,
filter_beyond_lag=None,
filter_threshold=0.,
rtol=0.1)
def testIidRank2NormalHasFullEssMaxLagThresholdZero(self):
# See similar test for Rank1Normal for reasoning.
self._check_versus_expected_effective_sample_size(
x_=np.random.randn(5000, 2).astype(np.float32),
expected_ess=5000,
filter_beyond_lag=None,
filter_threshold=0.,
rtol=0.1)
def testIidRank1NormalHasFullEssMaxLagInitialPositive(self):
    # See the similar ThresholdZero test for background. This test uses the
    # initial_positive sequence criterion instead. In this case, the
    # initial_positive sequence might be a little noisier than the threshold
    # case because it will typically not drop the lag-1 auto-correlation.
self._check_versus_expected_effective_sample_size(
x_=np.random.randn(5000).astype(np.float32),
expected_ess=5000,
filter_beyond_lag=None,
filter_threshold=None,
filter_beyond_positive_pairs=True,
rtol=0.25)
def testIidRank2NormalHasFullEssMaxLagInitialPositive(self):
# See similar test for Rank1Normal for reasoning.
self._check_versus_expected_effective_sample_size(
x_=np.random.randn(5000, 2).astype(np.float32),
expected_ess=5000,
filter_beyond_lag=None,
filter_threshold=None,
filter_beyond_positive_pairs=True,
rtol=0.25)
def testIidRank1NormalHasFullEssMaxLagInitialPositiveOddLength(self):
# See similar test for Rank1Normal for reasoning.
self._check_versus_expected_effective_sample_size(
x_=np.random.randn(4999).astype(np.float32),
expected_ess=4999,
filter_beyond_lag=None,
filter_threshold=None,
filter_beyond_positive_pairs=True,
rtol=0.2)
def testLength10CorrelationHasEssOneTenthTotalLengthUsingMaxLags50(self):
# Create x_, such that
# x_[i] = iid_x_[0], i = 0,...,9
# x_[i] = iid_x_[1], i = 10,..., 19,
# and so on.
iid_x_ = np.random.randn(5000, 1).astype(np.float32)
x_ = (iid_x_ * np.ones((5000, 10)).astype(np.float32)).reshape((50000,))
self._check_versus_expected_effective_sample_size(
x_=x_,
expected_ess=50000 // 10,
filter_beyond_lag=50,
filter_threshold=None,
rtol=0.2)
def testLength10CorrelationHasEssOneTenthTotalLengthUsingMaxLagsThresholdZero(
self):
# Create x_, such that
# x_[i] = iid_x_[0], i = 0,...,9
# x_[i] = iid_x_[1], i = 10,..., 19,
# and so on.
iid_x_ = np.random.randn(5000, 1).astype(np.float32)
x_ = (iid_x_ * np.ones((5000, 10)).astype(np.float32)).reshape((50000,))
self._check_versus_expected_effective_sample_size(
x_=x_,
expected_ess=50000 // 10,
filter_beyond_lag=None,
filter_threshold=0.,
rtol=0.1)
def testLength10CorrelationHasEssOneTenthTotalLengthUsingMaxLagsInitialPos(
self):
# Create x_, such that
# x_[i] = iid_x_[0], i = 0,...,9
# x_[i] = iid_x_[1], i = 10,..., 19,
# and so on.
iid_x_ = np.random.randn(5000, 1).astype(np.float32)
x_ = (iid_x_ * np.ones((5000, 10)).astype(np.float32)).reshape((50000,))
self._check_versus_expected_effective_sample_size(
x_=x_,
expected_ess=50000 // 10,
filter_beyond_lag=None,
filter_threshold=None,
filter_beyond_positive_pairs=True,
rtol=0.15)
def testListArgs(self):
# x_ has correlation length 10 ==> ESS = N / 10
# y_ has correlation length 1 ==> ESS = N
iid_x_ = np.random.randn(5000, 1).astype(np.float32)
x_ = (iid_x_ * np.ones((5000, 10)).astype(np.float32)).reshape((50000,))
y_ = np.random.randn(50000).astype(np.float32)
states = [x_, x_, y_, y_]
filter_threshold = [0., None, 0., None]
filter_beyond_lag = [None, 5, None, 5]
# See other tests for reasoning on tolerance.
ess = tfp.mcmc.effective_sample_size(
states,
filter_threshold=filter_threshold,
filter_beyond_lag=filter_beyond_lag)
ess_ = self.evaluate(ess)
self.assertAllEqual(4, len(ess_))
self.assertAllClose(50000 // 10, ess_[0], rtol=0.3)
self.assertAllClose(50000 // 10, ess_[1], rtol=0.3)
self.assertAllClose(50000, ess_[2], rtol=0.1)
self.assertAllClose(50000, ess_[3], rtol=0.1)
def testMaxLagsThresholdLessThanNeg1SameAsNone(self):
# Setting both means we filter out items R_k from the auto-correlation
# sequence if k > filter_beyond_lag OR k >= j where R_j < filter_threshold.
# x_ has correlation length 10.
iid_x_ = np.random.randn(500, 1).astype(np.float32)
x_ = (iid_x_ * np.ones((500, 10)).astype(np.float32)).reshape((5000,))
x = tf1.placeholder_with_default(
x_, shape=x_.shape if self.use_static_shape else None)
ess_none_none = tfp.mcmc.effective_sample_size(
x, filter_threshold=None, filter_beyond_lag=None)
ess_none_200 = tfp.mcmc.effective_sample_size(
x, filter_threshold=None, filter_beyond_lag=200)
ess_neg2_200 = tfp.mcmc.effective_sample_size(
x, filter_threshold=-2., filter_beyond_lag=200)
ess_neg2_none = tfp.mcmc.effective_sample_size(
x, filter_threshold=-2., filter_beyond_lag=None)
[ess_none_none_, ess_none_200_, ess_neg2_200_,
ess_neg2_none_] = self.evaluate(
[ess_none_none, ess_none_200, ess_neg2_200, ess_neg2_none])
# filter_threshold=-2 <==> filter_threshold=None.
self.assertAllClose(ess_none_none_, ess_neg2_none_)
self.assertAllClose(ess_none_200_, ess_neg2_200_)
def testMaxLagsArgsAddInAnOrManner(self):
# Setting both means we filter out items R_k from the auto-correlation
# sequence if k > filter_beyond_lag OR k >= j where R_j < filter_threshold.
# x_ has correlation length 10.
iid_x_ = np.random.randn(500, 1).astype(np.float32)
x_ = (iid_x_ * np.ones((500, 10)).astype(np.float32)).reshape((5000,))
x = tf1.placeholder_with_default(
x_, shape=x_.shape if self.use_static_shape else None)
ess_1_9 = tfp.mcmc.effective_sample_size(
x, filter_threshold=1., filter_beyond_lag=9)
ess_1_none = tfp.mcmc.effective_sample_size(
x, filter_threshold=1., filter_beyond_lag=None)
    ess_none_9 = tfp.mcmc.effective_sample_size(
        x, filter_threshold=None, filter_beyond_lag=9)
ess_1_9_, ess_1_none_, ess_none_9_ = self.evaluate(
[ess_1_9, ess_1_none, ess_none_9])
# Since R_k = 1 for k < 10, and R_k < 1 for k >= 10,
# filter_threshold = 1 <==> filter_beyond_lag = 9.
self.assertAllClose(ess_1_9_, ess_1_none_)
self.assertAllClose(ess_1_9_, ess_none_9_)
def testInitialPositiveAndLag(self):
# We will use the max_lags argument to verify that initial_positive sequence
# argument does what it should.
# This sequence begins to have non-positive pairwise sums at lag 38
x_ = np.linspace(-1., 1., 100).astype(np.float32)
x = tf1.placeholder_with_default(
x_, shape=x_.shape if self.use_static_shape else None)
ess_true_37 = tfp.mcmc.effective_sample_size(
x,
filter_beyond_positive_pairs=True,
filter_threshold=None,
filter_beyond_lag=37)
ess_true_none = tfp.mcmc.effective_sample_size(
x,
filter_beyond_positive_pairs=True,
filter_threshold=None,
filter_beyond_lag=None)
ess_false_37 = tfp.mcmc.effective_sample_size(
x,
filter_beyond_positive_pairs=False,
filter_threshold=None,
filter_beyond_lag=37)
ess_true_37_, ess_true_none_, ess_false_37_ = self.evaluate(
[ess_true_37, ess_true_none, ess_false_37])
self.assertAllClose(ess_true_37_, ess_true_none_)
self.assertAllClose(ess_true_37_, ess_false_37_)
def testInitialPositiveSuperEfficient(self):
# Initial positive sequence will correctly estimate the ESS of
# super-efficient MCMC chains.
# This sequence has strong anti-autocorrelation, so will get ESS larger than
# its length.
x_ = ((np.arange(0, 100) % 2).astype(np.float32) -
0.5) * np.exp(-np.linspace(0., 10., 100))
x = tf1.placeholder_with_default(
x_, shape=x_.shape if self.use_static_shape else None)
ess = tfp.mcmc.effective_sample_size(
x, filter_beyond_positive_pairs=True)
ess_ = self.evaluate(ess)
self.assertGreater(ess_, 100.)
@test_util.run_all_in_graph_and_eager_modes
class EffectiveSampleSizeStaticTest(tfp_test_util.TestCase,
_EffectiveSampleSizeTest):
@property
def use_static_shape(self):
return True
@test_util.run_all_in_graph_and_eager_modes
class EffectiveSampleSizeDynamicTest(tfp_test_util.TestCase,
_EffectiveSampleSizeTest):
@property
def use_static_shape(self):
return False
@test_util.run_all_in_graph_and_eager_modes
class _PotentialScaleReductionTest(object):
@property
def use_static_shape(self):
raise NotImplementedError(
"Subclass failed to implement `use_static_shape`.")
def testListOfStatesWhereFirstPassesSecondFails(self):
"""Simple test showing API with two states. Read first!."""
n_samples = 1000
# state_0 is two scalar chains taken from iid Normal(0, 1). Will pass.
state_0 = np.random.randn(n_samples, 2)
# state_1 is three 4-variate chains taken from Normal(0, 1) that have been
# shifted. Since every chain is shifted, they are not the same, and the
# test should fail.
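    # Rough sketch of the statistic (standard Gelman-Rubin form, stated here for
    # intuition rather than as the exact implementation): with per-chain length n,
    # within-chain variance W and between-chain variance B,
    #   R_hat ~= sqrt(((n - 1) / n * W + B / n) / W),
    # so identically distributed chains give R_hat ~= 1, while the shifted chains
    # inflate B and push R_hat well above 1.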
offset = np.array([1., -1., 2.]).reshape(3, 1)
state_1 = np.random.randn(n_samples, 3, 4) + offset
rhat = tfp.mcmc.potential_scale_reduction(
chains_states=[state_0, state_1], independent_chain_ndims=1)
self.assertIsInstance(rhat, list)
rhat_0_, rhat_1_ = self.evaluate(rhat)
# r_hat_0 should be close to 1, meaning test is passed.
self.assertAllEqual((), rhat_0_.shape)
self.assertAllClose(1., rhat_0_, rtol=0.02)
# r_hat_1 should be greater than 1.2, meaning test has failed.
self.assertAllEqual((4,), rhat_1_.shape)
self.assertAllEqual(np.ones_like(rhat_1_).astype(bool), rhat_1_ > 1.2)
def check_results(self,
state_,
independent_chain_shape,
should_pass,
split_chains=False):
sample_ndims = 1
independent_chain_ndims = len(independent_chain_shape)
state = tf1.placeholder_with_default(
state_, shape=state_.shape if self.use_static_shape else None)
rhat = tfp.mcmc.potential_scale_reduction(
state,
independent_chain_ndims=independent_chain_ndims,
split_chains=split_chains)
if self.use_static_shape:
self.assertAllEqual(
state_.shape[sample_ndims + independent_chain_ndims:], rhat.shape)
rhat_ = self.evaluate(rhat)
if should_pass:
self.assertAllClose(np.ones_like(rhat_), rhat_, atol=0, rtol=0.02)
else:
self.assertAllEqual(np.ones_like(rhat_).astype(bool), rhat_ > 1.2)
def iid_normal_chains_should_pass_wrapper(self,
sample_shape,
independent_chain_shape,
other_shape,
split_chains=False,
dtype=np.float32):
"""Check results with iid normal chains."""
state_shape = sample_shape + independent_chain_shape + other_shape
state_ = np.random.randn(*state_shape).astype(dtype)
# The "other" dimensions do not have to be identical, just independent, so
# force them to not be identical.
if other_shape:
state_ *= np.random.rand(*other_shape).astype(dtype)
self.check_results(
state_,
independent_chain_shape,
should_pass=True,
split_chains=split_chains)
def testPassingIIDNdimsAreIndependentOneOtherZero(self):
self.iid_normal_chains_should_pass_wrapper(
sample_shape=[10000], independent_chain_shape=[4], other_shape=[])
def testPassingIIDNdimsAreIndependentOneOtherOne(self):
self.iid_normal_chains_should_pass_wrapper(
sample_shape=[10000], independent_chain_shape=[3], other_shape=[7])
def testPassingIIDNdimsAreIndependentOneOtherOneSplitChainsEvenNSamples(self):
self.iid_normal_chains_should_pass_wrapper(
sample_shape=[10000],
independent_chain_shape=[3],
other_shape=[7],
split_chains=True)
def testPassingIIDNdimsAreIndependentOneOtherOneSplitChainsOddNSamples(self):
# For odd number of samples we must remove last sample.
self.iid_normal_chains_should_pass_wrapper(
sample_shape=[10001],
independent_chain_shape=[3],
other_shape=[7],
split_chains=True)
def testPassingIIDNdimsAreIndependentOneOtherTwo(self):
self.iid_normal_chains_should_pass_wrapper(
sample_shape=[10000], independent_chain_shape=[2], other_shape=[5, 7])
def testPassingIIDNdimsAreIndependentTwoOtherTwo64Bit(self):
self.iid_normal_chains_should_pass_wrapper(
sample_shape=[10000],
independent_chain_shape=[2, 3],
other_shape=[5, 7],
dtype=np.float64)
def offset_normal_chains_should_fail_wrapper(
self, sample_shape, independent_chain_shape, other_shape):
"""Check results with normal chains that are offset from each other."""
state_shape = sample_shape + independent_chain_shape + other_shape
state_ = np.random.randn(*state_shape)
# Add a significant offset to the different (formerly iid) chains.
offset = np.linspace(
0, 2, num=np.prod(independent_chain_shape)).reshape([1] * len(
sample_shape) + independent_chain_shape + [1] * len(other_shape))
state_ += offset
self.check_results(state_, independent_chain_shape, should_pass=False)
def testFailingOffsetNdimsAreSampleOneIndependentOneOtherOne(self):
self.offset_normal_chains_should_fail_wrapper(
sample_shape=[10000], independent_chain_shape=[2], other_shape=[5])
def testLinearTrendPassesIfNoSplitChains(self):
# A problem with non-split Rhat is that it does not catch linear trends.
n_samples = 1000
n_chains = 10
state_ = (
np.random.randn(n_samples, n_chains) +
np.linspace(0, 1, n_samples).reshape(n_samples, 1))
self.check_results(
state_,
independent_chain_shape=[n_chains],
should_pass=True,
split_chains=False)
def testLinearTrendFailsIfSplitChains(self):
n_samples = 10000
n_chains = 10
state_ = (
np.random.randn(n_samples, n_chains) +
np.linspace(0, 10, n_samples).reshape(n_samples, 1))
self.check_results(
state_,
independent_chain_shape=[n_chains],
should_pass=False,
split_chains=True)
def testNotEnoughSamplesNoSplitChainsFailsIfValidateArgs(self):
input_ = np.random.rand(1, 10)
x = tf1.placeholder_with_default(
input_, shape=input_.shape if self.use_static_shape else None)
with self.assertRaisesError("Must provide at least 2 samples"):
self.evaluate(
tfp.mcmc.potential_scale_reduction(
# Require at least 2 samples...have only 1
x,
independent_chain_ndims=1,
validate_args=True))
def testNotEnoughSamplesWithSplitChainsFailsIfValidateArgs(self):
input_ = np.random.rand(3, 10)
x = tf1.placeholder_with_default(
input_, shape=input_.shape if self.use_static_shape else None)
with self.assertRaisesError("Must provide at least 4 samples"):
self.evaluate(
tfp.mcmc.potential_scale_reduction(
# Require at least 4 samples...have only 3
x,
independent_chain_ndims=1,
split_chains=True,
validate_args=True))
@test_util.run_all_in_graph_and_eager_modes
class PotentialScaleReductionStaticTest(tfp_test_util.TestCase,
_PotentialScaleReductionTest):
@property
def use_static_shape(self):
return True
def assertRaisesError(self, msg):
return self.assertRaisesRegexp(Exception, msg)
def testIndependentNdimsLessThanOneRaises(self):
with self.assertRaisesRegexp(ValueError, "independent_chain_ndims"):
tfp.mcmc.potential_scale_reduction(
np.random.rand(2, 3, 4), independent_chain_ndims=0)
@test_util.run_all_in_graph_and_eager_modes
class PotentialScaleReductionDynamicTest(tfp_test_util.TestCase,
_PotentialScaleReductionTest):
@property
def use_static_shape(self):
return False
def assertRaisesError(self, msg):
if tf.executing_eagerly():
return self.assertRaisesRegexp(Exception, msg)
return self.assertRaisesOpError(msg)
@test_util.run_all_in_graph_and_eager_modes
class _ReduceVarianceTest(object):
@property
def use_static_shape(self):
raise NotImplementedError(
"Subclass failed to implement `use_static_shape`.")
def check_versus_numpy(self, x_, axis, biased, keepdims):
x_ = np.asarray(x_)
x = tf1.placeholder_with_default(
x_, shape=x_.shape if self.use_static_shape else None)
var = _reduce_variance(
x, axis=axis, biased=biased, keepdims=keepdims)
np_var = np.var(x_, axis=axis, ddof=0 if biased else 1, keepdims=keepdims)
if self.use_static_shape:
self.assertAllEqual(np_var.shape, var.shape)
var_ = self.evaluate(var)
# We will mask below, which changes shape, so check shape explicitly here.
self.assertAllEqual(np_var.shape, var_.shape)
# We get NaN when we divide by zero due to the size being the same as ddof
nan_mask = np.isnan(np_var)
if nan_mask.any():
self.assertTrue(np.isnan(var_[nan_mask]).all())
self.assertAllClose(np_var[~nan_mask], var_[~nan_mask], atol=0, rtol=0.02)
def testScalarBiasedTrue(self):
self.check_versus_numpy(x_=-1.234, axis=None, biased=True, keepdims=False)
def testScalarBiasedFalse(self):
# This should result in NaN.
self.check_versus_numpy(x_=-1.234, axis=None, biased=False, keepdims=False)
def testShape2x3x4AxisNoneBiasedFalseKeepdimsFalse(self):
self.check_versus_numpy(
x_=np.random.randn(2, 3, 4), axis=None, biased=True, keepdims=False)
def testShape2x3x4Axis1BiasedFalseKeepdimsTrue(self):
self.check_versus_numpy(
x_=np.random.randn(2, 3, 4), axis=1, biased=True, keepdims=True)
def testShape2x3x4x5Axis13BiasedFalseKeepdimsTrue(self):
self.check_versus_numpy(
x_=np.random.randn(2, 3, 4, 5), axis=1, biased=True, keepdims=True)
def testShape2x3x4x5Axis13BiasedFalseKeepdimsFalse(self):
self.check_versus_numpy(
x_=np.random.randn(2, 3, 4, 5), axis=1, biased=False, keepdims=False)
@test_util.run_all_in_graph_and_eager_modes
class ReduceVarianceTestStaticShape(tfp_test_util.TestCase,
_ReduceVarianceTest):
@property
def use_static_shape(self):
return True
@test_util.run_all_in_graph_and_eager_modes
class ReduceVarianceTestDynamicShape(tfp_test_util.TestCase,
_ReduceVarianceTest):
@property
def use_static_shape(self):
return False
if __name__ == "__main__":
tf.test.main()
|
the-stack_0_16100 | from cryptography.fernet import Fernet
def read_key():
    # Load the symmetric Fernet key previously written to disk as raw bytes.
    with open('key.ley', 'rb') as key_file:
        return key_file.read()
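# read_key() assumes the key file already exists and holds bytes produced by
# Fernet.generate_key(). A one-off way to create it (illustrative sketch; keep the
# key file out of version control) would be:
#   with open('key.ley', 'wb') as f:
#       f.write(Fernet.generate_key())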
def encrpyt(data):
key = read_key()
encoded_data = data.encode()
f = Fernet(key)
encrypted = f.encrypt(encoded_data)
encrypted_decoded_data = encrypted.decode()
return encrypted_decoded_data
def decrpyt(data):
key = read_key()
encoded_data = data.encode()
f = Fernet(key)
    decrpyted = f.decrypt(encoded_data)
decrpyted_decoded_data = decrpyted.decode()
return decrpyted_decoded_data |
the-stack_0_16101 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Description: an implementation of a deep learning recommendation model (DLRM)
# The model input consists of dense and sparse features. The former is a vector
# of floating point values. The latter is a list of sparse indices into
# embedding tables, which consist of vectors of floating point values.
# The selected vectors are passed to mlp networks denoted by triangles,
# in some cases the vectors are interacted through operators (Ops).
#
# output:
# vector of values
# model: |
# /\
# /__\
# |
# _____________________> Op <___________________
# / | \
# /\ /\ /\
# /__\ /__\ ... /__\
# | | |
# | Op Op
# | ____/__\_____ ____/__\____
# | |_Emb_|____|__| ... |_Emb_|__|___|
# input:
# [ dense features ] [sparse indices] , ..., [sparse indices]
#
# More precise definition of model layers:
# 1) fully connected layers of an mlp
# z = f(y)
# y = Wx + b
#
# 2) embedding lookup (for a list of sparse indices p=[p1,...,pk])
# z = Op(e1,...,ek)
# obtain vectors e1=E[:,p1], ..., ek=E[:,pk]
#
# 3) Operator Op can be one of the following
# Sum(e1,...,ek) = e1 + ... + ek
# Dot(e1,...,ek) = [e1'e1, ..., e1'ek, ..., ek'e1, ..., ek'ek]
# Cat(e1,...,ek) = [e1', ..., ek']'
# where ' denotes transpose operation
#
# References:
# [1] Maxim Naumov, Dheevatsa Mudigere, Hao-Jun Michael Shi, Jianyu Huang,
# Narayanan Sundaram, Jongsoo Park, Xiaodong Wang, Udit Gupta, Carole-Jean Wu,
# Alisson G. Azzolini, Dmytro Dzhulgakov, Andrey Mallevich, Ilia Cherniavskii,
# Yinghai Lu, Raghuraman Krishnamoorthi, Ansha Yu, Volodymyr Kondratenko,
# Stephanie Pereira, Xianjie Chen, Wenlin Chen, Vijay Rao, Bill Jia, Liang Xiong,
# Misha Smelyanskiy, "Deep Learning Recommendation Model for Personalization and
# Recommendation Systems", CoRR, arXiv:1906.00091, 2019
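#
# Illustrative shape walk-through for the "dot" interaction above (a sketch with
# assumed sizes, not values taken from this file): for a mini-batch of size B,
# a bottom-MLP output x of shape (B, d) and k embedding lookups e1, ..., ek each
# of shape (B, d), the stacked tensor T has shape (B, k+1, d), Z = T bmm T^T has
# shape (B, k+1, k+1), and keeping only the unique strictly-lower-triangular
# entries of Z yields (k+1)*k/2 interaction terms per sample, which are then
# concatenated with x before the top MLP (see interact_features below).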
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
# miscellaneous
import builtins
import datetime
import json
import sys
import time
# from ppuda.ghn.nn import GHN2
# onnx
# The onnx import causes deprecation warnings every time workers
# are spawned during testing. So, we filter out those warnings.
import warnings
# from ppuda.utils.utils import adjust_net
# data generation
import dlrm_data_pytorch as dp
# For distributed run
import extend_distributed as ext_dist
import mlperf_logger
# numpy
import numpy as np
import sklearn.metrics
# pytorch
import torch
import torch.nn as nn
from torch._ops import ops
from torch.autograd.profiler import record_function
from torch.nn.parallel.parallel_apply import parallel_apply
from torch.nn.parallel.replicate import replicate
from torch.nn.parallel.scatter_gather import gather, scatter
from torch.nn.parameter import Parameter
from torch.optim.lr_scheduler import _LRScheduler
import optim.rwsadagrad as RowWiseSparseAdagrad
from torch.utils.tensorboard import SummaryWriter
# mixed-dimension trick
from tricks.md_embedding_bag import PrEmbeddingBag, md_solver
# quotient-remainder trick
from tricks.qr_embedding_bag import QREmbeddingBag
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
try:
import onnx
except ImportError as error:
print("Unable to import onnx. ", error)
# from torchviz import make_dot
# import torch.nn.functional as Functional
# from torch.nn.parameter import Parameter
exc = getattr(builtins, "IOError", "FileNotFoundError")
def time_wrap(use_gpu):
if use_gpu:
torch.cuda.synchronize()
return time.time()
def dlrm_wrap(X, lS_o, lS_i, use_gpu, device, ndevices=1):
with record_function("DLRM forward"):
if use_gpu: # .cuda()
# lS_i can be either a list of tensors or a stacked tensor.
# Handle each case below:
if ndevices == 1:
lS_i = (
[S_i.to(device) for S_i in lS_i]
if isinstance(lS_i, list)
else lS_i.to(device)
)
lS_o = (
[S_o.to(device) for S_o in lS_o]
if isinstance(lS_o, list)
else lS_o.to(device)
)
return dlrm(X.to(device), lS_o, lS_i)
def loss_fn_wrap(Z, T, use_gpu, device):
with record_function("DLRM loss compute"):
if args.loss_function == "mse" or args.loss_function == "bce":
return dlrm.loss_fn(Z, T.to(device))
elif args.loss_function == "wbce":
loss_ws_ = dlrm.loss_ws[T.data.view(-1).long()].view_as(T).to(device)
loss_fn_ = dlrm.loss_fn(Z, T.to(device))
loss_sc_ = loss_ws_ * loss_fn_
return loss_sc_.mean()
# The following function is a wrapper to avoid checking this multiple times in the
# loop below.
def unpack_batch(b):
# Experiment with unweighted samples
return b[0], b[1], b[2], b[3], torch.ones(b[3].size()), None
class LRPolicyScheduler(_LRScheduler):
def __init__(self, optimizer, num_warmup_steps, decay_start_step, num_decay_steps):
self.num_warmup_steps = num_warmup_steps
self.decay_start_step = decay_start_step
self.decay_end_step = decay_start_step + num_decay_steps
self.num_decay_steps = num_decay_steps
if self.decay_start_step < self.num_warmup_steps:
sys.exit("Learning rate warmup must finish before the decay starts")
super(LRPolicyScheduler, self).__init__(optimizer)
def get_lr(self):
step_count = self._step_count
if step_count < self.num_warmup_steps:
# warmup
scale = 1.0 - (self.num_warmup_steps - step_count) / self.num_warmup_steps
lr = [base_lr * scale for base_lr in self.base_lrs]
self.last_lr = lr
elif self.decay_start_step <= step_count and step_count < self.decay_end_step:
# decay
decayed_steps = step_count - self.decay_start_step
scale = ((self.num_decay_steps - decayed_steps) / self.num_decay_steps) ** 2
min_lr = 0.0000001
lr = [max(min_lr, base_lr * scale) for base_lr in self.base_lrs]
self.last_lr = lr
else:
if self.num_decay_steps > 0:
# freeze at last, either because we're after decay
# or because we're between warmup and decay
lr = self.last_lr
else:
# do not adjust
lr = self.base_lrs
return lr
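    # Worked example with assumed (illustrative) numbers: base_lr = 0.1,
    # num_warmup_steps = 100, decay_start_step = 1000, num_decay_steps = 500:
    #   step 50   -> warmup scale 50/100             -> lr = 0.05
    #   step 500  -> between warmup and decay        -> lr frozen at the last warmup value (~0.1)
    #   step 1250 -> decay scale ((500-250)/500)**2  -> lr = 0.025
    #   step 1600 -> past decay_end_step             -> lr frozen at the last decay value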
### define dlrm in PyTorch ###
class DLRM_Net(nn.Module):
def create_mlp(self, ln, sigmoid_layer):
# build MLP layer by layer
layers = nn.ModuleList()
for i in range(0, ln.size - 1):
n = ln[i]
m = ln[i + 1]
# construct fully connected operator
LL = nn.Linear(int(n), int(m), bias=True)
# initialize the weights
# with torch.no_grad():
# custom Xavier input, output or two-sided fill
mean = 0.0 # std_dev = np.sqrt(variance)
std_dev = np.sqrt(2 / (m + n)) # np.sqrt(1 / m) # np.sqrt(1 / n)
W = np.random.normal(mean, std_dev, size=(m, n)).astype(np.float32)
std_dev = np.sqrt(1 / m) # np.sqrt(2 / (m + 1))
bt = np.random.normal(mean, std_dev, size=m).astype(np.float32)
# approach 1
LL.weight.data = torch.tensor(W, requires_grad=True)
LL.bias.data = torch.tensor(bt, requires_grad=True)
# approach 2
# LL.weight.data.copy_(torch.tensor(W))
# LL.bias.data.copy_(torch.tensor(bt))
# approach 3
# LL.weight = Parameter(torch.tensor(W),requires_grad=True)
# LL.bias = Parameter(torch.tensor(bt),requires_grad=True)
layers.append(LL)
# construct sigmoid or relu operator
if i == sigmoid_layer:
layers.append(nn.Sigmoid())
else:
layers.append(nn.ReLU())
# approach 1: use ModuleList
# return layers
# approach 2: use Sequential container to wrap all layers
return torch.nn.Sequential(*layers)
def create_emb(self, m, ln, weighted_pooling=None):
emb_l = nn.ModuleList()
v_W_l = []
for i in range(0, ln.size):
if ext_dist.my_size > 1:
if i not in self.local_emb_indices:
continue
n = ln[i]
# construct embedding operator
if self.qr_flag and n > self.qr_threshold:
EE = QREmbeddingBag(
n,
m,
self.qr_collisions,
operation=self.qr_operation,
mode="sum",
sparse=True,
)
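                # Sketch of the quotient-remainder idea (see tricks/qr_embedding_bag.py
                # for the actual implementation): index i is split into
                # (i // qr_collisions, i % qr_collisions), each part indexes a much
                # smaller table, and the two vectors are combined via qr_operation
                # (e.g. element-wise "mult"), so roughly n/collisions + collisions
                # rows replace a single table with n rows.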
elif self.md_flag and n > self.md_threshold:
base = max(m)
_m = m[i] if n > self.md_threshold else base
EE = PrEmbeddingBag(n, _m, base)
# use np initialization as below for consistency...
W = np.random.uniform(
low=-np.sqrt(1 / n), high=np.sqrt(1 / n), size=(n, _m)
).astype(np.float32)
EE.embs.weight.data = torch.tensor(W, requires_grad=True)
else:
EE = nn.EmbeddingBag(n, m, mode="sum", sparse=False)
# initialize embeddings
# nn.init.uniform_(EE.weight, a=-np.sqrt(1 / n), b=np.sqrt(1 / n))
W = np.random.uniform(
low=-np.sqrt(1 / n), high=np.sqrt(1 / n), size=(n, m)
).astype(np.float32)
# approach 1
EE.weight.data = torch.tensor(W, requires_grad=True)
# approach 2
# EE.weight.data.copy_(torch.tensor(W))
# approach 3
# EE.weight = Parameter(torch.tensor(W),requires_grad=True)
if weighted_pooling is None:
v_W_l.append(None)
else:
v_W_l.append(torch.ones(n, dtype=torch.float32))
emb_l.append(EE)
return emb_l, v_W_l
def __init__(
self,
m_spa=None,
ln_emb=None,
ln_bot=None,
ln_top=None,
arch_interaction_op=None,
arch_interaction_itself=False,
sigmoid_bot=-1,
sigmoid_top=-1,
sync_dense_params=True,
loss_threshold=0.0,
ndevices=-1,
qr_flag=False,
qr_operation="mult",
qr_collisions=0,
qr_threshold=200,
md_flag=False,
md_threshold=200,
weighted_pooling=None,
loss_function="bce"
):
super(DLRM_Net, self).__init__()
if (
(m_spa is not None)
and (ln_emb is not None)
and (ln_bot is not None)
and (ln_top is not None)
and (arch_interaction_op is not None)
):
# save arguments
self.ndevices = ndevices
self.output_d = 0
self.parallel_model_batch_size = -1
self.parallel_model_is_not_prepared = True
self.arch_interaction_op = arch_interaction_op
self.arch_interaction_itself = arch_interaction_itself
self.sync_dense_params = sync_dense_params
self.loss_threshold = loss_threshold
self.loss_function=loss_function
if weighted_pooling is not None and weighted_pooling != "fixed":
self.weighted_pooling = "learned"
else:
self.weighted_pooling = weighted_pooling
# create variables for QR embedding if applicable
self.qr_flag = qr_flag
if self.qr_flag:
self.qr_collisions = qr_collisions
self.qr_operation = qr_operation
self.qr_threshold = qr_threshold
# create variables for MD embedding if applicable
self.md_flag = md_flag
if self.md_flag:
self.md_threshold = md_threshold
# If running distributed, get local slice of embedding tables
if ext_dist.my_size > 1:
n_emb = len(ln_emb)
if n_emb < ext_dist.my_size:
sys.exit(
"only (%d) sparse features for (%d) devices, table partitions will fail"
% (n_emb, ext_dist.my_size)
)
self.n_global_emb = n_emb
self.n_local_emb, self.n_emb_per_rank = ext_dist.get_split_lengths(
n_emb
)
self.local_emb_slice = ext_dist.get_my_slice(n_emb)
self.local_emb_indices = list(range(n_emb))[self.local_emb_slice]
# create operators
if ndevices <= 1:
self.emb_l, w_list = self.create_emb(m_spa, ln_emb, weighted_pooling)
if self.weighted_pooling == "learned":
self.v_W_l = nn.ParameterList()
for w in w_list:
self.v_W_l.append(Parameter(w))
else:
self.v_W_l = w_list
self.bot_l = self.create_mlp(ln_bot, sigmoid_bot)
self.top_l = self.create_mlp(ln_top, sigmoid_top)
# quantization
self.quantize_emb = False
self.emb_l_q = []
self.quantize_bits = 32
# specify the loss function
if self.loss_function == "mse":
self.loss_fn = torch.nn.MSELoss(reduction="mean")
elif self.loss_function == "bce":
self.loss_fn = torch.nn.BCELoss(reduction="mean")
elif self.loss_function == "wbce":
self.loss_ws = torch.tensor(
np.fromstring(args.loss_weights, dtype=float, sep="-")
)
self.loss_fn = torch.nn.BCELoss(reduction="none")
else:
sys.exit(
"ERROR: --loss-function=" + self.loss_function + " is not supported"
)
def apply_mlp(self, x, layers):
# approach 1: use ModuleList
# for layer in layers:
# x = layer(x)
# return x
# approach 2: use Sequential container to wrap all layers
return layers(x)
def apply_emb(self, lS_o, lS_i, emb_l, v_W_l):
# WARNING: notice that we are processing the batch at once. We implicitly
# assume that the data is laid out such that:
# 1. each embedding is indexed with a group of sparse indices,
# corresponding to a single lookup
# 2. for each embedding the lookups are further organized into a batch
# 3. for a list of embedding tables there is a list of batched lookups
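        # Illustrative layout for one table k (assumed numbers, EmbeddingBag-style
        # offsets): lS_i[k] = tensor([1, 4, 4, 7, 2]) and lS_o[k] = tensor([0, 2, 4])
        # describe a batch of 3 lookups that pool rows {1, 4}, {4, 7} and {2}
        # of embedding table k, respectively.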
ly = []
for k, sparse_index_group_batch in enumerate(lS_i):
sparse_offset_group_batch = lS_o[k]
# embedding lookup
# We are using EmbeddingBag, which implicitly uses sum operator.
# The embeddings are represented as tall matrices, with sum
# happening vertically across 0 axis, resulting in a row vector
# E = emb_l[k]
if v_W_l[k] is not None:
per_sample_weights = v_W_l[k].gather(0, sparse_index_group_batch)
else:
per_sample_weights = None
if self.quantize_emb:
s1 = self.emb_l_q[k].element_size() * self.emb_l_q[k].nelement()
s2 = self.emb_l_q[k].element_size() * self.emb_l_q[k].nelement()
print("quantized emb sizes:", s1, s2)
if self.quantize_bits == 4:
QV = ops.quantized.embedding_bag_4bit_rowwise_offsets(
self.emb_l_q[k],
sparse_index_group_batch,
sparse_offset_group_batch,
per_sample_weights=per_sample_weights,
)
elif self.quantize_bits == 8:
QV = ops.quantized.embedding_bag_byte_rowwise_offsets(
self.emb_l_q[k],
sparse_index_group_batch,
sparse_offset_group_batch,
per_sample_weights=per_sample_weights,
)
ly.append(QV)
else:
E = emb_l[k]
V = E(
sparse_index_group_batch,
sparse_offset_group_batch,
per_sample_weights=per_sample_weights,
)
ly.append(V)
# print(ly)
return ly
# using quantizing functions from caffe2/aten/src/ATen/native/quantized/cpu
def quantize_embedding(self, bits):
n = len(self.emb_l)
self.emb_l_q = [None] * n
for k in range(n):
if bits == 4:
self.emb_l_q[k] = ops.quantized.embedding_bag_4bit_prepack(
self.emb_l[k].weight
)
elif bits == 8:
self.emb_l_q[k] = ops.quantized.embedding_bag_byte_prepack(
self.emb_l[k].weight
)
else:
return
self.emb_l = None
self.quantize_emb = True
self.quantize_bits = bits
def interact_features(self, x, ly):
if self.arch_interaction_op == "dot":
# concatenate dense and sparse features
(batch_size, d) = x.shape
T = torch.cat([x] + ly, dim=1).view((batch_size, -1, d))
# perform a dot product
Z = torch.bmm(T, torch.transpose(T, 1, 2))
# append dense feature with the interactions (into a row vector)
# approach 1: all
# Zflat = Z.view((batch_size, -1))
# approach 2: unique
_, ni, nj = Z.shape
# approach 1: tril_indices
# offset = 0 if self.arch_interaction_itself else -1
# li, lj = torch.tril_indices(ni, nj, offset=offset)
# approach 2: custom
offset = 1 if self.arch_interaction_itself else 0
li = torch.tensor([i for i in range(ni) for j in range(i + offset)])
lj = torch.tensor([j for i in range(nj) for j in range(i + offset)])
Zflat = Z[:, li, lj]
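            # e.g. (illustrative) with ni = nj = 3 and offset = 0 this yields
            # li = [1, 2, 2], lj = [0, 0, 1], i.e. the strictly-lower-triangular
            # entries of each (3, 3) interaction matrix in the batch.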
# concatenate dense features and interactions
R = torch.cat([x] + [Zflat], dim=1)
elif self.arch_interaction_op == "cat":
# concatenation features (into a row vector)
R = torch.cat([x] + ly, dim=1)
else:
sys.exit(
"ERROR: --arch-interaction-op="
+ self.arch_interaction_op
+ " is not supported"
)
return R
def forward(self, dense_x, lS_o, lS_i):
if ext_dist.my_size > 1:
# multi-node multi-device run
return self.distributed_forward(dense_x, lS_o, lS_i)
elif self.ndevices <= 1:
# single device run
return self.sequential_forward(dense_x, lS_o, lS_i)
else:
# single-node multi-device run
return self.parallel_forward(dense_x, lS_o, lS_i)
def distributed_forward(self, dense_x, lS_o, lS_i):
batch_size = dense_x.size()[0]
# WARNING: # of ranks must be <= batch size in distributed_forward call
if batch_size < ext_dist.my_size:
sys.exit(
"ERROR: batch_size (%d) must be larger than number of ranks (%d)"
% (batch_size, ext_dist.my_size)
)
if batch_size % ext_dist.my_size != 0:
sys.exit(
"ERROR: batch_size %d can not split across %d ranks evenly"
% (batch_size, ext_dist.my_size)
)
dense_x = dense_x[ext_dist.get_my_slice(batch_size)]
lS_o = lS_o[self.local_emb_slice]
lS_i = lS_i[self.local_emb_slice]
if (len(self.emb_l) != len(lS_o)) or (len(self.emb_l) != len(lS_i)):
sys.exit(
"ERROR: corrupted model input detected in distributed_forward call"
)
# embeddings
with record_function("DLRM embedding forward"):
ly = self.apply_emb(lS_o, lS_i, self.emb_l, self.v_W_l)
# WARNING: Note that at this point we have the result of the embedding lookup
# for the entire batch on each rank. We would like to obtain partial results
# corresponding to all embedding lookups, but part of the batch on each rank.
# Therefore, matching the distribution of output of bottom mlp, so that both
# could be used for subsequent interactions on each device.
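        # Illustrative redistribution (assumed sizes): with 2 ranks, 4 tables
        # (2 local per rank) and batch size B, each rank holds 2 tensors of shape
        # (B, d) before the alltoall and, afterwards, 4 tensors of shape (B/2, d),
        # i.e. every table's result but only for its slice of the batch.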
if len(self.emb_l) != len(ly):
sys.exit("ERROR: corrupted intermediate result in distributed_forward call")
a2a_req = ext_dist.alltoall(ly, self.n_emb_per_rank)
with record_function("DLRM bottom nlp forward"):
x = self.apply_mlp(dense_x, self.bot_l)
ly = a2a_req.wait()
ly = list(ly)
# interactions
with record_function("DLRM interaction forward"):
z = self.interact_features(x, ly)
# top mlp
with record_function("DLRM top nlp forward"):
p = self.apply_mlp(z, self.top_l)
# clamp output if needed
if 0.0 < self.loss_threshold and self.loss_threshold < 1.0:
z = torch.clamp(p, min=self.loss_threshold, max=(1.0 - self.loss_threshold))
else:
z = p
return z
def sequential_forward(self, dense_x, lS_o, lS_i):
# process dense features (using bottom mlp), resulting in a row vector
x = self.apply_mlp(dense_x, self.bot_l)
# debug prints
# print("intermediate")
# print(x.detach().cpu().numpy())
# process sparse features(using embeddings), resulting in a list of row vectors
ly = self.apply_emb(lS_o, lS_i, self.emb_l, self.v_W_l)
# for y in ly:
# print(y.detach().cpu().numpy())
# interact features (dense and sparse)
z = self.interact_features(x, ly)
# print(z.detach().cpu().numpy())
# obtain probability of a click (using top mlp)
p = self.apply_mlp(z, self.top_l)
# clamp output if needed
if 0.0 < self.loss_threshold and self.loss_threshold < 1.0:
z = torch.clamp(p, min=self.loss_threshold, max=(1.0 - self.loss_threshold))
else:
z = p
return z
def parallel_forward(self, dense_x, lS_o, lS_i):
### prepare model (overwrite) ###
# WARNING: # of devices must be >= batch size in parallel_forward call
batch_size = dense_x.size()[0]
ndevices = min(self.ndevices, batch_size, len(self.emb_l))
device_ids = range(ndevices)
        # WARNING: we must redistribute the model if the mini-batch size changes
        # (this is common for the last mini-batch, when the dataset size is not an
        # exact multiple of the batch size).
if self.parallel_model_batch_size != batch_size:
self.parallel_model_is_not_prepared = True
if self.parallel_model_is_not_prepared or self.sync_dense_params:
# replicate mlp (data parallelism)
self.bot_l_replicas = replicate(self.bot_l, device_ids)
self.top_l_replicas = replicate(self.top_l, device_ids)
self.parallel_model_batch_size = batch_size
if self.parallel_model_is_not_prepared:
# distribute embeddings (model parallelism)
t_list = []
w_list = []
for k, emb in enumerate(self.emb_l):
d = torch.device("cuda:" + str(k % ndevices))
t_list.append(emb.to(d))
if self.weighted_pooling == "learned":
w_list.append(Parameter(self.v_W_l[k].to(d)))
elif self.weighted_pooling == "fixed":
w_list.append(self.v_W_l[k].to(d))
else:
w_list.append(None)
self.emb_l = nn.ModuleList(t_list)
if self.weighted_pooling == "learned":
self.v_W_l = nn.ParameterList(w_list)
else:
self.v_W_l = w_list
self.parallel_model_is_not_prepared = False
### prepare input (overwrite) ###
# scatter dense features (data parallelism)
# print(dense_x.device)
dense_x = scatter(dense_x, device_ids, dim=0)
# distribute sparse features (model parallelism)
if (len(self.emb_l) != len(lS_o)) or (len(self.emb_l) != len(lS_i)):
sys.exit("ERROR: corrupted model input detected in parallel_forward call")
t_list = []
i_list = []
for k, _ in enumerate(self.emb_l):
d = torch.device("cuda:" + str(k % ndevices))
t_list.append(lS_o[k].to(d))
i_list.append(lS_i[k].to(d))
lS_o = t_list
lS_i = i_list
### compute results in parallel ###
# bottom mlp
# WARNING: Note that the self.bot_l is a list of bottom mlp modules
# that have been replicated across devices, while dense_x is a tuple of dense
# inputs that has been scattered across devices on the first (batch) dimension.
# The output is a list of tensors scattered across devices according to the
# distribution of dense_x.
x = parallel_apply(self.bot_l_replicas, dense_x, None, device_ids)
# debug prints
# print(x)
# embeddings
ly = self.apply_emb(lS_o, lS_i, self.emb_l, self.v_W_l)
# debug prints
# print(ly)
# butterfly shuffle (implemented inefficiently for now)
# WARNING: Note that at this point we have the result of the embedding lookup
# for the entire batch on each device. We would like to obtain partial results
# corresponding to all embedding lookups, but part of the batch on each device.
# Therefore, matching the distribution of output of bottom mlp, so that both
# could be used for subsequent interactions on each device.
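        # e.g. (illustrative) with ndevices = 2: ly[k] lives on device k % 2 with
        # shape (B, d); scatter splits it along the batch dimension so that each
        # device ends up with a (B/2, d) slice of every table's output.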
if len(self.emb_l) != len(ly):
sys.exit("ERROR: corrupted intermediate result in parallel_forward call")
t_list = []
for k, _ in enumerate(self.emb_l):
d = torch.device("cuda:" + str(k % ndevices))
y = scatter(ly[k], device_ids, dim=0)
t_list.append(y)
# adjust the list to be ordered per device
ly = list(map(lambda y: list(y), zip(*t_list)))
# debug prints
# print(ly)
# interactions
z = []
for k in range(ndevices):
zk = self.interact_features(x[k], ly[k])
z.append(zk)
# debug prints
# print(z)
# top mlp
# WARNING: Note that the self.top_l is a list of top mlp modules that
# have been replicated across devices, while z is a list of interaction results
# that by construction are scattered across devices on the first (batch) dim.
# The output is a list of tensors scattered across devices according to the
# distribution of z.
p = parallel_apply(self.top_l_replicas, z, None, device_ids)
### gather the distributed results ###
p0 = gather(p, self.output_d, dim=0)
# clamp output if needed
if 0.0 < self.loss_threshold and self.loss_threshold < 1.0:
z0 = torch.clamp(
p0, min=self.loss_threshold, max=(1.0 - self.loss_threshold)
)
else:
z0 = p0
return z0
def dash_separated_ints(value):
vals = value.split("-")
for val in vals:
try:
int(val)
except ValueError:
raise argparse.ArgumentTypeError(
"%s is not a valid dash separated list of ints" % value
)
return value
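# e.g. dash_separated_ints("13-512-256-64") returns the string unchanged, while
# "13-512-x" raises argparse.ArgumentTypeError; argparse invokes these validators
# through the type= arguments passed to add_argument() below.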
def dash_separated_floats(value):
vals = value.split("-")
for val in vals:
try:
float(val)
except ValueError:
raise argparse.ArgumentTypeError(
"%s is not a valid dash separated list of floats" % value
)
return value
def inference(
args,
dlrm,
best_acc_test,
best_auc_test,
test_ld,
device,
use_gpu,
log_iter=-1,
):
test_accu = 0
test_samp = 0
if args.mlperf_logging:
scores = []
targets = []
for i, testBatch in enumerate(test_ld):
# early exit if nbatches was set by the user and was exceeded
if nbatches > 0 and i >= nbatches:
break
X_test, lS_o_test, lS_i_test, T_test, W_test, CBPP_test = unpack_batch(
testBatch
)
# Skip the batch if batch size not multiple of total ranks
if ext_dist.my_size > 1 and X_test.size(0) % ext_dist.my_size != 0:
print("Warning: Skiping the batch %d with size %d" % (i, X_test.size(0)))
continue
# forward pass
Z_test = dlrm_wrap(
X_test,
lS_o_test,
lS_i_test,
use_gpu,
device,
ndevices=ndevices,
)
### gather the distributed results on each rank ###
# For some reason it requires explicit sync before all_gather call if
# tensor is on GPU memory
if Z_test.is_cuda:
torch.cuda.synchronize()
(_, batch_split_lengths) = ext_dist.get_split_lengths(X_test.size(0))
if ext_dist.my_size > 1:
Z_test = ext_dist.all_gather(Z_test, batch_split_lengths)
if args.mlperf_logging:
S_test = Z_test.detach().cpu().numpy() # numpy array
T_test = T_test.detach().cpu().numpy() # numpy array
scores.append(S_test)
targets.append(T_test)
else:
with record_function("DLRM accuracy compute"):
# compute loss and accuracy
S_test = Z_test.detach().cpu().numpy() # numpy array
T_test = T_test.detach().cpu().numpy() # numpy array
mbs_test = T_test.shape[0] # = mini_batch_size except last
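                # np.round maps the (assumed sigmoid) scores in S_test to hard 0/1
                # predictions, so A_test counts exact matches against the binary targets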
A_test = np.sum((np.round(S_test, 0) == T_test).astype(np.uint8))
test_accu += A_test
test_samp += mbs_test
if args.mlperf_logging:
with record_function("DLRM mlperf sklearn metrics compute"):
scores = np.concatenate(scores, axis=0)
targets = np.concatenate(targets, axis=0)
metrics = {
"recall": lambda y_true, y_score: sklearn.metrics.recall_score(
y_true=y_true, y_pred=np.round(y_score)
),
"precision": lambda y_true, y_score: sklearn.metrics.precision_score(
y_true=y_true, y_pred=np.round(y_score)
),
"f1": lambda y_true, y_score: sklearn.metrics.f1_score(
y_true=y_true, y_pred=np.round(y_score)
),
"ap": sklearn.metrics.average_precision_score,
"roc_auc": sklearn.metrics.roc_auc_score,
"accuracy": lambda y_true, y_score: sklearn.metrics.accuracy_score(
y_true=y_true, y_pred=np.round(y_score)
),
}
validation_results = {}
for metric_name, metric_function in metrics.items():
validation_results[metric_name] = metric_function(targets, scores)
writer.add_scalar(
"mlperf-metrics-test/" + metric_name,
validation_results[metric_name],
log_iter,
)
acc_test = validation_results["accuracy"]
else:
acc_test = test_accu / test_samp
writer.add_scalar("Test/Acc", acc_test, log_iter)
model_metrics_dict = {
"nepochs": args.nepochs,
"nbatches": nbatches,
"nbatches_test": nbatches_test,
"state_dict": dlrm.state_dict(),
"test_acc": acc_test,
}
if args.mlperf_logging:
is_best = validation_results["roc_auc"] > best_auc_test
if is_best:
best_auc_test = validation_results["roc_auc"]
model_metrics_dict["test_auc"] = best_auc_test
print(
"recall {:.4f}, precision {:.4f},".format(
validation_results["recall"],
validation_results["precision"],
)
+ " f1 {:.4f}, ap {:.4f},".format(
validation_results["f1"], validation_results["ap"]
)
+ " auc {:.4f}, best auc {:.4f},".format(
validation_results["roc_auc"], best_auc_test
)
+ " accuracy {:3.3f} %, best accuracy {:3.3f} %".format(
validation_results["accuracy"] * 100, best_acc_test * 100
),
flush=True,
)
else:
is_best = acc_test > best_acc_test
if is_best:
best_acc_test = acc_test
print(
" accuracy {:3.3f} %, best {:3.3f} %".format(
acc_test * 100, best_acc_test * 100
),
flush=True,
)
return model_metrics_dict, is_best
def run():
### parse arguments ###
parser = argparse.ArgumentParser(
description="Train Deep Learning Recommendation Model (DLRM)"
)
# model related parameters
parser.add_argument("--arch-sparse-feature-size", type=int, default=2)
parser.add_argument(
"--arch-embedding-size", type=dash_separated_ints, default="4-3-2"
)
# j will be replaced with the table number
parser.add_argument("--arch-mlp-bot", type=dash_separated_ints, default="4-3-2")
parser.add_argument("--arch-mlp-top", type=dash_separated_ints, default="4-2-1")
parser.add_argument(
"--arch-interaction-op", type=str, choices=["dot", "cat"], default="dot"
)
parser.add_argument("--arch-interaction-itself", action="store_true", default=False)
parser.add_argument("--weighted-pooling", type=str, default=None)
# embedding table options
parser.add_argument("--md-flag", action="store_true", default=False)
parser.add_argument("--md-threshold", type=int, default=200)
parser.add_argument("--md-temperature", type=float, default=0.3)
parser.add_argument("--md-round-dims", action="store_true", default=False)
parser.add_argument("--qr-flag", action="store_true", default=False)
parser.add_argument("--qr-threshold", type=int, default=200)
parser.add_argument("--qr-operation", type=str, default="mult")
parser.add_argument("--qr-collisions", type=int, default=4)
# activations and loss
parser.add_argument("--activation-function", type=str, default="relu")
parser.add_argument("--loss-function", type=str, default="mse") # or bce or wbce
parser.add_argument(
"--loss-weights", type=dash_separated_floats, default="1.0-1.0"
) # for wbce
parser.add_argument("--loss-threshold", type=float, default=0.0) # 1.0e-7
parser.add_argument("--round-targets", type=bool, default=False)
# data
parser.add_argument("--data-size", type=int, default=1)
parser.add_argument("--num-batches", type=int, default=0)
parser.add_argument(
"--data-generation", type=str, default="random"
) # synthetic or dataset
parser.add_argument(
"--rand-data-dist", type=str, default="uniform"
) # uniform or gaussian
parser.add_argument("--rand-data-min", type=float, default=0)
parser.add_argument("--rand-data-max", type=float, default=1)
parser.add_argument("--rand-data-mu", type=float, default=-1)
parser.add_argument("--rand-data-sigma", type=float, default=1)
parser.add_argument("--data-trace-file", type=str, default="./input/dist_emb_j.log")
parser.add_argument("--data-set", type=str, default="kaggle") # or terabyte
parser.add_argument("--raw-data-file", type=str, default="")
parser.add_argument("--processed-data-file", type=str, default="")
parser.add_argument("--data-randomize", type=str, default="total") # or day or none
parser.add_argument("--data-trace-enable-padding", type=bool, default=False)
parser.add_argument("--max-ind-range", type=int, default=-1)
parser.add_argument("--data-sub-sample-rate", type=float, default=0.0) # in [0, 1]
parser.add_argument("--num-indices-per-lookup", type=int, default=10)
parser.add_argument("--num-indices-per-lookup-fixed", type=bool, default=False)
parser.add_argument("--num-workers", type=int, default=0)
parser.add_argument("--memory-map", action="store_true", default=False)
# training
parser.add_argument("--mini-batch-size", type=int, default=1)
parser.add_argument("--nepochs", type=int, default=1)
parser.add_argument("--learning-rate", type=float, default=0.01)
parser.add_argument("--print-precision", type=int, default=5)
parser.add_argument("--numpy-rand-seed", type=int, default=123)
parser.add_argument("--sync-dense-params", type=bool, default=True)
parser.add_argument("--optimizer", type=str, default="sgd")
parser.add_argument(
"--dataset-multiprocessing",
action="store_true",
default=False,
help="The Kaggle dataset can be multiprocessed in an environment \
with more than 7 CPU cores and more than 20 GB of memory. \n \
The Terabyte dataset can be multiprocessed in an environment \
with more than 24 CPU cores and at least 1 TB of memory.",
)
# inference
parser.add_argument("--inference-only", action="store_true", default=False)
# quantize
parser.add_argument("--quantize-mlp-with-bit", type=int, default=32)
parser.add_argument("--quantize-emb-with-bit", type=int, default=32)
# onnx
parser.add_argument("--save-onnx", action="store_true", default=False)
# gpu
parser.add_argument("--use-gpu", action="store_true", default=False)
# distributed
parser.add_argument("--local_rank", type=int, default=-1)
parser.add_argument("--dist-backend", type=str, default="")
# debugging and profiling
parser.add_argument("--print-freq", type=int, default=1)
parser.add_argument("--test-freq", type=int, default=1)
parser.add_argument("--test-mini-batch-size", type=int, default=-1)
parser.add_argument("--test-num-workers", type=int, default=-1)
parser.add_argument("--print-time", action="store_true", default=False)
parser.add_argument("--print-wall-time", action="store_true", default=False)
parser.add_argument("--debug-mode", action="store_true", default=False)
parser.add_argument("--enable-profiling", action="store_true", default=False)
parser.add_argument("--plot-compute-graph", action="store_true", default=False)
parser.add_argument("--tensor-board-filename", type=str, default="run_kaggle_pt")
# store/load model
parser.add_argument("--save-model", type=str, default="")
parser.add_argument("--load-model", type=str, default="")
# mlperf logging (disables other output and stops early)
parser.add_argument("--mlperf-logging", action="store_true", default=False)
# stop at target accuracy Kaggle 0.789, Terabyte (sub-sampled=0.875) 0.8107
parser.add_argument("--mlperf-acc-threshold", type=float, default=0.0)
# stop at target AUC Terabyte (no subsampling) 0.8025
parser.add_argument("--mlperf-auc-threshold", type=float, default=0.0)
parser.add_argument("--mlperf-bin-loader", action="store_true", default=False)
parser.add_argument("--mlperf-bin-shuffle", action="store_true", default=False)
# mlperf gradient accumulation iterations
parser.add_argument("--mlperf-grad-accum-iter", type=int, default=1)
# LR policy
parser.add_argument("--lr-num-warmup-steps", type=int, default=0)
parser.add_argument("--lr-decay-start-step", type=int, default=0)
parser.add_argument("--lr-num-decay-steps", type=int, default=0)
global args
global nbatches
global nbatches_test
global writer
args = parser.parse_args()
if args.dataset_multiprocessing:
        assert sys.version_info > (3, 7), "The dataset_multiprocessing " + \
"flag is susceptible to a bug in Python 3.7 and under. " + \
"https://github.com/facebookresearch/dlrm/issues/172"
if args.mlperf_logging:
mlperf_logger.log_event(key=mlperf_logger.constants.CACHE_CLEAR, value=True)
mlperf_logger.log_start(
key=mlperf_logger.constants.INIT_START, log_all_ranks=True
)
if args.weighted_pooling is not None:
if args.qr_flag:
sys.exit("ERROR: quotient remainder with weighted pooling is not supported")
if args.md_flag:
sys.exit("ERROR: mixed dimensions with weighted pooling is not supported")
if args.quantize_emb_with_bit in [4, 8]:
if args.qr_flag:
sys.exit(
"ERROR: 4 and 8-bit quantization with quotient remainder is not supported"
)
if args.md_flag:
sys.exit(
"ERROR: 4 and 8-bit quantization with mixed dimensions is not supported"
)
if args.use_gpu:
sys.exit(
"ERROR: 4 and 8-bit quantization on GPU is not supported"
)
### some basic setup ###
np.random.seed(args.numpy_rand_seed)
np.set_printoptions(precision=args.print_precision)
torch.set_printoptions(precision=args.print_precision)
torch.manual_seed(args.numpy_rand_seed)
if args.test_mini_batch_size < 0:
# if the parameter is not set, use the training batch size
args.test_mini_batch_size = args.mini_batch_size
if args.test_num_workers < 0:
# if the parameter is not set, use the same parameter for training
args.test_num_workers = args.num_workers
use_gpu = args.use_gpu and torch.cuda.is_available()
if not args.debug_mode:
ext_dist.init_distributed(local_rank=args.local_rank, use_gpu=use_gpu, backend=args.dist_backend)
if use_gpu:
torch.cuda.manual_seed_all(args.numpy_rand_seed)
torch.backends.cudnn.deterministic = True
if ext_dist.my_size > 1:
ngpus = 1
device = torch.device("cuda", ext_dist.my_local_rank)
else:
ngpus = torch.cuda.device_count()
device = torch.device("cuda", 0)
print("Using {} GPU(s)...".format(ngpus))
else:
device = torch.device("cpu")
print("Using CPU...")
### prepare training data ###
ln_bot = np.fromstring(args.arch_mlp_bot, dtype=int, sep="-")
# input data
if args.mlperf_logging:
mlperf_logger.barrier()
mlperf_logger.log_end(key=mlperf_logger.constants.INIT_STOP)
mlperf_logger.barrier()
mlperf_logger.log_start(key=mlperf_logger.constants.RUN_START)
mlperf_logger.barrier()
if args.data_generation == "dataset":
train_data, train_ld, test_data, test_ld = dp.make_criteo_data_and_loaders(args)
table_feature_map = {idx: idx for idx in range(len(train_data.counts))}
nbatches = args.num_batches if args.num_batches > 0 else len(train_ld)
nbatches_test = len(test_ld)
ln_emb = train_data.counts
# enforce maximum limit on number of vectors per embedding
if args.max_ind_range > 0:
ln_emb = np.array(
list(
map(
lambda x: x if x < args.max_ind_range else args.max_ind_range,
ln_emb,
)
)
)
else:
ln_emb = np.array(ln_emb)
m_den = train_data.m_den
ln_bot[0] = m_den
else:
# input and target at random
ln_emb = np.fromstring(args.arch_embedding_size, dtype=int, sep="-")
m_den = ln_bot[0]
train_data, train_ld, test_data, test_ld = dp.make_random_data_and_loader(args, ln_emb, m_den)
nbatches = args.num_batches if args.num_batches > 0 else len(train_ld)
nbatches_test = len(test_ld)
args.ln_emb = ln_emb.tolist()
if args.mlperf_logging:
print("command line args: ", json.dumps(vars(args)))
    ### parse model architecture parameters ###
m_spa = args.arch_sparse_feature_size
ln_emb = np.asarray(ln_emb)
num_fea = ln_emb.size + 1 # num sparse + num dense features
m_den_out = ln_bot[ln_bot.size - 1]
if args.arch_interaction_op == "dot":
# approach 1: all
# num_int = num_fea * num_fea + m_den_out
# approach 2: unique
if args.arch_interaction_itself:
num_int = (num_fea * (num_fea + 1)) // 2 + m_den_out
else:
num_int = (num_fea * (num_fea - 1)) // 2 + m_den_out
elif args.arch_interaction_op == "cat":
num_int = num_fea * m_den_out
else:
sys.exit(
"ERROR: --arch-interaction-op="
+ args.arch_interaction_op
+ " is not supported"
)
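    # Worked example with the defaults above: "4-3-2" embeddings give 3 sparse
    # features, so num_fea = 4, and m_den_out = 2 (last dim of the "4-3-2" bottom mlp);
    # the "dot" op without self-interaction then yields num_int = (4 * 3) // 2 + 2 = 8.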
arch_mlp_top_adjusted = str(num_int) + "-" + args.arch_mlp_top
ln_top = np.fromstring(arch_mlp_top_adjusted, dtype=int, sep="-")
# sanity check: feature sizes and mlp dimensions must match
if m_den != ln_bot[0]:
sys.exit(
"ERROR: arch-dense-feature-size "
+ str(m_den)
+ " does not match first dim of bottom mlp "
+ str(ln_bot[0])
)
if args.qr_flag:
if args.qr_operation == "concat" and 2 * m_spa != m_den_out:
sys.exit(
"ERROR: 2 arch-sparse-feature-size "
+ str(2 * m_spa)
+ " does not match last dim of bottom mlp "
+ str(m_den_out)
+ " (note that the last dim of bottom mlp must be 2x the embedding dim)"
)
if args.qr_operation != "concat" and m_spa != m_den_out:
sys.exit(
"ERROR: arch-sparse-feature-size "
+ str(m_spa)
+ " does not match last dim of bottom mlp "
+ str(m_den_out)
)
else:
if m_spa != m_den_out:
sys.exit(
"ERROR: arch-sparse-feature-size "
+ str(m_spa)
+ " does not match last dim of bottom mlp "
+ str(m_den_out)
)
if num_int != ln_top[0]:
sys.exit(
"ERROR: # of feature interactions "
+ str(num_int)
+ " does not match first dimension of top mlp "
+ str(ln_top[0])
)
# assign mixed dimensions if applicable
if args.md_flag:
m_spa = md_solver(
torch.tensor(ln_emb),
args.md_temperature, # alpha
d0=m_spa,
round_dim=args.md_round_dims,
).tolist()
# test prints (model arch)
if args.debug_mode:
print("model arch:")
print(
"mlp top arch "
+ str(ln_top.size - 1)
+ " layers, with input to output dimensions:"
)
print(ln_top)
print("# of interactions")
print(num_int)
print(
"mlp bot arch "
+ str(ln_bot.size - 1)
+ " layers, with input to output dimensions:"
)
print(ln_bot)
print("# of features (sparse and dense)")
print(num_fea)
print("dense feature size")
print(m_den)
print("sparse feature size")
print(m_spa)
print(
"# of embeddings (= # of sparse features) "
+ str(ln_emb.size)
+ ", with dimensions "
+ str(m_spa)
+ "x:"
)
print(ln_emb)
print("data (inputs and targets):")
for j, inputBatch in enumerate(train_ld):
X, lS_o, lS_i, T, W, CBPP = unpack_batch(inputBatch)
torch.set_printoptions(precision=4)
# early exit if nbatches was set by the user and has been exceeded
if nbatches > 0 and j >= nbatches:
break
print("mini-batch: %d" % j)
print(X.detach().cpu())
# transform offsets to lengths when printing
print(
torch.IntTensor(
[
np.diff(
S_o.detach().cpu().tolist() + list(lS_i[i].shape)
).tolist()
for i, S_o in enumerate(lS_o)
]
)
)
print([S_i.detach().cpu() for S_i in lS_i])
print(T.detach().cpu())
global ndevices
ndevices = min(ngpus, args.mini_batch_size, num_fea - 1) if use_gpu else -1
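    # ndevices is capped by the number of embedding tables (num_fea - 1) and by the
    # batch size, so every device used gets at least one table and at least one sample.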
### construct the neural network specified above ###
# WARNING: to obtain exactly the same initialization for
# the weights we need to start from the same random seed.
# np.random.seed(args.numpy_rand_seed)
global dlrm
dlrm = DLRM_Net(
m_spa,
ln_emb,
ln_bot,
ln_top,
arch_interaction_op=args.arch_interaction_op,
arch_interaction_itself=args.arch_interaction_itself,
sigmoid_bot=-1,
sigmoid_top=ln_top.size - 2,
sync_dense_params=args.sync_dense_params,
loss_threshold=args.loss_threshold,
ndevices=ndevices,
qr_flag=args.qr_flag,
qr_operation=args.qr_operation,
qr_collisions=args.qr_collisions,
qr_threshold=args.qr_threshold,
md_flag=args.md_flag,
md_threshold=args.md_threshold,
weighted_pooling=args.weighted_pooling,
loss_function=args.loss_function
)
# test prints
if args.debug_mode:
print("initial parameters (weights and bias):")
for param in dlrm.parameters():
print(param.detach().cpu().numpy())
# print(dlrm)
if use_gpu:
# Custom Model-Data Parallel
# the mlps are replicated and use data parallelism, while
# the embeddings are distributed and use model parallelism
dlrm = dlrm.to(device) # .cuda()
if dlrm.ndevices > 1:
dlrm.emb_l, dlrm.v_W_l = dlrm.create_emb(
m_spa, ln_emb, args.weighted_pooling
)
else:
if dlrm.weighted_pooling == "fixed":
for k, w in enumerate(dlrm.v_W_l):
dlrm.v_W_l[k] = w.cuda()
# distribute data parallel mlps
if ext_dist.my_size > 1:
if use_gpu:
device_ids = [ext_dist.my_local_rank]
dlrm.bot_l = ext_dist.DDP(dlrm.bot_l, device_ids=device_ids)
dlrm.top_l = ext_dist.DDP(dlrm.top_l, device_ids=device_ids)
else:
dlrm.bot_l = ext_dist.DDP(dlrm.bot_l)
dlrm.top_l = ext_dist.DDP(dlrm.top_l)
if not args.inference_only:
#if use_gpu and args.optimizer in ["rwsadagrad", "adagrad"]:
#sys.exit("GPU version of Adagrad is not supported by PyTorch.")
# specify the optimizer algorithm
opts = {
"sgd": torch.optim.SGD,
"rwsadagrad": RowWiseSparseAdagrad.RWSAdagrad,
"adagrad": torch.optim.Adagrad,
"RMSprop": torch.optim.RMSprop,
"Adadelta" : torch.optim.Adadelta,
"Adam" :torch.optim.Adam,
# "AdaMax" : torch.optim.Adamax(parameters, lr=0.002, betas=(0.9, 0.999), eps=1e-08, weight_decay=0),
# "ASGD" : torch.optim.ASGD(parameters, lr=0.01, lambd=0.0001, alpha=0.75, t0=1000000.0, weight_decay=0)
}
parameters = (
dlrm.parameters()
if ext_dist.my_size == 1
else [
{
"params": [p for emb in dlrm.emb_l for p in emb.parameters()],
"lr": args.learning_rate,
},
# TODO check this lr setup
# bottom mlp has no data parallelism
# need to check how do we deal with top mlp
{
"params": dlrm.bot_l.parameters(),
"lr": args.learning_rate,
},
{
"params": dlrm.top_l.parameters(),
"lr": args.learning_rate,
},
]
)
if args.optimizer in ["rwsadagrad", "adagrad"]:
optimizer = opts[args.optimizer](parameters, lr=args.learning_rate)
elif args.optimizer == "RMSprop":
optimizer = opts[args.optimizer](parameters, lr=args.learning_rate, alpha=0.99, eps=1e-08, weight_decay=0, momentum=0, centered=False)
elif args.optimizer == "Adam":
optimizer = opts[args.optimizer](parameters, lr=args.learning_rate, betas=(0.9, 0.999), eps=1e-08)
elif args.optimizer == "Adadelta":
optimizer = opts[args.optimizer](parameters, lr=args.learning_rate, rho=0.9, eps=1e-06, weight_decay=0)
lr_scheduler = LRPolicyScheduler(
optimizer,
args.lr_num_warmup_steps,
args.lr_decay_start_step,
args.lr_num_decay_steps,
)
### main loop ###
# training or inference
best_acc_test = 0
best_auc_test = 0
skip_upto_epoch = 0
skip_upto_batch = 0
total_time = 0
total_loss = 0
total_iter = 0
total_samp = 0
if args.mlperf_logging:
mlperf_logger.mlperf_submission_log("dlrm")
mlperf_logger.log_event(
key=mlperf_logger.constants.SEED, value=args.numpy_rand_seed
)
mlperf_logger.log_event(
key=mlperf_logger.constants.GLOBAL_BATCH_SIZE, value=args.mini_batch_size
)
    # Load model if specified
if not (args.load_model == ""):
print("Loading saved model {}".format(args.load_model))
if use_gpu:
if dlrm.ndevices > 1:
# NOTE: when targeting inference on multiple GPUs,
# load the model as is on CPU or GPU, with the move
# to multiple GPUs to be done in parallel_forward
ld_model = torch.load(args.load_model)
else:
# NOTE: when targeting inference on single GPU,
# note that the call to .to(device) has already happened
ld_model = torch.load(
args.load_model,
map_location=torch.device("cuda")
# map_location=lambda storage, loc: storage.cuda(0)
)
else:
# when targeting inference on CPU
ld_model = torch.load(args.load_model, map_location=torch.device("cpu"))
dlrm.load_state_dict(ld_model["state_dict"])
ld_j = ld_model["iter"]
ld_k = ld_model["epoch"]
ld_nepochs = ld_model["nepochs"]
ld_nbatches = ld_model["nbatches"]
ld_nbatches_test = ld_model["nbatches_test"]
ld_train_loss = ld_model["train_loss"]
ld_total_loss = ld_model["total_loss"]
if args.mlperf_logging:
ld_gAUC_test = ld_model["test_auc"]
ld_acc_test = ld_model["test_acc"]
if not args.inference_only:
optimizer.load_state_dict(ld_model["opt_state_dict"])
best_acc_test = ld_acc_test
total_loss = ld_total_loss
skip_upto_epoch = ld_k # epochs
skip_upto_batch = ld_j # batches
else:
args.print_freq = ld_nbatches
args.test_freq = 0
print(
"Saved at: epoch = {:d}/{:d}, batch = {:d}/{:d}, ntbatch = {:d}".format(
ld_k, ld_nepochs, ld_j, ld_nbatches, ld_nbatches_test
)
)
print(
"Training state: loss = {:.6f}".format(
ld_train_loss,
)
)
if args.mlperf_logging:
print(
"Testing state: accuracy = {:3.3f} %, auc = {:.3f}".format(
ld_acc_test * 100, ld_gAUC_test
)
)
else:
print("Testing state: accuracy = {:3.3f} %".format(ld_acc_test * 100))
if args.inference_only:
        # Currently only dynamic quantization with INT8 and FP16 weights is
        # supported for MLPs, and INT4 and INT8 weights for EmbeddingBag,
        # as post-training quantization during inference.
        # By default we don't quantize: quantize_{mlp,emb}_with_bit == 32 (FP32)
assert args.quantize_mlp_with_bit in [
8,
16,
32,
], "only support 8/16/32-bit but got {}".format(args.quantize_mlp_with_bit)
assert args.quantize_emb_with_bit in [
4,
8,
32,
], "only support 4/8/32-bit but got {}".format(args.quantize_emb_with_bit)
if args.quantize_mlp_with_bit != 32:
if args.quantize_mlp_with_bit in [8]:
quantize_dtype = torch.qint8
else:
quantize_dtype = torch.float16
dlrm = torch.quantization.quantize_dynamic(
dlrm, {torch.nn.Linear}, quantize_dtype
)
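            # dynamic quantization stores the Linear weights in int8 (or casts them to
            # fp16); activations remain fp32 and are quantized on the fly at inference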
if args.quantize_emb_with_bit != 32:
dlrm.quantize_embedding(args.quantize_emb_with_bit)
# print(dlrm)
print("time/loss/accuracy (if enabled):")
if args.mlperf_logging:
# LR is logged twice for now because of a compliance checker bug
mlperf_logger.log_event(
key=mlperf_logger.constants.OPT_BASE_LR, value=args.learning_rate
)
mlperf_logger.log_event(
key=mlperf_logger.constants.OPT_LR_WARMUP_STEPS,
value=args.lr_num_warmup_steps,
)
# use logging keys from the official HP table and not from the logging library
mlperf_logger.log_event(
key="sgd_opt_base_learning_rate", value=args.learning_rate
)
mlperf_logger.log_event(
key="lr_decay_start_steps", value=args.lr_decay_start_step
)
mlperf_logger.log_event(
key="sgd_opt_learning_rate_decay_steps", value=args.lr_num_decay_steps
)
mlperf_logger.log_event(key="sgd_opt_learning_rate_decay_poly_power", value=2)
tb_file = "./" + args.tensor_board_filename
writer = SummaryWriter(tb_file)
ext_dist.barrier()
with torch.autograd.profiler.profile(
args.enable_profiling, use_cuda=use_gpu, record_shapes=True
) as prof:
if not args.inference_only:
k = 0
total_time_begin = 0
while k < args.nepochs:
if args.mlperf_logging:
mlperf_logger.barrier()
mlperf_logger.log_start(
key=mlperf_logger.constants.BLOCK_START,
metadata={
mlperf_logger.constants.FIRST_EPOCH_NUM: (k + 1),
mlperf_logger.constants.EPOCH_COUNT: 1,
},
)
mlperf_logger.barrier()
mlperf_logger.log_start(
key=mlperf_logger.constants.EPOCH_START,
metadata={mlperf_logger.constants.EPOCH_NUM: (k + 1)},
)
if k < skip_upto_epoch:
continue
if args.mlperf_logging:
previous_iteration_time = None
for j, inputBatch in enumerate(train_ld):
if j == 0 and args.save_onnx:
X_onnx, lS_o_onnx, lS_i_onnx, _, _, _ = unpack_batch(inputBatch)
if j < skip_upto_batch:
continue
X, lS_o, lS_i, T, W, CBPP = unpack_batch(inputBatch)
if args.mlperf_logging:
current_time = time_wrap(use_gpu)
if previous_iteration_time:
iteration_time = current_time - previous_iteration_time
else:
iteration_time = 0
previous_iteration_time = current_time
else:
t1 = time_wrap(use_gpu)
# early exit if nbatches was set by the user and has been exceeded
if nbatches > 0 and j >= nbatches:
break
# Skip the batch if batch size not multiple of total ranks
if ext_dist.my_size > 1 and X.size(0) % ext_dist.my_size != 0:
print(
"Warning: Skiping the batch %d with size %d"
% (j, X.size(0))
)
continue
mbs = T.shape[0] # = args.mini_batch_size except maybe for last
# forward pass
Z = dlrm_wrap(
X,
lS_o,
lS_i,
use_gpu,
device,
ndevices=ndevices,
)
if ext_dist.my_size > 1:
T = T[ext_dist.get_my_slice(mbs)]
W = W[ext_dist.get_my_slice(mbs)]
# loss
E = loss_fn_wrap(Z, T, use_gpu, device)
# compute loss and accuracy
L = E.detach().cpu().numpy() # numpy array
                    # training accuracy computation is disabled below (code kept for reference)
# S = Z.detach().cpu().numpy() # numpy array
# T = T.detach().cpu().numpy() # numpy array
# # print("res: ", S)
# # print("j, train: BCE ", j, L)
# mbs = T.shape[0] # = args.mini_batch_size except maybe for last
# A = np.sum((np.round(S, 0) == T).astype(np.uint8))
with record_function("DLRM backward"):
# scaled error gradient propagation
# (where we do not accumulate gradients across mini-batches)
if (args.mlperf_logging and (j + 1) % args.mlperf_grad_accum_iter == 0) or not args.mlperf_logging:
optimizer.zero_grad()
# backward pass
E.backward()
# optimizer
if (args.mlperf_logging and (j + 1) % args.mlperf_grad_accum_iter == 0) or not args.mlperf_logging:
optimizer.step()
lr_scheduler.step()
if args.mlperf_logging:
total_time += iteration_time
else:
t2 = time_wrap(use_gpu)
total_time += t2 - t1
total_loss += L * mbs
total_iter += 1
total_samp += mbs
should_print = ((j + 1) % args.print_freq == 0) or (
j + 1 == nbatches
)
should_test = (
(args.test_freq > 0)
and (args.data_generation in ["dataset", "random"])
and (((j + 1) % args.test_freq == 0) or (j + 1 == nbatches))
)
# print time, loss and accuracy
if should_print or should_test:
gT = 1000.0 * total_time / total_iter if args.print_time else -1
total_time = 0
train_loss = total_loss / total_samp
total_loss = 0
str_run_type = (
"inference" if args.inference_only else "training"
)
wall_time = ""
if args.print_wall_time:
wall_time = " ({})".format(time.strftime("%H:%M"))
print(
"Finished {} it {}/{} of epoch {}, {:.2f} ms/it,".format(
str_run_type, j + 1, nbatches, k, gT
)
+ " loss {:.6f}".format(train_loss)
+ wall_time,
flush=True,
)
log_iter = nbatches * k + j + 1
writer.add_scalar("Train/Loss", train_loss, log_iter)
total_iter = 0
total_samp = 0
# testing
if should_test:
epoch_num_float = (j + 1) / len(train_ld) + k + 1
if args.mlperf_logging:
mlperf_logger.barrier()
mlperf_logger.log_start(
key=mlperf_logger.constants.EVAL_START,
metadata={
mlperf_logger.constants.EPOCH_NUM: epoch_num_float
},
)
# don't measure training iter time in a test iteration
if args.mlperf_logging:
previous_iteration_time = None
print(
"Testing at - {}/{} of epoch {},".format(j + 1, nbatches, k)
)
model_metrics_dict, is_best = inference(
args,
dlrm,
best_acc_test,
best_auc_test,
test_ld,
device,
use_gpu,
log_iter,
)
if (
is_best
and not (args.save_model == "")
and not args.inference_only
):
model_metrics_dict["epoch"] = k
model_metrics_dict["iter"] = j + 1
model_metrics_dict["train_loss"] = train_loss
model_metrics_dict["total_loss"] = total_loss
model_metrics_dict[
"opt_state_dict"
] = optimizer.state_dict()
print("Saving model to {}".format(args.save_model))
torch.save(model_metrics_dict, args.save_model)
if args.mlperf_logging:
mlperf_logger.barrier()
mlperf_logger.log_end(
key=mlperf_logger.constants.EVAL_STOP,
metadata={
mlperf_logger.constants.EPOCH_NUM: epoch_num_float
},
)
# Uncomment the line below to print out the total time with overhead
# print("Total test time for this group: {}" \
# .format(time_wrap(use_gpu) - accum_test_time_begin))
if (
args.mlperf_logging
and (args.mlperf_acc_threshold > 0)
and (best_acc_test > args.mlperf_acc_threshold)
):
print(
"MLPerf testing accuracy threshold "
+ str(args.mlperf_acc_threshold)
+ " reached, stop training"
)
break
if (
args.mlperf_logging
and (args.mlperf_auc_threshold > 0)
and (best_auc_test > args.mlperf_auc_threshold)
):
print(
"MLPerf testing auc threshold "
+ str(args.mlperf_auc_threshold)
+ " reached, stop training"
)
if args.mlperf_logging:
mlperf_logger.barrier()
mlperf_logger.log_end(
key=mlperf_logger.constants.RUN_STOP,
metadata={
mlperf_logger.constants.STATUS: mlperf_logger.constants.SUCCESS
},
)
break
if args.mlperf_logging:
mlperf_logger.barrier()
mlperf_logger.log_end(
key=mlperf_logger.constants.EPOCH_STOP,
metadata={mlperf_logger.constants.EPOCH_NUM: (k + 1)},
)
mlperf_logger.barrier()
mlperf_logger.log_end(
key=mlperf_logger.constants.BLOCK_STOP,
metadata={mlperf_logger.constants.FIRST_EPOCH_NUM: (k + 1)},
)
k += 1 # nepochs
if args.mlperf_logging and best_auc_test <= args.mlperf_auc_threshold:
mlperf_logger.barrier()
mlperf_logger.log_end(
key=mlperf_logger.constants.RUN_STOP,
metadata={
mlperf_logger.constants.STATUS: mlperf_logger.constants.ABORTED
},
)
else:
print("Testing for inference only")
inference(
args,
dlrm,
best_acc_test,
best_auc_test,
test_ld,
device,
use_gpu,
)
# profiling
if args.enable_profiling:
time_stamp = str(datetime.datetime.now()).replace(" ", "_")
with open("dlrm_s_pytorch" + time_stamp + "_shape.prof", "w") as prof_f:
prof_f.write(
prof.key_averages(group_by_input_shape=True).table(
sort_by="self_cpu_time_total"
)
)
with open("dlrm_s_pytorch" + time_stamp + "_total.prof", "w") as prof_f:
prof_f.write(prof.key_averages().table(sort_by="self_cpu_time_total"))
prof.export_chrome_trace("dlrm_s_pytorch" + time_stamp + ".json")
# print(prof.key_averages().table(sort_by="cpu_time_total"))
# plot compute graph
if args.plot_compute_graph:
sys.exit(
"ERROR: Please install pytorchviz package in order to use the"
+ " visualization. Then, uncomment its import above as well as"
+ " three lines below and run the code again."
)
# V = Z.mean() if args.inference_only else E
# dot = make_dot(V, params=dict(dlrm.named_parameters()))
# dot.render('dlrm_s_pytorch_graph') # write .pdf file
# test prints
if not args.inference_only and args.debug_mode:
print("updated parameters (weights and bias):")
for param in dlrm.parameters():
print(param.detach().cpu().numpy())
# export the model in onnx
if args.save_onnx:
"""
# workaround 1: tensor -> list
if torch.is_tensor(lS_i_onnx):
lS_i_onnx = [lS_i_onnx[j] for j in range(len(lS_i_onnx))]
# workaound 2: list -> tensor
lS_i_onnx = torch.stack(lS_i_onnx)
"""
# debug prints
# print("inputs", X_onnx, lS_o_onnx, lS_i_onnx)
# print("output", dlrm_wrap(X_onnx, lS_o_onnx, lS_i_onnx, use_gpu, device))
dlrm_pytorch_onnx_file = "dlrm_s_pytorch.onnx"
batch_size = X_onnx.shape[0]
print("X_onnx.shape", X_onnx.shape)
if torch.is_tensor(lS_o_onnx):
print("lS_o_onnx.shape", lS_o_onnx.shape)
else:
for oo in lS_o_onnx:
print("oo.shape", oo.shape)
if torch.is_tensor(lS_i_onnx):
print("lS_i_onnx.shape", lS_i_onnx.shape)
else:
for ii in lS_i_onnx:
print("ii.shape", ii.shape)
# name inputs and outputs
o_inputs = (
["offsets"]
if torch.is_tensor(lS_o_onnx)
else ["offsets_" + str(i) for i in range(len(lS_o_onnx))]
)
i_inputs = (
["indices"]
if torch.is_tensor(lS_i_onnx)
else ["indices_" + str(i) for i in range(len(lS_i_onnx))]
)
all_inputs = ["dense_x"] + o_inputs + i_inputs
# debug prints
print("inputs", all_inputs)
# create dynamic_axis dictionaries
do_inputs = (
[{"offsets": {1: "batch_size"}}]
if torch.is_tensor(lS_o_onnx)
else [
{"offsets_" + str(i): {0: "batch_size"}} for i in range(len(lS_o_onnx))
]
)
di_inputs = (
[{"indices": {1: "batch_size"}}]
if torch.is_tensor(lS_i_onnx)
else [
{"indices_" + str(i): {0: "batch_size"}} for i in range(len(lS_i_onnx))
]
)
dynamic_axes = {"dense_x": {0: "batch_size"}, "pred": {0: "batch_size"}}
for do in do_inputs:
dynamic_axes.update(do)
for di in di_inputs:
dynamic_axes.update(di)
# debug prints
print(dynamic_axes)
# export model
torch.onnx.export(
dlrm,
(X_onnx, lS_o_onnx, lS_i_onnx),
dlrm_pytorch_onnx_file,
verbose=True,
use_external_data_format=True,
opset_version=11,
input_names=all_inputs,
output_names=["pred"],
dynamic_axes=dynamic_axes,
)
# recover the model back
dlrm_pytorch_onnx = onnx.load("dlrm_s_pytorch.onnx")
# check the onnx model
onnx.checker.check_model(dlrm_pytorch_onnx)
total_time_end = time_wrap(use_gpu)
if __name__ == "__main__":
run()
|
the-stack_0_16102 | # Copyright 2017 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
import copy
import datetime
from dateutil.tz import tzlocal
import time
from policyuniverse import expand_policy, get_actions_from_statement, all_permissions
from repokid.utils.dynamo import (add_to_end_of_list, get_role_data, role_ids_for_account, set_role_data,
store_initial_role_data)
from repokid import CONFIG as CONFIG
from repokid import LOGGER as LOGGER
import repokid.hooks
from repokid.role import Role
IAM_ACCESS_ADVISOR_UNSUPPORTED_SERVICES = frozenset(['lightsail', 'organizations', 'tag'])
IAM_ACCESS_ADVISOR_UNSUPPORTED_ACTIONS = frozenset(['iam:passrole'])
# permission decisions have the form repoable - boolean, and decider - string
class RepoablePermissionDecision(object):
def __init__(self):
self.repoable = None
self.decider = ''
def __repr__(self):
return('Is repoable: {}, Decider: {}'.format(self.repoable, self.decider))
def add_new_policy_version(dynamo_table, role, current_policy, update_source):
"""
    Create a new entry in the history of policy versions in Dynamo. The entry contains the source of the new policy
    (scan, repo, or restore), the current time, and the current policy contents. Updates the role's policies with the
full policies including the latest.
Args:
role (Role)
current_policy (dict)
update_source (string): ['Repo', 'Scan', 'Restore']
Returns:
None
"""
policy_entry = {'Source': update_source, 'Discovered': datetime.datetime.utcnow().isoformat(),
'Policy': current_policy}
add_to_end_of_list(dynamo_table, role.role_id, 'Policies', policy_entry)
role.policies = get_role_data(dynamo_table, role.role_id, fields=['Policies'])['Policies']
def find_and_mark_inactive(dynamo_table, account_number, active_roles):
"""
    Mark roles in the account that aren't currently active as inactive. Do this by getting all roles in the account and
    subtracting the active roles; any that are left are inactive and should be marked as such.
Args:
account_number (string)
active_roles (set): the currently active roles discovered in the most recent scan
Returns:
None
"""
active_roles = set(active_roles)
known_roles = set(role_ids_for_account(dynamo_table, account_number))
inactive_roles = known_roles - active_roles
for roleID in inactive_roles:
role_dict = get_role_data(dynamo_table, roleID, fields=['Active', 'Arn'])
if role_dict.get('Active'):
set_role_data(dynamo_table, roleID, {'Active': False})
def find_newly_added_permissions(old_policy, new_policy):
"""
    Compare an old version of policies to a new version and return a set of permissions that were added. This will
be used to maintain a list of permissions that were newly added and should not be repoed for a period of time.
Args:
old_policy
new_policy
Returns:
        set: Expanded set of permissions that are in the new policy and not the old one
"""
old_permissions = _get_role_permissions(Role({'Policies': [{'Policy': old_policy}]}))
new_permissions = _get_role_permissions(Role({'Policies': [{'Policy': new_policy}]}))
return new_permissions - old_permissions
def update_no_repo_permissions(dynamo_table, role, newly_added_permissions):
"""
    Update Dynamo entry for newly added permissions. Any that were newly detected get added with an expiration
date of now plus the config setting for 'repo_requirements': 'exclude_new_permissions_for_days'. Expired entries
get deleted. Also update the role object with the new no-repo-permissions.
Args:
role
newly_added_permissions (set)
Returns:
None
"""
current_ignored_permissions = get_role_data(
dynamo_table, role.role_id, fields=['NoRepoPermissions']).get('NoRepoPermissions', {})
new_ignored_permissions = {}
current_time = int(time.time())
new_perms_expire_time = current_time + (
24 * 60 * 60 * CONFIG['repo_requirements'].get('exclude_new_permissions_for_days', 14))
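    # e.g. with the default of 14 days this adds 14 * 24 * 60 * 60 = 1,209,600 seconds
    # to the current epoch time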
# only copy non-expired items to the new dictionary
for permission, expire_time in current_ignored_permissions.items():
if expire_time > current_time:
new_ignored_permissions[permission] = current_ignored_permissions[permission]
for permission in newly_added_permissions:
new_ignored_permissions[permission] = new_perms_expire_time
role.no_repo_permissions = new_ignored_permissions
set_role_data(dynamo_table, role.role_id, {'NoRepoPermissions': role.no_repo_permissions})
def update_opt_out(dynamo_table, role):
"""
Update opt-out object for a role - remove (set to empty dict) any entries that have expired
Opt-out objects should have the form {'expire': xxx, 'owner': xxx, 'reason': xxx}
Args:
role
Returns:
None
"""
if role.opt_out and int(role.opt_out['expire']) < int(time.time()):
set_role_data(dynamo_table, role.role_id, {'OptOut': {}})
def update_role_data(dynamo_table, account_number, role, current_policy, source='Scan', add_no_repo=True):
"""
Compare the current version of a policy for a role and what has been previously stored in Dynamo.
- If current and new policy versions are different store the new version in Dynamo. Add any newly added
permissions to temporary permission blacklist. Purge any old entries from permission blacklist.
- Refresh the updated time on the role policy
- If the role is completely new, store the first version in Dynamo
- Updates the role with full history of policies, including current version
Args:
dynamo_table
account_number
role (Role): current role being updated
current_policy (dict): representation of the current policy version
source: Default 'Scan' but could be Repo, Rollback, etc
Returns:
None
"""
# policy_entry: source, discovered, policy
stored_role = get_role_data(dynamo_table, role.role_id, fields=['OptOut', 'Policies'])
if not stored_role:
role_dict = store_initial_role_data(dynamo_table, role.arn, role.create_date, role.role_id, role.role_name,
account_number, current_policy)
role.set_attributes(role_dict)
LOGGER.info('Added new role ({}): {}'.format(role.role_id, role.arn))
else:
# is the policy list the same as the last we had?
old_policy = stored_role['Policies'][-1]['Policy']
if current_policy != old_policy:
add_new_policy_version(dynamo_table, role, current_policy, source)
LOGGER.info('{} has different inline policies than last time, adding to role store'.format(role.arn))
newly_added_permissions = find_newly_added_permissions(old_policy, current_policy)
else:
newly_added_permissions = set()
if add_no_repo:
update_no_repo_permissions(dynamo_table, role, newly_added_permissions)
update_opt_out(dynamo_table, role)
set_role_data(dynamo_table, role.role_id, {'Refreshed': datetime.datetime.utcnow().isoformat()})
# Update all data from Dynamo except CreateDate (it's in the wrong format) and DQ_by (we're going to recalc)
current_role_data = get_role_data(dynamo_table, role.role_id)
current_role_data.pop('CreateDate', None)
current_role_data.pop('DisqualifiedBy', None)
role.set_attributes(current_role_data)
def update_stats(dynamo_table, roles, source='Scan'):
"""
Create a new stats entry for each role in a set of roles and add it to Dynamo
Args:
roles (Roles): a list of all the role objects to update data for
source (string): the source of the new stats data (repo, scan, etc)
Returns:
None
"""
for role in roles:
new_stats = {'Date': datetime.datetime.utcnow().isoformat(),
'DisqualifiedBy': role.disqualified_by,
'PermissionsCount': role.total_permissions,
'RepoablePermissionsCount': role.repoable_permissions,
'Source': source}
try:
cur_stats = role.stats[-1]
except IndexError:
cur_stats = {'DisqualifiedBy': [], 'PermissionsCount': 0, 'RepoablePermissionsCount': 0}
for item in ['DisqualifiedBy', 'PermissionsCount', 'RepoablePermissionsCount']:
if new_stats.get(item) != cur_stats.get(item):
add_to_end_of_list(dynamo_table, role.role_id, 'Stats', new_stats)
def _calculate_repo_scores(roles, minimum_age, hooks):
"""
Get the total and repoable permissions count and set of repoable services for every role in the account.
For each role:
1) call _get_role_permissions
2) call _get_repoable_permissions (count), repoable_permissions (count), and repoable_services (list) for role
Each time we got the role permissions we built a list of any permissions that the role's policies granted access
to but weren't in our master list of permissions AWS has. At the end of this run we'll warn about any of these.
Args:
roles (Roles): The set of all roles we're analyzing
minimum_age
hooks
Returns:
None
"""
for role in roles:
permissions = _get_role_permissions(role)
role.total_permissions = len(permissions)
        # if we don't have any access advisor data for the role, then nothing is repoable
if not role.aa_data:
LOGGER.info('No data found in access advisor for {}'.format(role.role_id))
role.repoable_permissions = 0
role.repoable_services = []
continue
# permissions are only repoable if the role isn't being disqualified by filter(s)
if len(role.disqualified_by) == 0:
repoable_permissions = _get_repoable_permissions(role.account, role.role_name, permissions, role.aa_data,
role.no_repo_permissions, minimum_age, hooks)
(repoable_permissions_set, repoable_services_set) = _convert_repoable_perms_to_perms_and_services(
permissions, repoable_permissions)
role.repoable_permissions = len(repoable_permissions)
# we're going to store both repoable permissions and repoable services in the field "RepoableServices"
role.repoable_services = repoable_services_set + repoable_permissions_set
else:
role.repoable_permissions = 0
role.repoable_services = []
def _convert_repoable_perms_to_perms_and_services(total_permissions, repoable_permissions):
"""
Take a list of total permissions and repoable permissions and determine whether only a few permissions are being
repoed or if the entire service (all permissions from that service) are being removed.
Args:
total_permissions (list): A list of the total permissions a role has
repoable_permissions (list): A list of repoable permissions suggested to be removed
Returns:
list: Sorted list of permissions that will be individually removed but other permissions from the service will
be kept
list: Sorted list of services that will be completely removed
"""
repoed_permissions = set()
repoed_services = set()
total_perms_by_service = defaultdict(list)
repoable_perms_by_service = defaultdict(list)
# group total permissions and repoable permissions by service
for perm in total_permissions:
total_perms_by_service[perm.split(':')[0]].append(perm)
for perm in repoable_permissions:
repoable_perms_by_service[perm.split(':')[0]].append(perm)
for service in repoable_perms_by_service:
if all(perm in repoable_perms_by_service[service] for perm in total_perms_by_service[service]):
repoed_services.add(service)
else:
repoed_permissions.update(perm for perm in repoable_perms_by_service[service])
return (sorted(repoed_permissions), sorted(repoed_services))
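# Worked example for _convert_repoable_perms_to_perms_and_services (hypothetical names):
#   total_permissions    = ['s3:getobject', 's3:putobject', 'ec2:runinstances', 'ec2:describeinstances']
#   repoable_permissions = ['s3:getobject', 's3:putobject', 'ec2:runinstances']
# every s3 permission is repoable so the whole service is returned, while ec2 is only
# partially repoable: (['ec2:runinstances'], ['s3'])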
def _convert_repoed_service_to_sorted_perms_and_services(repoed_services):
"""
    Repokid stores a field RepoableServices that historically only stored services (when Access Advisor was the only data source).
Now this field is repurposed to store both services and permissions. We can tell the difference because permissions
always have the form <service>:<permission>. This function splits the contents of the field to sorted sets of
repoable services and permissions.
Args:
repoed_services (list): List from Dynamo of repoable services and permissions
Returns:
list: Sorted list of repoable permissions (where there are other permissions that aren't repoed)
list: Sorted list of repoable services (where the entire service is removed)
"""
repoable_permissions = set()
repoable_services = set()
for entry in repoed_services:
if len(entry.split(':')) == 2:
repoable_permissions.add(entry)
else:
repoable_services.add(entry)
return (sorted(repoable_permissions), sorted(repoable_services))
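# Example: ['s3', 'ec2:runinstances'] -> (['ec2:runinstances'], ['s3']); entries containing
# a ':' are treated as individual permissions, bare names as whole services.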
def _get_repoable_permissions(account_number, role_name, permissions, aa_data, no_repo_permissions, minimum_age,
hooks):
"""
Generate a list of repoable permissions for a role based on the list of all permissions the role's policies
currently allow and Access Advisor data for the services included in the role's policies.
    The first step is to come up with a list of services that were used within the time threshold (the same threshold
    defined in the age filter config). Permissions are repoable if they aren't in the used list, aren't in the constant list
of unsupported services/actions (IAM_ACCESS_ADVISOR_UNSUPPORTED_SERVICES, IAM_ACCESS_ADVISOR_UNSUPPORTED_ACTIONS),
and aren't being temporarily ignored because they're on the no_repo_permissions list (newly added).
Args:
account_number
role_name
permissions (list): The full list of permissions that the role's permissions allow
aa_data (list): A list of Access Advisor data for a role. Each element is a dictionary with a couple required
            attributes: lastAuthenticated (epoch time in milliseconds when the service was last used) and
            serviceNamespace (the service used)
no_repo_permissions (dict): Keys are the name of permissions and values are the time the entry expires
minimum_age: Minimum age of a role (in days) for it to be repoable
hooks: Dict containing hook names and functions to run
Returns:
set: Permissions that are 'repoable' (not used within the time threshold)
"""
ago = datetime.timedelta(minimum_age)
now = datetime.datetime.now(tzlocal())
current_time = time.time()
no_repo_list = [perm.lower() for perm in no_repo_permissions if no_repo_permissions[perm] > current_time]
# cast all permissions to lowercase
permissions = [permission.lower() for permission in permissions]
potentially_repoable_permissions = {permission: RepoablePermissionDecision()
for permission in permissions if permission not in no_repo_list}
used_services = set()
for service in aa_data:
accessed = service['lastAuthenticated']
if not accessed:
continue
accessed = datetime.datetime.fromtimestamp(accessed / 1000, tzlocal())
if accessed > now - ago:
used_services.add(service['serviceNamespace'])
for permission_name, permission_decision in potentially_repoable_permissions.items():
if permission_name.split(':')[0] in IAM_ACCESS_ADVISOR_UNSUPPORTED_SERVICES:
LOGGER.warn('skipping {}'.format(permission_name))
continue
# we have an unused service but need to make sure it's repoable
if permission_name.split(':')[0] not in used_services:
if permission_name in IAM_ACCESS_ADVISOR_UNSUPPORTED_ACTIONS:
LOGGER.warn('skipping {}'.format(permission_name))
continue
permission_decision.repoable = True
permission_decision.decider = 'Access Advisor'
hooks_output = repokid.hooks.call_hooks(hooks, 'DURING_REPOABLE_CALCULATION',
{'account_number': account_number,
'role_name': role_name,
'potentially_repoable_permissions': potentially_repoable_permissions,
'minimum_age': minimum_age})
# TODO: make option to show source of repoable?
return set([permission_name for permission_name, permission_value in
hooks_output['potentially_repoable_permissions'].items() if permission_value.repoable])
def _get_repoed_policy(policies, repoable_permissions):
"""
This function contains the logic to rewrite the policy to remove any repoable permissions. To do so we:
- Iterate over role policies
- Iterate over policy statements
- Skip Deny statements
- Remove any actions that are in repoable_permissions
- Remove any statements that now have zero actions
- Remove any policies that now have zero statements
Args:
policies (dict): All of the inline policies as a dict with name and policy contents
repoable_permissions (set): A set of all of the repoable permissions for policies
Returns:
dict: The rewritten set of all inline policies
list: Any policies that are now empty as a result of the rewrites
"""
# work with our own copy; don't mess with the CACHE copy.
role_policies = copy.deepcopy(policies)
empty_policies = []
for policy_name, policy in role_policies.items():
# list of indexes in the policy that are empty
empty_statements = []
if type(policy['Statement']) is dict:
policy['Statement'] = [policy['Statement']]
for idx, statement in enumerate(policy['Statement']):
if statement['Effect'].lower() == 'allow':
statement_actions = get_actions_from_statement(statement)
statement_actions = statement_actions.difference(repoable_permissions)
# get_actions_from_statement has already inverted this so our new statement should be 'Action'
if 'NotAction' in statement:
del statement['NotAction']
# by putting this into a set, we lose order, which may be confusing to someone.
statement['Action'] = sorted(list(statement_actions))
# mark empty statements to be removed
if len(statement['Action']) == 0:
empty_statements.append(idx)
# do the actual removal of empty statements
for idx in sorted(empty_statements, reverse=True):
del policy['Statement'][idx]
# mark empty policies to be removed
if len(policy['Statement']) == 0:
empty_policies.append(policy_name)
# do the actual removal of empty policies.
for policy_name in empty_policies:
del role_policies[policy_name]
return role_policies, empty_policies
def _get_permissions_in_policy(policy_dict, warn_unknown_perms=False):
"""
Given a set of policies for a role, return a set of all allowed permissions
Args:
policy_dict
warn_unknown_perms
Returns
set - all permissions allowed by the policies
"""
permissions = set()
for policy_name, policy in policy_dict.items():
policy = expand_policy(policy=policy, expand_deny=False)
for statement in policy.get('Statement'):
if statement['Effect'].lower() == 'allow':
permissions = permissions.union(get_actions_from_statement(statement))
weird_permissions = permissions.difference(all_permissions)
if weird_permissions and warn_unknown_perms:
LOGGER.warn('Unknown permissions found: {}'.format(weird_permissions))
return permissions
def _get_role_permissions(role, warn_unknown_perms=False):
"""
Expand the most recent version of policies from a role to produce a list of all the permissions that are allowed
(permission is included in one or more statements that is allowed). To perform expansion the policyuniverse
library is used. The result is a list of all of the individual permissions that are allowed in any of the
statements. If our resultant list contains any permissions that aren't listed in the master list of permissions
we'll raise an exception with the set of unknown permissions found.
Args:
role (Role): The role object that we're getting a list of permissions for
Returns:
set: A set of permissions that the role has policies that allow
"""
return _get_permissions_in_policy(role.policies[-1]['Policy'])
def _get_services_in_permissions(permissions_set):
"""
Given a set of permissions, return a sorted set of services
Args:
permissions_set
Returns:
services_set
"""
services_set = set()
for permission in permissions_set:
try:
service = permission.split(':')[0]
except IndexError:
pass
else:
services_set.add(service)
return sorted(services_set)
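# Example: _get_services_in_permissions({'s3:getobject', 'ec2:runinstances'}) -> ['ec2', 's3']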
|
the-stack_0_16103 | """Scraper for the Maryland Attorney General
CourtID: ag
Court Short Name: Maryland Attorney General
"""
import datetime
import os
from time import sleep
from lxml import html
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from juriscraper.AbstractSite import logger, phantomjs_executable_path
from juriscraper.OpinionSite import OpinionSite
from juriscraper.lib.string_utils import convert_date_string
class Site(OpinionSite):
"""This scraper is strange. The site it temperamental, and the javascript
seems to load successfully on some runs, but not on others. The dates are
also estimated, and the names are actually semi-long summaries. Furthermore,
the site's source is unmanageable, which has prevented us from being able to
create legitimate test/example files for coverage. We have a single example
file that's an empty document skeleton to prevent the test mechanism from
complaining. But it isn't a test providing real coverage.
We are doing the best we can with a bad site.
"""
def __init__(self, *args, **kwargs):
super(Site, self).__init__(*args, **kwargs)
self.court_id = self.__module__
self.year = datetime.date.today().year
self.domain = 'http://www.marylandattorneygeneral.gov'
self.url = '%s/Pages/Opinions/index.aspx' % self.domain
self.back_scrape_iterable = range(1993, self.year + 1)
self.parent_path_base = '//tbody/tr/td[contains(./text(), "%d")]'
self.parent_path = self.parent_path_base % self.year
self.cell_path = '//tbody[@isloaded="true"]/tr/td[%d]'
self.next_path = '//a[@title="Next"]'
self.driver = False
def _download(self, request_dict={}):
if self.test_mode_enabled():
return [super(Site, self)._download(request_dict)]
trees = self.get_dynamic_html_trees()
if not len(trees):
            # No opinions for current year on page, so no
# js to load. Return regular page html and
# extract 0 cases because nothing there
return [super(Site, self)._download(request_dict)]
return trees
def get_dynamic_html_trees(self):
# Initialize driver
driver = webdriver.PhantomJS(
executable_path=phantomjs_executable_path,
service_log_path=os.path.devnull, # Disable ghostdriver.log
)
driver.get(self.url)
# Find and activate the opinion drop-down for year
try:
date_anchor = driver.find_element_by_xpath('%s/a' % self.parent_path)
except NoSuchElementException:
# Year has no opinions drop-down on page
return []
date_anchor.click()
trees = [self.get_tree_from_driver_dom(driver)]
# Handle pagination if more than 30 results for year
while True:
try:
next_anchor = driver.find_element_by_xpath(self.next_path)
except NoSuchElementException:
# Less than 30 results
break
next_anchor.click()
trees.append(self.get_tree_from_driver_dom(driver))
return trees
def get_tree_from_driver_dom(self, driver):
# Wait for js to load and dom html to update
# Seems stupid, but necessary, and easier
        # than loading lots of selenium dependencies
        # and using complex WebDriverWait with callbacks
        # for the attribute to appear, which don't even
# seem to work consistently with the site's
# finicky responses.
sleep(3)
source = driver.execute_script("return document.getElementsByTagName('html')[0].innerHTML")
tree = html.fromstring(source)
tree.make_links_absolute(self.domain)
return tree
def _get_case_names(self):
names = []
path = self.cell_path % 3
for tree in self.html:
names.extend([cell.text_content().strip() for cell in tree.xpath(path)])
return names
def _get_download_urls(self):
urls = []
path = (self.cell_path % 4) + '/a/@href'
for tree in self.html:
urls.extend([href for href in tree.xpath(path)])
return urls
def _get_case_dates(self):
today = datetime.date.today()
count = len(self._get_case_names())
middle_of_year = convert_date_string('July 2, %d' % self.year)
if self.year == today.year:
# Not a backscraper, assume cases were filed on day scraped.
return [today] * count
else:
# All we have is the year, so estimate the middle most day
return [middle_of_year] * count
def _get_docket_numbers(self):
dockets = []
path = self.cell_path % 1
for tree in self.html:
for cell in tree.xpath(path):
dockets.append(cell.text_content().replace('Unpublished', ''))
return dockets
def _get_precedential_statuses(self):
statuses = []
path = self.cell_path % 1
for tree in self.html:
for cell in tree.xpath(path):
if 'Unpublished' in cell.text_content():
statuses.append('Unpublished')
else:
statuses.append('Published')
return statuses
def _get_date_filed_is_approximate(self):
return ['True'] * len(self.case_names)
def _download_backwards(self, year):
"""Iterate over drop down for each year on the page"""
self.year = year
self.parent_path = self.parent_path_base % year
self.html = self._download()
|
the-stack_0_16106 | import binascii
import hashlib
import hmac
import os
import random
import struct
from time import time
import iofree
def pack_uint16(s):
return len(s).to_bytes(2, "big") + s
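# Example: pack_uint16(b"ab") -> b"\x00\x02ab" (big-endian 16-bit length prefix)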
def sni(host):
return b"\x00\x00" + pack_uint16(pack_uint16(pack_uint16(b"\x00" + host)))
def pack_auth_data(key, session_id):
utc_time = int(time()) & 0xFFFFFFFF
data = struct.pack(">I", utc_time) + os.urandom(18)
data += hmac.new(key + session_id, data, hashlib.sha1).digest()[:10]
return data
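# pack_auth_data returns 4-byte UTC timestamp + 18 random bytes + 10-byte truncated
# HMAC-SHA1 = 32 bytes, the size of the TLS "random" field it is used as.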
@iofree.parser
def tls1_2_response(plugin):
tls_version = plugin.tls_version
with memoryview((yield from iofree.read(5))) as tls_plaintext_head:
assert (
tls_plaintext_head[:3] == b"\x16\x03\x03"
), "invalid tls head: handshake(22) protocol_version(3.1)"
length = int.from_bytes(tls_plaintext_head[-2:], "big")
assert length == length & 0x3FFF, f"{length} is over 2^14"
with memoryview((yield from iofree.read(length))) as fragment:
            assert fragment[0] == 2, f"expect server_hello(2), but got: {fragment[0]}"
handshake_length = int.from_bytes(fragment[1:4], "big")
server_hello = fragment[4 : handshake_length + 4]
assert server_hello[:2] == tls_version, "expect: server_version(3.3)"
verify_id = server_hello[2:34]
sha1 = hmac.new(
plugin.client.ns.cipher.master_key + plugin.session_id,
verify_id[:-10],
hashlib.sha1,
).digest()[:10]
assert sha1 == verify_id[-10:], "hmac verify failed"
assert server_hello[34] == 32, f"expect 32, but got {server_hello[34]}"
# verify_id = server_hello[35:67]
# sha1 = hmac.new(
# plugin.client.ns.cipher.master_key + plugin.session_id,
# fragment[:-10],
# hashlib.sha1,
# ).digest()[:10]
# assert sha1 == fragment[-10:], "hmac verify failed"
while True:
x = yield from iofree.peek(1)
if x[0] != 22:
break
with memoryview((yield from iofree.read(5))) as ticket_head:
length = int.from_bytes(ticket_head[-2:], "big")
assert length == length & 0x3FFF, f"{length} is over 2^14"
yield from iofree.read(length)
yield from ChangeCipherReader(
plugin, plugin.client.ns.cipher.master_key, plugin.session_id
)
yield from application_data(plugin)
@iofree.parser
def tls1_2_request(plugin):
parser = yield from iofree.get_parser()
tls_version = plugin.tls_version
with memoryview((yield from iofree.read(5))) as tls_plaintext_head:
assert (
tls_plaintext_head[:3] == b"\x16\x03\x01"
), "invalid tls head: handshake(22) protocol_version(3.1)"
length = int.from_bytes(tls_plaintext_head[-2:], "big")
assert length == length & 0x3FFF, f"{length} is over 2^14"
with memoryview((yield from iofree.read(length))) as fragment:
        assert fragment[0] == 1, f"expect client_hello(1), but got {fragment[0]}"
handshake_length = int.from_bytes(fragment[1:4], "big")
client_hello = fragment[4 : handshake_length + 4]
assert client_hello[:2] == tls_version, "expect: client_version(3.3)"
verify_id = client_hello[2:34]
        # TODO: replay attack detection
gmt_unix_time = int.from_bytes(verify_id[:4], "big")
time_diff = (int(time()) & 0xFFFFFFFF) - gmt_unix_time
assert abs(time_diff) < plugin.time_tolerance, f"expired request: {time_diff}"
session_length = client_hello[34]
assert session_length >= 32, "session length should be >= 32"
session_id = client_hello[35 : 35 + session_length].tobytes()
sha1 = hmac.new(
plugin.server.cipher.master_key + session_id, verify_id[:22], hashlib.sha1
).digest()[:10]
assert verify_id[22:] == sha1, "hmac verify failed"
tail = client_hello[35 + session_length :]
cipher_suites = tail[:2].tobytes()
compression_methods = tail[2:3]
(cipher_suites, compression_methods)
random_bytes = pack_auth_data(plugin.server.cipher.master_key, session_id)
server_hello = (
tls_version
+ random_bytes
+ session_length.to_bytes(1, "big")
+ session_id
+ binascii.unhexlify(b"c02f000005ff01000100")
)
server_hello = b"\x02\x00" + pack_uint16(server_hello)
server_hello = b"\x16" + tls_version + pack_uint16(server_hello)
if random.randint(0, 8) < 1:
ticket = os.urandom((struct.unpack(">H", os.urandom(2))[0] % 164) * 2 + 64)
ticket = struct.pack(">H", len(ticket) + 4) + b"\x04\x00" + pack_uint16(ticket)
server_hello += b"\x16" + tls_version + ticket
change_cipher_spec = b"\x14" + tls_version + b"\x00\x01\x01"
finish_len = random.choice([32, 40])
change_cipher_spec += (
b"\x16"
+ tls_version
+ struct.pack(">H", finish_len)
+ os.urandom(finish_len - 10)
)
change_cipher_spec += hmac.new(
plugin.server.cipher.master_key + session_id, change_cipher_spec, hashlib.sha1
).digest()[:10]
parser.respond(data=server_hello + change_cipher_spec)
yield from ChangeCipherReader(plugin, plugin.server.cipher.master_key, session_id)
def ChangeCipherReader(plugin, key, session_id):
with memoryview((yield from iofree.read(11))) as data:
assert data[0] == 0x14, f"{data[0]} != change_cipher_spec(20) {data.tobytes()}"
assert (
data[1:3] == plugin.tls_version
), f"{data[1:3].tobytes()} != version({plugin.tls_version})"
assert data[3:6] == b"\x00\x01\x01", "bad ChangeCipherSpec"
assert data[6] == 0x16, f"{data[6]} != Finish(22)"
assert (
data[7:9] == plugin.tls_version
), f"{data[7:9]} != version({plugin.tls_version})"
assert data[9] == 0x00, f"{data[9]} != Finish(0)"
verify_len = int.from_bytes(data[9:11], "big")
with memoryview((yield from iofree.read(verify_len))) as verify:
sha1 = hmac.new(
key + session_id, b"".join([data, verify[:-10]]), hashlib.sha1
).digest()[:10]
assert sha1 == verify[-10:], "hmac verify failed"
@iofree.parser
def application_data(plugin):
parser = yield from iofree.get_parser()
while True:
with memoryview((yield from iofree.read(5))) as data:
assert (
data[0] == 0x17
), f"{data[0]} != application_data(23) {data.tobytes()}"
assert (
data[1:3] == plugin.tls_version
), f"{data[1:3].tobytes()} != version({plugin.tls_version})"
size = int.from_bytes(data[3:], "big")
assert size == size & 0x3FFF, f"{size} is over 2^14"
data = yield from iofree.read(size)
parser.respond(result=data)
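# Counterpart of the application_data parser above (an illustrative sketch, not
# part of the original plugin): frame an outgoing payload into TLS
# application_data records, respecting the 2^14-byte fragment limit that the
# parser asserts on.
def _demo_pack_application_data(payload, tls_version=b"\x03\x03"):
    records = b""
    for i in range(0, len(payload), 0x3FFF):
        chunk = payload[i : i + 0x3FFF]
        records += b"\x17" + tls_version + struct.pack(">H", len(chunk)) + chunk
    return records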
|
the-stack_0_16107 | """
Tests that rely on a server running
"""
import base64
import json
import datetime
import os
import pytest
from omnisci import connect, ProgrammingError, DatabaseError
from omnisci.cursor import Cursor
from omnisci._parsers import Description, ColumnDetails
from omnisci.thrift.ttypes import TOmniSciException
# XXX: Make it hashable to silence warnings; see if this can be done upstream
# This isn't a huge deal, but our testing context managers for asserting
# exceptions need hashability
TOmniSciException.__hash__ = lambda x: id(x)
omniscihost = os.environ.get('OMNISCI_HOST', 'localhost')
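# The tests below rely on `omnisci_server` and `con` fixtures provided by the
# project's conftest.py (not shown in this file). A minimal sketch of what the
# connection fixture could look like -- an assumption for illustration only,
# not the project's actual conftest code:
@pytest.fixture
def _con_sketch():
    return connect(
        user="admin",
        password='HyperInteractive',
        host=omniscihost,
        port=6274,
        protocol='binary',
        dbname='omnisci',
    )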
@pytest.mark.usefixtures("omnisci_server")
class TestIntegration:
def test_connect_binary(self):
con = connect(
user="admin",
password='HyperInteractive',
host=omniscihost,
port=6274,
protocol='binary',
dbname='omnisci',
)
assert con is not None
def test_connect_http(self):
con = connect(
user="admin",
password='HyperInteractive',
host=omniscihost,
port=6278,
protocol='http',
dbname='omnisci',
)
assert con is not None
def test_connect_uri(self):
uri = (
'omnisci://admin:HyperInteractive@{0}:6274/omnisci?'
'protocol=binary'.format(omniscihost)
)
con = connect(uri=uri)
assert con._user == 'admin'
assert con._password == 'HyperInteractive'
assert con._host == omniscihost
assert con._port == 6274
assert con._dbname == 'omnisci'
assert con._protocol == 'binary'
def test_connect_uri_and_others_raises(self):
uri = (
'omnisci://admin:HyperInteractive@{0}:6274/omnisci?'
'protocol=binary'.format(omniscihost)
)
with pytest.raises(TypeError):
connect(username='omnisci', uri=uri)
def test_invalid_sql(self, con):
with pytest.raises(ProgrammingError) as r:
con.cursor().execute("this is invalid;")
r.match("SQL Error:")
def test_nonexistant_table(self, con):
with pytest.raises(DatabaseError) as r:
con.cursor().execute("select it from fake_table;")
r.match("Table 'FAKE_TABLE' does not exist|Object 'fake_table' not")
def test_connection_execute(self, con):
result = con.execute("drop table if exists FOO;")
result = con.execute("create table FOO (a int);")
assert isinstance(result, Cursor)
con.execute("drop table if exists FOO;")
def test_select_sets_description(self, con):
c = con.cursor()
c.execute('drop table if exists stocks;')
create = (
'create table stocks (date_ text, trans text, symbol text, '
'qty int, price float, vol float);'
)
c.execute(create)
i1 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14,1.1);" # noqa
i2 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','GOOG',100,12.14,1.2);" # noqa
c.execute(i1)
c.execute(i2)
c.execute("select * from stocks")
expected = [
Description('date_', 6, None, None, None, None, True),
Description('trans', 6, None, None, None, None, True),
Description('symbol', 6, None, None, None, None, True),
Description('qty', 1, None, None, None, None, True),
Description('price', 3, None, None, None, None, True),
Description('vol', 3, None, None, None, None, True),
]
assert c.description == expected
c.execute('drop table if exists stocks;')
def test_select_parametrized(self, con):
c = con.cursor()
c.execute('drop table if exists stocks;')
create = (
'create table stocks (date_ text, trans text, symbol text, '
'qty int, price float, vol float);'
)
c.execute(create)
i1 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14,1.1);" # noqa
i2 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','GOOG',100,12.14,1.2);" # noqa
c.execute(i1)
c.execute(i2)
c.execute(
'select symbol, qty from stocks where symbol = :symbol',
{'symbol': 'GOOG'},
)
result = list(c)
expected = [
('GOOG', 100),
] # noqa
assert result == expected
c.execute('drop table if exists stocks;')
def test_executemany_parametrized(self, con):
c = con.cursor()
c.execute('drop table if exists stocks;')
create = (
'create table stocks (date_ text, trans text, symbol text, '
'qty int, price float, vol float);'
)
c.execute(create)
i1 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14,1.1);" # noqa
i2 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','GOOG',100,12.14,1.2);" # noqa
c.execute(i1)
c.execute(i2)
parameters = [{'symbol': 'GOOG'}, {'symbol': "RHAT"}]
expected = [[('GOOG', 100)], [('RHAT', 100)]]
query = 'select symbol, qty from stocks where symbol = :symbol'
c = con.cursor()
result = c.executemany(query, parameters)
assert result == expected
c.execute('drop table if exists stocks;')
def test_executemany_parametrized_insert(self, con):
c = con.cursor()
c.execute('drop table if exists stocks;')
create = (
'create table stocks (date_ text, trans text, symbol text, '
'qty int, price float, vol float);'
)
c.execute(create)
i1 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14,1.1);" # noqa
i2 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','GOOG',100,12.14,1.2);" # noqa
c.execute(i1)
c.execute(i2)
c = con.cursor()
c.execute("drop table if exists stocks2;")
# Create table
c.execute('CREATE TABLE stocks2 (symbol text, qty int);')
params = [{"symbol": "GOOG", "qty": 10}, {"symbol": "AAPL", "qty": 20}]
query = "INSERT INTO stocks2 VALUES (:symbol, :qty);"
result = c.executemany(query, params)
assert result == [[], []] # TODO: not sure if this is standard
c.execute("drop table stocks2;")
c.execute('drop table if exists stocks;')
def test_fetchone(self, con):
c = con.cursor()
c.execute('drop table if exists stocks;')
create = (
'create table stocks (date_ text, trans text, symbol text, '
'qty int, price float, vol float);'
)
c.execute(create)
i1 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14,1.1);" # noqa
i2 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','GOOG',100,12.14,1.2);" # noqa
c.execute(i1)
c.execute(i2)
c.execute("select symbol, qty from stocks")
result = c.fetchone()
expected = ('RHAT', 100)
assert result == expected
c.execute('drop table if exists stocks;')
def test_fetchmany(self, con):
c = con.cursor()
c.execute('drop table if exists stocks;')
create = (
'create table stocks (date_ text, trans text, symbol text, '
'qty int, price float, vol float);'
)
c.execute(create)
i1 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14,1.1);" # noqa
i2 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','GOOG',100,12.14,1.2);" # noqa
c.execute(i1)
c.execute(i2)
c.execute("select symbol, qty from stocks")
result = c.fetchmany()
expected = [('RHAT', 100)]
assert result == expected
c.execute("select symbol, qty from stocks")
result = c.fetchmany(size=10)
expected = [('RHAT', 100), ('GOOG', 100)]
assert result == expected
c.execute('drop table if exists stocks;')
def test_select_dates(self, con):
c = con.cursor()
c.execute('drop table if exists dates;')
c.execute(
'create table dates (date_ DATE, datetime_ TIMESTAMP, '
'time_ TIME);'
)
i1 = (
"INSERT INTO dates VALUES ('2006-01-05','2006-01-01T12:00:00',"
"'12:00:00');"
)
i2 = (
"INSERT INTO dates VALUES ('1901-12-14','1901-12-13T20:45:53',"
"'23:59:00');"
)
c.execute(i1)
c.execute(i2)
result = list(c.execute("select * from dates"))
expected = [
(
datetime.date(2006, 1, 5),
datetime.datetime(2006, 1, 1, 12),
datetime.time(12),
),
(
datetime.date(1901, 12, 14),
datetime.datetime(1901, 12, 13, 20, 45, 53),
datetime.time(23, 59),
),
]
assert result == expected
c.execute('drop table if exists dates;')
class TestExtras:
def test_sql_validate(self, con):
from omnisci.common.ttypes import TTypeInfo
c = con.cursor()
c.execute('drop table if exists stocks;')
create = (
'create table stocks (date_ text, trans text, symbol text, '
'qty int, price float, vol float);'
)
c.execute(create)
q = "select * from stocks"
results = con._client.sql_validate(con._session, q)
col_names = sorted([r.col_name for r in results])
col_types = [r.col_type for r in results]
expected_col_names = [
'date_',
'price',
'qty',
'symbol',
'trans',
'vol',
]
expected_types = [
TTypeInfo(
type=6,
encoding=4,
nullable=True,
is_array=False,
precision=0,
scale=0,
comp_param=32,
size=-1,
),
TTypeInfo(
type=6,
encoding=4,
nullable=True,
is_array=False,
precision=0,
scale=0,
comp_param=32,
size=-1,
),
TTypeInfo(
type=6,
encoding=4,
nullable=True,
is_array=False,
precision=0,
scale=0,
comp_param=32,
size=-1,
),
TTypeInfo(
type=1,
encoding=0,
nullable=True,
is_array=False,
precision=0,
scale=0,
comp_param=0,
size=-1,
),
TTypeInfo(
type=3,
encoding=0,
nullable=True,
is_array=False,
precision=0,
scale=0,
comp_param=0,
size=-1,
),
TTypeInfo(
type=3,
encoding=0,
nullable=True,
is_array=False,
precision=0,
scale=0,
comp_param=0,
size=-1,
),
]
assert col_types == expected_types
assert col_names == expected_col_names
|
the-stack_0_16109 | import discord, asyncio, random, time
from . import world, worldToImage, rpglang, menus
from .datatypes import RPGUser, BiomeType, LocationType, Biome, Location, ItemType, Item, Weapon, WeaponType, WAttribute, Chunk
from libs import modutil
from ..rpg import rpgcmd
from discord.ext import commands
import discord
def check_if_user_is_loaded(ctx):
return RPGUser.isLoaded(ctx.message.author.id)
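# Example of wiring the predicate above into a command check (a hypothetical
# command added purely for illustration; no command in the original file
# attaches commands.check(check_if_user_is_loaded) directly):
@rpgcmd.command(name="checkdemo")
@commands.check(check_if_user_is_loaded)
async def checkDemoSubCommand(ctx):
    await ctx.send("Your RPG user is loaded.")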
@rpgcmd.command(name="user")
async def userSubCommand(ctx, *args):
_u = RPGUser.get(args[0])
_toSend = "```\n---- USER ----\nID: {id}\nPosition: ({x}, {y})\nFighters: {fighters}\nTutorialDone: {tutorialState}\nInventory: {inv}```".format(
id = _u.id,
x = _u.x,
y = _u.y,
fighters = ", ".join(list(map(lambda f: f.name, _u.fighters))),
tutorialState = str(_u.tutorialDone),
inv = ", ".join(list(map(lambda f: f.getFullName(), _u.inventory))) if len(_u.inventory) > 0 else "(Nothing)"
)
await ctx.send(_toSend)
@rpgcmd.command(name="loc")
async def infoSubCommand(ctx, *args):
_x = int(args[0])
_y = int(args[1])
_b = world.getPos(_x, _y)
_toSend = "```\n---- BIOME ----\nType: {type}\nx, y: ({x}, {y})\nLoot: {loot}\nLocation Connected: {location}".format(
type = _b.stype,
x = _b.x,
y = _b.y,
loot = ", ".join(list(map(lambda l: l.getType().name,_b.loot))) if len(_b.loot) > 0 else "(Nothing)",
location = _b.locationConnected.getType().systemName if _b.locationConnected != None else "None"
)
if _b.locationConnected != None:
_l = _b.locationConnected
_toSend = _toSend + "\n\n---- LOCATION ----\nType: {type}\nrecursion: {recursion}\nConnections: {connections}\nLoot: {loot}".format(
type = _l.stype,
recursion = _l.recursion,
connections = ", ".join(list(map(lambda l: l.getType().systemName, _l.connections))) if len(_l.connections) > 0 else "(Nothing)",
loot = ", ".join(list(map(lambda l: l.getType().systemName, _l.loot))) if len(_l.loot) > 0 else "(Nothing)"
)
await ctx.send(_toSend + "```")
@rpgcmd.command(name="tp")
async def tpSubCommand(ctx, *args):
if len(args) == 1: # tp player
user = RPGUser.get(ctx.message.author.id)
_x = int(RPGUser.get(args[0]).x)
_y = int(RPGUser.get(args[0]).y)
elif len(args) == 2: # tp x y
_x = int(args[0])
_y = int(args[1])
user = RPGUser.get(ctx.message.author.id)
elif len(args) == 3: # tp player x y
user = RPGUser.get(args[0])
_x = int(args[1])
_y = int(args[2])
else:
await ctx.send("`Usage: rpg tp <player [x y] / x y>`")
return
if user==None:
await ctx.send(rpglang.getl("USER_NOT_REGISTERED"))
return
user.setPos(_x, _y)
await ctx.send("Teleported {} to ({}, {}).".format(user.getName(), _x, _y))
@rpgcmd.group(invoke_without_command=True, name="data")
async def datacmd(ctx):
await ctx.send("`Usage: rpg data <biome/location/weapon/wattribute> [systemname]`")
@datacmd.command(name="biome")
async def data_biomecmd(ctx, *args):
if len(args) > 0:
_x = list(filter(lambda n: n.systemName == args[0], BiomeType.all.values()))[0]
_toSend = "```\n--------- Biome ---------\nname: {name}\npossibleBiomesNear: {possibleBiomesNear}\npossibleLocations: {possibleLocations}\ngender: {gender}\nemoji: {emoji}```".format(
name = _x.name,
possibleBiomesNear = ",".join(list(map(lambda n: n.name, _x.possibleBiomesNear.keys()))),
possibleLocations = ",".join(list(map(lambda n: n.name, _x.possibleLocations))),
gender = "Female" if _x.gender else "Male",
emoji = _x.emoji)
await ctx.send(_toSend)
else:
await ctx.send("```\nBiomes:\n{}```".format("\n".join(list(map(lambda b: b.systemName,BiomeType.all.values())))))
@datacmd.command(name="location")
async def data_locationcmd(ctx, *args):
await ctx.send("```\nLocations:\n{}```".format("\n".join(list(map(lambda b: b.systemName,LocationType.all.values())))))
@datacmd.command(name="weapon")
async def data_weaponcmd(ctx, *args):
await ctx.send("```\nWeapons:\n{}```".format("\n".join(list(map(lambda b: b.systemName,WeaponType.all.values())))))
@datacmd.command(name="wattribute")
async def data_wattcmd(ctx, *args):
if len(args) > 0:
_x = list(filter(lambda n: n.nameM == args[0], WAttribute.all))[0]
_toSend = "```\n--------- wAttribute ---------\nnameM: {nameM}\nnameF: {nameF}\ntype: {type}\nclassification: {classification}\nattackMod: {attackMod}\nagilityMod: {agilityMod}```".format(
nameM = _x.nameM,
nameF = _x.nameF,
type = _x.type,
classification = _x.classification,
attackMod = _x.attackMod,
agilityMod = _x.agilityMod)
await ctx.send(_toSend)
else:
await ctx.send("```\nWeapon Adjectives:\n{}```".format("\n".join(list(map(lambda b: b.nameM,WAttribute.all)))))
# Sends a map of the world to a channel.
@rpgcmd.command(name="map")
async def worldSubCommand(ctx, *args):
user = RPGUser.get(ctx.message.author.id)
if user==None:
await ctx.send(rpglang.getl("USER_NOT_REGISTERED"))
return
cx = int(args[0]) * Chunk.size
cy = int(args[1]) * Chunk.size
ux = user.x
uy = user.y
worldmap = ""
for iy in range(cx, cx + Chunk.size):
for ix in range(cy, cy + Chunk.size):
if ux == ix and uy == iy:
worldmap += "X"
elif world.getPos(ix,iy).locationConnected != None:
worldmap += "C"
else:
worldmap += world.getPos(ix,iy).getType().emoji
worldmap += "\n"
    worldmap = worldmap.rstrip("\n")
worldToImage.represent(worldmap, modutil.absolutePath + "/map.png")
await ctx.send(file=discord.File(modutil.absolutePath + "/map.png"))
@rpgcmd.command(name="save")
async def saveSubCommand(ctx, *args):
RPGUser.saveAll()
await ctx.send("Saved!")
@rpgcmd.command(name="inv")
async def invSubCommand(ctx):
user = RPGUser.get(ctx.message.author.id)
if user==None:
await ctx.send(rpglang.getl("USER_NOT_REGISTERED"))
return
await menus.showInventoryMenu(ctx, user)
@rpgcmd.command(name="s")
async def startSubCommand(ctx):
user = RPGUser.get(ctx.message.author.id)
if user==None:
await ctx.send(rpglang.getl("USER_NOT_REGISTERED"))
return
if not user.tutorialDone:
await menus.showTutorialMenu(ctx, user)
else:
await menus.showActionMenu(ctx, user, rpglang.getl("CONTINUE_ADVENTURE")) |
the-stack_0_16112 | # -*- coding: utf-8 -*-
'''
# Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information.
#
# This file was generated and any changes will be overwritten.
'''
from __future__ import unicode_literals
from ..model.external_link import ExternalLink
from ..one_drive_object_base import OneDriveObjectBase
class NotebookLinks(OneDriveObjectBase):
def __init__(self, prop_dict={}):
self._prop_dict = prop_dict
@property
def one_note_client_url(self):
"""
Gets and sets the oneNoteClientUrl
Returns:
:class:`ExternalLink<onedrivesdk.model.external_link.ExternalLink>`:
The oneNoteClientUrl
"""
if "oneNoteClientUrl" in self._prop_dict:
if isinstance(self._prop_dict["oneNoteClientUrl"], OneDriveObjectBase):
return self._prop_dict["oneNoteClientUrl"]
else :
self._prop_dict["oneNoteClientUrl"] = ExternalLink(self._prop_dict["oneNoteClientUrl"])
return self._prop_dict["oneNoteClientUrl"]
return None
@one_note_client_url.setter
def one_note_client_url(self, val):
self._prop_dict["oneNoteClientUrl"] = val
@property
def one_note_web_url(self):
"""
Gets and sets the oneNoteWebUrl
Returns:
:class:`ExternalLink<onedrivesdk.model.external_link.ExternalLink>`:
The oneNoteWebUrl
"""
if "oneNoteWebUrl" in self._prop_dict:
if isinstance(self._prop_dict["oneNoteWebUrl"], OneDriveObjectBase):
return self._prop_dict["oneNoteWebUrl"]
else :
self._prop_dict["oneNoteWebUrl"] = ExternalLink(self._prop_dict["oneNoteWebUrl"])
return self._prop_dict["oneNoteWebUrl"]
return None
@one_note_web_url.setter
def one_note_web_url(self, val):
self._prop_dict["oneNoteWebUrl"] = val
|
the-stack_0_16114 | import argparse
import logging
import os
import random
import numpy as np
import torch
import torch.backends.cudnn as cudnn
from networks.vit_seg_modeling import VisionTransformer as ViT_seg
from networks.vit_seg_modeling import CONFIGS as CONFIGS_ViT_seg
from trainer import trainer_synapse
from utils import Params
params=Params("./params.json")
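# `Params` (imported from utils above) is assumed to load a JSON file and expose
# its keys as attributes (e.g. params.num_classes further down). A minimal
# equivalent sketch, named _ParamsSketch so it does not shadow the real helper:
import json
class _ParamsSketch:
    def __init__(self, json_path):
        with open(json_path) as f:
            self.__dict__.update(json.load(f))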
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
default='../data/Synapse/train_npz', help='root dir for data')
parser.add_argument('--dataset', type=str,
default='Synapse', help='experiment_name')
parser.add_argument('--list_dir', type=str,
default='./lists/lists_Synapse', help='list dir')
parser.add_argument('--num_classes', type=int,
default=14, help='output channel of network')
parser.add_argument('--max_iterations', type=int,
default=30000, help='maximum epoch number to train')
parser.add_argument('--max_epochs', type=int,
default=150, help='maximum epoch number to train')
parser.add_argument('--batch_size', type=int,
default=2, help='batch_size per gpu')
parser.add_argument('--n_gpu', type=int, default=1, help='total gpu')
parser.add_argument('--deterministic', type=int, default=1,
help='whether use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
help='segmentation network learning rate')
parser.add_argument('--img_size', type=int,
default=224, help='input patch size of network input')
parser.add_argument('--seed', type=int,
default=1234, help='random seed')
parser.add_argument('--n_skip', type=int,
default=3, help='using number of skip-connect, default is num')
parser.add_argument('--vit_name', type=str,
default='R50-ViT-B_16', help='select one vit model')
parser.add_argument('--vit_patches_size', type=int,
default=16, help='vit_patches_size, default is 16')
args = parser.parse_args()
if __name__ == "__main__":
if not args.deterministic:
cudnn.benchmark = True
cudnn.deterministic = False
else:
cudnn.benchmark = False
cudnn.deterministic = True
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
#torch.cuda.manual_seed(args.seed)
dataset_name = args.dataset
dataset_config = {
'Synapse': {
'root_path': '../data/Synapse/train_npz',
'list_dir': './lists/lists_Synapse',
'num_classes': params.num_classes,
},
}
args.num_classes = dataset_config[dataset_name]['num_classes']
args.root_path = dataset_config[dataset_name]['root_path']
args.list_dir = dataset_config[dataset_name]['list_dir']
args.is_pretrain = True
args.exp = 'TU_' + dataset_name + str(args.img_size)
snapshot_path = "../model/{}/{}".format(args.exp, 'TU')
snapshot_path = snapshot_path + '_pretrain' if args.is_pretrain else snapshot_path
snapshot_path += '_' + args.vit_name
snapshot_path = snapshot_path + '_skip' + str(args.n_skip)
snapshot_path = snapshot_path + '_vitpatch' + str(args.vit_patches_size) if args.vit_patches_size!=16 else snapshot_path
snapshot_path = snapshot_path+'_'+str(args.max_iterations)[0:2]+'k' if args.max_iterations != 30000 else snapshot_path
snapshot_path = snapshot_path + '_epo' +str(args.max_epochs) if args.max_epochs != 30 else snapshot_path
snapshot_path = snapshot_path+'_bs'+str(args.batch_size)
snapshot_path = snapshot_path + '_lr' + str(args.base_lr) if args.base_lr != 0.01 else snapshot_path
snapshot_path = snapshot_path + '_'+str(args.img_size)
snapshot_path = snapshot_path + '_s'+str(args.seed) if args.seed!=1234 else snapshot_path
if not os.path.exists(snapshot_path):
os.makedirs(snapshot_path)
config_vit = CONFIGS_ViT_seg[args.vit_name]
config_vit.n_classes = args.num_classes
config_vit.n_skip = args.n_skip
if args.vit_name.find('R50') != -1:
config_vit.patches.grid = (int(args.img_size / args.vit_patches_size), int(args.img_size / args.vit_patches_size))
net = ViT_seg(config_vit, img_size=args.img_size, num_classes=config_vit.n_classes)#.cuda()
if torch.cuda.is_available():
net=net.cuda()
net.load_from(weights=np.load(config_vit.pretrained_path))
trainer = {'Synapse': trainer_synapse,}
trainer[dataset_name](args, net, snapshot_path) |
the-stack_0_16115 |
from numpy import array, exp, linspace, sqrt, pi
import matplotlib.pyplot as plt
# Suppose we have the following dataset, which we believe is described by a
# Gaussian peak plus a constant background. Our goal in this example is to
# infer the area of the Gaussian.
x_data = [0.00, 0.80, 1.60, 2.40, 3.20, 4.00, 4.80, 5.60,
6.40, 7.20, 8.00, 8.80, 9.60, 10.4, 11.2, 12.0]
y_data = [2.473, 1.329, 2.370, 1.135, 5.861, 7.045, 9.942, 7.335,
3.329, 5.348, 1.462, 2.476, 3.096, 0.784, 3.342, 1.877]
y_error = [1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1.]
plt.errorbar(x_data, y_data, yerr=y_error, ls='dashed', marker='D', c='red', markerfacecolor='none')
plt.ylabel('y')
plt.xlabel('x')
plt.grid()
plt.show()
# The first step is to implement our model. For simple models like this one
# this can be done using just a function, but as models become more complex
# it becomes useful to build them as classes.
class PeakModel(object):
def __init__(self, x_data):
"""
The __init__ should be used to pass in any data which is required
by the model to produce predictions of the y-data values.
"""
self.x = x_data
def __call__(self, theta):
return self.forward_model(self.x, theta)
@staticmethod
def forward_model(x, theta):
"""
The forward model must make a prediction of the experimental data we would expect to measure
        given a specific set of model parameters 'theta'.
"""
# unpack the model parameters
area, width, center, background = theta
# return the prediction of the data
z = (x - center) / width
gaussian = exp(-0.5*z**2)/(sqrt(2*pi)*width)
return area*gaussian + background
# Inference-tools has a variety of Likelihood classes which allow you to easily construct a
# likelihood function given the measured data and your forward-model.
from inference.likelihoods import GaussianLikelihood
likelihood = GaussianLikelihood(y_data=y_data, sigma=y_error, forward_model=PeakModel(x_data))
# Instances of the likelihood classes can be called as functions, and return the log-likelihood
# when passed a vector of model parameters:
initial_guess = array([10., 2., 5., 2.])
guess_log_likelihood = likelihood(initial_guess)
print(guess_log_likelihood)
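# Cross-check (illustrative, assuming the standard Gaussian log-likelihood
# definition): up to a constant normalisation term, the value above should
# match -0.5 * sum(((y - model) / sigma)**2).
manual_model = PeakModel.forward_model(array(x_data), initial_guess)
manual_chi2_term = -0.5 * (((array(y_data) - manual_model) / array(y_error)) ** 2).sum()
print(manual_chi2_term)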
# We could at this stage pair the likelihood object with an optimiser in order to obtain
# the maximum-likelihood estimate of the parameters. In this example however, we want to
# construct the posterior distribution for the model parameters, and that means we need
# a prior.
# The inference.priors module contains classes which allow for easy construction of
# prior distributions across all model parameters.
from inference.priors import ExponentialPrior, UniformPrior, JointPrior
# If we want different model parameters to have different prior distributions, as in this
# case where we give three variables an exponential prior and one a uniform prior, we first
# construct each type of prior separately:
prior_components = [
ExponentialPrior(beta=[50., 20., 20.], variable_indices=[0, 1, 3]),
UniformPrior(lower=0., upper=12., variable_indices=[2])
]
# Now we use the JointPrior class to combine the various components into a single prior
# distribution which covers all the model parameters.
prior = JointPrior(components=prior_components, n_variables=4)
# As with the likelihood, prior objects can also be called as function to return a
# log-probability value when passed a vector of model parameters. We can also draw
# samples from the prior directly using the sample() method:
prior_sample = prior.sample()
print(prior_sample)
# The likelihood and prior can be easily combined into a posterior distribution
# using the Posterior class:
from inference.posterior import Posterior
posterior = Posterior(likelihood=likelihood, prior=prior)
# Now we have constructed a posterior distribution, we can sample from it
# using Markov-chain Monte-Carlo (MCMC).
# The inference.mcmc module contains implementations of various MCMC sampling algorithms.
# Here we import the PcaChain class and use it to create a Markov-chain object:
from inference.mcmc import PcaChain
chain = PcaChain(posterior=posterior, start=initial_guess)
# We generate samples by advancing the chain by a chosen number of steps using the advance method:
chain.advance(25000)
# we can check the status of the chain using the plot_diagnostics method:
chain.plot_diagnostics()
# The burn-in (how many samples from the start of the chain are discarded)
# can be chosen by setting the burn attribute of the chain object:
chain.burn = 5000
# we can get a quick overview of the posterior using the matrix_plot method
# of chain objects, which plots all possible 1D & 2D marginal distributions
# of the full parameter set (or a chosen sub-set).
chain.matrix_plot(labels=['area', 'width', 'center', 'background'])
# We can easily estimate 1D marginal distributions for any parameter
# using the get_marginal method:
area_pdf = chain.get_marginal(0)
area_pdf.plot_summary(label='Gaussian area')
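# A quick numerical summary of the same marginal (illustrative): work directly
# on the sampled parameter vectors returned by get_sample().
area_samples = array(chain.get_sample())[:, 0]
print('area posterior mean:', area_samples.mean())
print('area posterior std: ', area_samples.std())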
# We can assess the level of uncertainty in the model predictions by passing each sample
# through the forward-model and observing the distribution of model expressions that result:
# generate an axis on which to evaluate the model
x_fits = linspace(0, 12, 500)
# get the sample
sample = chain.get_sample()
# pass each through the forward model
curves = array([PeakModel.forward_model(x_fits, theta) for theta in sample])
# We could plot the predictions for each sample all on a single graph, but this is
# often cluttered and difficult to interpret.
# A better option is to use the hdi_plot function from the plotting module to plot
# highest-density intervals for each point where the model is evaluated:
from inference.plotting import hdi_plot
plt.figure(figsize=(8, 5))
hdi_plot(x_fits, curves, intervals=[0.68, 0.95])
# plot the MAP estimate (the sample with the single highest posterior probability)
plt.plot(x_fits, PeakModel.forward_model(x_fits, chain.mode()), ls='dashed', lw=3, c='C0', label='MAP estimate')
# build the rest of the plot
plt.errorbar(x_data, y_data, yerr=y_error, linestyle='none', c='red', label='data',
marker='D', markerfacecolor='none', markeredgewidth=1.5, markersize=6)
plt.xlabel('x')
plt.ylabel('y')
plt.legend()
plt.grid()
plt.tight_layout()
plt.show()
|
the-stack_0_16116 | import dash_html_components as html
import dash_core_components as dcc
import dash_bootstrap_components as dbc
from datetime import datetime
import dateutil.relativedelta
from plotly.subplots import make_subplots
class marketcapViewClass:
def getMarketcapContent(self, data, bgImage):
content = [dbc.Modal([dbc.ModalHeader("Info DefiChain Market Cap"),
dbc.ModalBody(self.getMarketcapExplanation()),
dbc.ModalFooter(dbc.Button("close", id="closeInfoMarketcap", className="ml-auto"))],
id="modalMarketcap", size='xl'),
html.Div(id='hidden', style = {'display':'none'}),
dbc.Card(dbc.CardBody([html.H4(['DefiChain Market Cap']),
html.Table([html.Tr([html.Td('Select currency for Market Cap representation:'),
html.Td(dcc.Dropdown(id='marketCapCurrencySelection', options=[{'label': 'USD', 'value': 'USD'},
{'label': 'BTC', 'value': 'BTC'}],
value='USD', clearable=False, style=dict(width='150px', verticalAlign="bottom")))])]),
dbc.Col(dcc.Graph(config={'displayModeBar': False}, id='figureMarketcap')),
dbc.Row(dbc.Col(dbc.Button("Info/Explanation", id="openInfoMarketcap")))
]))]
return content
@staticmethod
def createMarketCapFig(data, selection, bgImage):
figMarketcap = make_subplots(
rows=1, cols=1,
vertical_spacing=0.15,
row_width=[1], # from bottom to top
specs=[[{}]],
shared_xaxes=True,
subplot_titles=([]))
if selection == 'BTC':
columnName = 'marketCapBTC'
yAxisLabel = 'Market Cap in BTC'
hoverTemplateRepresenation = '%{y:,.2f}BTC'
else:
columnName = 'marketCapUSD'
yAxisLabel = 'Market Cap in $'
hoverTemplateRepresenation = '$%{y:,.0f}'
lastValidDate = datetime.strptime(data[columnName].dropna().index.values[-1], '%Y-%m-%d')
date2MonthsBack = lastValidDate - dateutil.relativedelta.relativedelta(months=2)
trace_marketcap = dict(type='scatter', name='Market Cap',
x=data[columnName].dropna().index.values, y=data[columnName].dropna().values,
mode='lines', line=dict(color='#ff00af'), line_width=2, hovertemplate=hoverTemplateRepresenation)
figMarketcap.add_trace(trace_marketcap, 1, 1)
figMarketcap.update_yaxes(title_text=yAxisLabel, tickformat=",.0f", gridcolor='#6c757d', color='#6c757d', zerolinecolor='#6c757d', row=1,
col=1) # ,range=[-50, 200]
figMarketcap.update_xaxes(title_text="Date", gridcolor='#6c757d', color='#6c757d', zerolinecolor='#6c757d',
range=[date2MonthsBack.strftime('%Y-%m-%d'), lastValidDate], row=1, col=1)
# Add range slider
figMarketcap.update_layout(xaxis=dict(
rangeselector=dict(
buttons=list([dict(count=30, label="30d", step="day", stepmode="backward"),
dict(count=2, label="2m", step="month", stepmode="backward"),
dict(count=6, label="6m", step="month", stepmode="backward"),
dict(count=1, label="YTD", step="year", stepmode="todate"),
dict(count=1, label="1y", step="year", stepmode="backward"),
dict(step="all")])),
rangeslider=dict(visible=False),
type="date"))
# add background picture
figMarketcap.add_layout_image(dict(source=bgImage, xref="paper", yref="paper", x=0.5, y=0.5, sizex=0.6, sizey=0.6, xanchor="center", yanchor="middle", opacity=0.2))
figMarketcap.update_layout(margin={"t": 60, "l": 0, "b": 0, 'r': 0},
hovermode='x unified',
hoverlabel=dict(font_color="#6c757d",
bgcolor='#ffffff', ),
legend=dict(orientation="h",
yanchor="top",
y=-0.12,
xanchor="right",
x=1),
)
figMarketcap.layout.plot_bgcolor = '#ffffff' # background plotting area
figMarketcap.layout.paper_bgcolor = 'rgba(0,0,0,0)' # background around plotting area
figMarketcap.layout.legend.font.color = '#6c757d' # font color legend
return figMarketcap
@staticmethod
def getMarketcapExplanation():
mcCardExplanation = [html.P(['The market cap of a cryptocurrency coin is the product of the circulating coin amount and the coin price. It is used to compare coins against each other, '
'because the price alone has no meaning. Besides the commonly used USD representation, you can also choose a BTC representation. Due to the strong correlation '
'of DFI and BTC, this could give more insight into the development of DefiChain.' ],style={'text-align': 'justify'}),
html.P([html.B('Hint:'),' The presented diagrams are interactive. You can zoom in (select range with mouse) and rescale (double-click in diagram) as you like.'
' For specific questions it could be helpful to only show a selection of the available data. To exclude entries from the graph click on the corresponding legend entry.'],
style={'text-align': 'justify', 'fontSize':'0.7rem','color':'#6c757d'})
]
return mcCardExplanation |
the-stack_0_16117 | """ define the IntervalIndex """
from operator import le, lt
import textwrap
from typing import Any, Optional, Tuple, Union
import numpy as np
from pandas._config import get_option
from pandas._libs import lib
from pandas._libs.interval import Interval, IntervalMixin, IntervalTree
from pandas._libs.tslibs import Timedelta, Timestamp, to_offset
from pandas._typing import AnyArrayLike, Label
from pandas.util._decorators import Appender, Substitution, cache_readonly
from pandas.util._exceptions import rewrite_exception
from pandas.core.dtypes.cast import (
find_common_type,
infer_dtype_from_scalar,
maybe_downcast_to_dtype,
)
from pandas.core.dtypes.common import (
ensure_platform_int,
is_categorical_dtype,
is_datetime64tz_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_float,
is_float_dtype,
is_integer,
is_integer_dtype,
is_interval_dtype,
is_list_like,
is_number,
is_object_dtype,
is_scalar,
)
from pandas.core.dtypes.missing import isna
from pandas.core.algorithms import take_1d
from pandas.core.arrays.interval import IntervalArray, _interval_shared_docs
import pandas.core.common as com
from pandas.core.indexers import is_valid_positional_slice
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import (
Index,
InvalidIndexError,
_index_shared_docs,
default_pprint,
ensure_index,
maybe_extract_name,
)
from pandas.core.indexes.datetimes import DatetimeIndex, date_range
from pandas.core.indexes.extension import ExtensionIndex, inherit_names
from pandas.core.indexes.multi import MultiIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex, timedelta_range
from pandas.core.ops import get_op_result_name
from pandas.tseries.offsets import DateOffset
_VALID_CLOSED = {"left", "right", "both", "neither"}
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(
dict(
klass="IntervalIndex",
qualname="IntervalIndex",
target_klass="IntervalIndex or list of Intervals",
name=textwrap.dedent(
"""\
name : object, optional
Name to be stored in the index.
"""
),
)
)
def _get_next_label(label):
dtype = getattr(label, "dtype", type(label))
if isinstance(label, (Timestamp, Timedelta)):
dtype = "datetime64"
if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype):
return label + np.timedelta64(1, "ns")
elif is_integer_dtype(dtype):
return label + 1
elif is_float_dtype(dtype):
return np.nextafter(label, np.infty)
else:
raise TypeError(f"cannot determine next label for type {repr(type(label))}")
def _get_prev_label(label):
dtype = getattr(label, "dtype", type(label))
if isinstance(label, (Timestamp, Timedelta)):
dtype = "datetime64"
if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype):
return label - np.timedelta64(1, "ns")
elif is_integer_dtype(dtype):
return label - 1
elif is_float_dtype(dtype):
return np.nextafter(label, -np.infty)
else:
        raise TypeError(f"cannot determine previous label for type {repr(type(label))}")
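# Illustrative behaviour of the two helpers above (comments only, not executed):
#
#     >>> _get_next_label(5), _get_prev_label(5)
#     (6, 4)
#     >>> _get_next_label(Timestamp("2020-01-01"))
#     Timestamp('2020-01-01 00:00:00.000000001')
#
# i.e. integers step by one, floats step to the adjacent representable value,
# and datetime-like labels step by one nanosecond.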
def _new_IntervalIndex(cls, d):
"""
This is called upon unpickling, rather than the default which doesn't have
arguments and breaks __new__.
"""
return cls.from_arrays(**d)
class SetopCheck:
"""
This is called to decorate the set operations of IntervalIndex
to perform the type check in advance.
"""
def __init__(self, op_name):
self.op_name = op_name
def __call__(self, setop):
def func(intvidx_self, other, sort=False):
intvidx_self._assert_can_do_setop(other)
other = ensure_index(other)
if not isinstance(other, IntervalIndex):
result = getattr(intvidx_self.astype(object), self.op_name)(other)
if self.op_name in ("difference",):
result = result.astype(intvidx_self.dtype)
return result
elif intvidx_self.closed != other.closed:
raise ValueError(
"can only do set operations between two IntervalIndex "
"objects that are closed on the same side"
)
# GH 19016: ensure set op will not return a prohibited dtype
subtypes = [intvidx_self.dtype.subtype, other.dtype.subtype]
common_subtype = find_common_type(subtypes)
if is_object_dtype(common_subtype):
raise TypeError(
f"can only do {self.op_name} between two IntervalIndex "
"objects that have compatible dtypes"
)
return setop(intvidx_self, other, sort)
return func
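# Intended usage of SetopCheck (an illustrative note, not executed here): the
# IntervalIndex set operations are wrapped as, e.g.,
#
#     @SetopCheck(op_name="intersection")
#     def intersection(self, other, sort=False):
#         ...
#
# so that type/closed/dtype compatibility is validated before the real set
# operation runs.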
@Appender(
_interval_shared_docs["class"]
% dict(
klass="IntervalIndex",
summary="Immutable index of intervals that are closed on the same side.",
name=_index_doc_kwargs["name"],
versionadded="0.20.0",
extra_attributes="is_overlapping\nvalues\n",
extra_methods="",
examples=textwrap.dedent(
"""\
Examples
--------
A new ``IntervalIndex`` is typically constructed using
:func:`interval_range`:
>>> pd.interval_range(start=0, end=5)
IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]],
closed='right',
dtype='interval[int64]')
It may also be constructed using one of the constructor
methods: :meth:`IntervalIndex.from_arrays`,
:meth:`IntervalIndex.from_breaks`, and :meth:`IntervalIndex.from_tuples`.
See further examples in the doc strings of ``interval_range`` and the
mentioned constructor methods.
"""
),
)
)
@inherit_names(["set_closed", "to_tuples"], IntervalArray, wrap=True)
@inherit_names(
["__array__", "overlaps", "contains", "left", "right", "length"], IntervalArray,
)
@inherit_names(
["is_non_overlapping_monotonic", "mid", "closed"], IntervalArray, cache=True,
)
class IntervalIndex(IntervalMixin, ExtensionIndex):
_typ = "intervalindex"
_comparables = ["name"]
_attributes = ["name"]
# we would like our indexing holder to defer to us
_defer_to_indexing = True
# Immutable, so we are able to cache computations like isna in '_mask'
_mask = None
_data: IntervalArray
# --------------------------------------------------------------------
# Constructors
def __new__(
cls,
data,
closed=None,
dtype=None,
copy: bool = False,
name=None,
verify_integrity: bool = True,
):
name = maybe_extract_name(name, data, cls)
with rewrite_exception("IntervalArray", cls.__name__):
array = IntervalArray(
data,
closed=closed,
copy=copy,
dtype=dtype,
verify_integrity=verify_integrity,
)
return cls._simple_new(array, name)
@classmethod
def _simple_new(cls, array: IntervalArray, name: Label = None):
"""
Construct from an IntervalArray
Parameters
----------
array : IntervalArray
name : Label, default None
Attached as result.name
"""
assert isinstance(array, IntervalArray), type(array)
result = IntervalMixin.__new__(cls)
result._data = array
result.name = name
result._cache = {}
result._no_setting_name = False
result._reset_identity()
return result
@classmethod
@Appender(
_interval_shared_docs["from_breaks"]
% dict(
klass="IntervalIndex",
examples=textwrap.dedent(
"""\
Examples
--------
>>> pd.IntervalIndex.from_breaks([0, 1, 2, 3])
IntervalIndex([(0, 1], (1, 2], (2, 3]],
closed='right',
dtype='interval[int64]')
"""
),
)
)
def from_breaks(
cls, breaks, closed: str = "right", name=None, copy: bool = False, dtype=None
):
with rewrite_exception("IntervalArray", cls.__name__):
array = IntervalArray.from_breaks(
breaks, closed=closed, copy=copy, dtype=dtype
)
return cls._simple_new(array, name=name)
@classmethod
@Appender(
_interval_shared_docs["from_arrays"]
% dict(
klass="IntervalIndex",
examples=textwrap.dedent(
"""\
Examples
--------
>>> pd.IntervalIndex.from_arrays([0, 1, 2], [1, 2, 3])
IntervalIndex([(0, 1], (1, 2], (2, 3]],
closed='right',
dtype='interval[int64]')
"""
),
)
)
def from_arrays(
cls,
left,
right,
closed: str = "right",
name=None,
copy: bool = False,
dtype=None,
):
with rewrite_exception("IntervalArray", cls.__name__):
array = IntervalArray.from_arrays(
left, right, closed, copy=copy, dtype=dtype
)
return cls._simple_new(array, name=name)
@classmethod
@Appender(
_interval_shared_docs["from_tuples"]
% dict(
klass="IntervalIndex",
examples=textwrap.dedent(
"""\
Examples
--------
>>> pd.IntervalIndex.from_tuples([(0, 1), (1, 2)])
IntervalIndex([(0, 1], (1, 2]],
closed='right',
dtype='interval[int64]')
"""
),
)
)
def from_tuples(
cls, data, closed: str = "right", name=None, copy: bool = False, dtype=None
):
with rewrite_exception("IntervalArray", cls.__name__):
arr = IntervalArray.from_tuples(data, closed=closed, copy=copy, dtype=dtype)
return cls._simple_new(arr, name=name)
# --------------------------------------------------------------------
@Appender(Index._shallow_copy.__doc__)
def _shallow_copy(self, values=None, name: Label = lib.no_default):
name = self.name if name is lib.no_default else name
cache = self._cache.copy() if values is None else {}
if values is None:
values = self._data
result = self._simple_new(values, name=name)
result._cache = cache
return result
@cache_readonly
def _isnan(self):
"""
Return a mask indicating if each value is NA.
"""
if self._mask is None:
self._mask = isna(self.left)
return self._mask
@cache_readonly
def _engine(self):
left = self._maybe_convert_i8(self.left)
right = self._maybe_convert_i8(self.right)
return IntervalTree(left, right, closed=self.closed)
def __contains__(self, key: Any) -> bool:
"""
return a boolean if this key is IN the index
We *only* accept an Interval
Parameters
----------
key : Interval
Returns
-------
bool
"""
hash(key)
if not isinstance(key, Interval):
return False
try:
self.get_loc(key)
return True
except KeyError:
return False
@cache_readonly
def _multiindex(self) -> MultiIndex:
return MultiIndex.from_arrays([self.left, self.right], names=["left", "right"])
@cache_readonly
def values(self) -> IntervalArray:
"""
Return the IntervalIndex's data as an IntervalArray.
"""
return self._data
@property
def _has_complex_internals(self) -> bool:
# used to avoid libreduction code paths, which raise or require conversion
return True
def __array_wrap__(self, result, context=None):
# we don't want the superclass implementation
return result
def __reduce__(self):
d = dict(left=self.left, right=self.right)
d.update(self._get_attributes_dict())
return _new_IntervalIndex, (type(self), d), None
@Appender(Index.astype.__doc__)
def astype(self, dtype, copy=True):
with rewrite_exception("IntervalArray", type(self).__name__):
new_values = self._values.astype(dtype, copy=copy)
if is_interval_dtype(new_values.dtype):
return self._shallow_copy(new_values)
return Index.astype(self, dtype, copy=copy)
@property
def inferred_type(self) -> str:
"""Return a string of the type inferred from the values"""
return "interval"
@Appender(Index.memory_usage.__doc__)
def memory_usage(self, deep: bool = False) -> int:
# we don't use an explicit engine
# so return the bytes here
return self.left.memory_usage(deep=deep) + self.right.memory_usage(deep=deep)
# IntervalTree doesn't have a is_monotonic_decreasing, so have to override
# the Index implementation
@cache_readonly
def is_monotonic_decreasing(self) -> bool:
"""
Return True if the IntervalIndex is monotonic decreasing (only equal or
decreasing values), else False
"""
return self[::-1].is_monotonic_increasing
@cache_readonly
def is_unique(self):
"""
Return True if the IntervalIndex contains unique elements, else False.
"""
left = self.left
right = self.right
if self.isna().sum() > 1:
return False
if left.is_unique or right.is_unique:
return True
seen_pairs = set()
check_idx = np.where(left.duplicated(keep=False))[0]
for idx in check_idx:
pair = (left[idx], right[idx])
if pair in seen_pairs:
return False
seen_pairs.add(pair)
return True
@property
def is_overlapping(self) -> bool:
"""
Return True if the IntervalIndex has overlapping intervals, else False.
Two intervals overlap if they share a common point, including closed
endpoints. Intervals that only have an open endpoint in common do not
overlap.
.. versionadded:: 0.24.0
Returns
-------
bool
Boolean indicating if the IntervalIndex has overlapping intervals.
See Also
--------
Interval.overlaps : Check whether two Interval objects overlap.
IntervalIndex.overlaps : Check an IntervalIndex elementwise for
overlaps.
Examples
--------
>>> index = pd.IntervalIndex.from_tuples([(0, 2), (1, 3), (4, 5)])
>>> index
IntervalIndex([(0, 2], (1, 3], (4, 5]],
closed='right',
dtype='interval[int64]')
>>> index.is_overlapping
True
Intervals that share closed endpoints overlap:
>>> index = pd.interval_range(0, 3, closed='both')
>>> index
IntervalIndex([[0, 1], [1, 2], [2, 3]],
closed='both',
dtype='interval[int64]')
>>> index.is_overlapping
True
Intervals that only have an open endpoint in common do not overlap:
>>> index = pd.interval_range(0, 3, closed='left')
>>> index
IntervalIndex([[0, 1), [1, 2), [2, 3)],
closed='left',
dtype='interval[int64]')
>>> index.is_overlapping
False
"""
# GH 23309
return self._engine.is_overlapping
def _should_fallback_to_positional(self) -> bool:
# integer lookups in Series.__getitem__ are unambiguously
# positional in this case
return self.dtype.subtype.kind in ["m", "M"]
def _maybe_cast_slice_bound(self, label, side, kind):
return getattr(self, side)._maybe_cast_slice_bound(label, side, kind)
@Appender(Index._convert_list_indexer.__doc__)
def _convert_list_indexer(self, keyarr):
"""
we are passed a list-like indexer. Return the
indexer for matching intervals.
"""
locs = self.get_indexer_for(keyarr)
# we have missing values
if (locs == -1).any():
raise KeyError
return locs
def _can_reindex(self, indexer: np.ndarray) -> None:
"""
Check if we are allowing reindexing with this particular indexer.
Parameters
----------
indexer : an integer indexer
Raises
------
ValueError if its a duplicate axis
"""
# trying to reindex on an axis with duplicates
if self.is_overlapping and len(indexer):
raise ValueError("cannot reindex from an overlapping axis")
def _needs_i8_conversion(self, key) -> bool:
"""
Check if a given key needs i8 conversion. Conversion is necessary for
Timestamp, Timedelta, DatetimeIndex, and TimedeltaIndex keys. An
        Interval-like requires conversion if its endpoints are one of the
aforementioned types.
Assumes that any list-like data has already been cast to an Index.
Parameters
----------
key : scalar or Index-like
The key that should be checked for i8 conversion
Returns
-------
bool
"""
if is_interval_dtype(key) or isinstance(key, Interval):
return self._needs_i8_conversion(key.left)
i8_types = (Timestamp, Timedelta, DatetimeIndex, TimedeltaIndex)
return isinstance(key, i8_types)
def _maybe_convert_i8(self, key):
"""
        Maybe convert a given key to its equivalent i8 value(s). Used as a
preprocessing step prior to IntervalTree queries (self._engine), which
expects numeric data.
Parameters
----------
key : scalar or list-like
The key that should maybe be converted to i8.
Returns
-------
scalar or list-like
The original key if no conversion occurred, int if converted scalar,
Int64Index if converted list-like.
"""
original = key
if is_list_like(key):
key = ensure_index(key)
if not self._needs_i8_conversion(key):
return original
scalar = is_scalar(key)
if is_interval_dtype(key) or isinstance(key, Interval):
# convert left/right and reconstruct
left = self._maybe_convert_i8(key.left)
right = self._maybe_convert_i8(key.right)
constructor = Interval if scalar else IntervalIndex.from_arrays
return constructor(left, right, closed=self.closed)
if scalar:
# Timestamp/Timedelta
key_dtype, key_i8 = infer_dtype_from_scalar(key, pandas_dtype=True)
else:
# DatetimeIndex/TimedeltaIndex
key_dtype, key_i8 = key.dtype, Index(key.asi8)
if key.hasnans:
                # convert NaT from its i8 value to np.nan so it's not viewed
# as a valid value, maybe causing errors (e.g. is_overlapping)
key_i8 = key_i8.where(~key._isnan)
# ensure consistency with IntervalIndex subtype
subtype = self.dtype.subtype
if not is_dtype_equal(subtype, key_dtype):
raise ValueError(
f"Cannot index an IntervalIndex of subtype {subtype} with "
f"values of dtype {key_dtype}"
)
return key_i8
def _check_method(self, method):
if method is None:
return
if method in ["bfill", "backfill", "pad", "ffill", "nearest"]:
raise NotImplementedError(
f"method {method} not yet implemented for IntervalIndex"
)
raise ValueError("Invalid fill method")
def _searchsorted_monotonic(self, label, side, exclude_label=False):
if not self.is_non_overlapping_monotonic:
raise KeyError(
"can only get slices from an IntervalIndex if bounds are "
"non-overlapping and all monotonic increasing or decreasing"
)
if isinstance(label, IntervalMixin):
raise NotImplementedError("Interval objects are not currently supported")
# GH 20921: "not is_monotonic_increasing" for the second condition
# instead of "is_monotonic_decreasing" to account for single element
# indexes being both increasing and decreasing
if (side == "left" and self.left.is_monotonic_increasing) or (
side == "right" and not self.left.is_monotonic_increasing
):
sub_idx = self.right
if self.open_right or exclude_label:
label = _get_next_label(label)
else:
sub_idx = self.left
if self.open_left or exclude_label:
label = _get_prev_label(label)
return sub_idx._searchsorted_monotonic(label, side)
def get_loc(
self, key, method: Optional[str] = None, tolerance=None
) -> Union[int, slice, np.ndarray]:
"""
Get integer location, slice or boolean mask for requested label.
Parameters
----------
key : label
method : {None}, optional
* default: matches where the label is within an interval only.
Returns
-------
int if unique index, slice if monotonic index, else mask
Examples
--------
>>> i1, i2 = pd.Interval(0, 1), pd.Interval(1, 2)
>>> index = pd.IntervalIndex([i1, i2])
>>> index.get_loc(1)
0
You can also supply a point inside an interval.
>>> index.get_loc(1.5)
1
If a label is in several intervals, you get the locations of all the
relevant intervals.
>>> i3 = pd.Interval(0, 2)
>>> overlapping_index = pd.IntervalIndex([i1, i2, i3])
>>> overlapping_index.get_loc(0.5)
array([ True, False, True])
Only exact matches will be returned if an interval is provided.
>>> index.get_loc(pd.Interval(0, 1))
0
"""
self._check_method(method)
if not is_scalar(key):
raise InvalidIndexError(key)
if isinstance(key, Interval):
if self.closed != key.closed:
raise KeyError(key)
mask = (self.left == key.left) & (self.right == key.right)
else:
# assume scalar
op_left = le if self.closed_left else lt
op_right = le if self.closed_right else lt
try:
mask = op_left(self.left, key) & op_right(key, self.right)
except TypeError as err:
# scalar is not comparable to II subtype --> invalid label
raise KeyError(key) from err
matches = mask.sum()
if matches == 0:
raise KeyError(key)
elif matches == 1:
return mask.argmax()
return lib.maybe_booleans_to_slice(mask.view("u1"))
@Substitution(
**dict(
_index_doc_kwargs,
**{
"raises_section": textwrap.dedent(
"""
Raises
------
NotImplementedError
If any method argument other than the default of
None is specified as these are not yet implemented.
"""
)
},
)
)
@Appender(_index_shared_docs["get_indexer"])
def get_indexer(
self,
target: AnyArrayLike,
method: Optional[str] = None,
limit: Optional[int] = None,
tolerance: Optional[Any] = None,
) -> np.ndarray:
self._check_method(method)
if self.is_overlapping:
raise InvalidIndexError(
"cannot handle overlapping indices; "
"use IntervalIndex.get_indexer_non_unique"
)
target_as_index = ensure_index(target)
if isinstance(target_as_index, IntervalIndex):
# equal indexes -> 1:1 positional match
if self.equals(target_as_index):
return np.arange(len(self), dtype="intp")
# different closed or incompatible subtype -> no matches
common_subtype = find_common_type(
[self.dtype.subtype, target_as_index.dtype.subtype]
)
if self.closed != target_as_index.closed or is_object_dtype(common_subtype):
return np.repeat(np.intp(-1), len(target_as_index))
# non-overlapping -> at most one match per interval in target_as_index
# want exact matches -> need both left/right to match, so defer to
# left/right get_indexer, compare elementwise, equality -> match
left_indexer = self.left.get_indexer(target_as_index.left)
right_indexer = self.right.get_indexer(target_as_index.right)
indexer = np.where(left_indexer == right_indexer, left_indexer, -1)
elif is_categorical_dtype(target_as_index.dtype):
# get an indexer for unique categories then propagate to codes via take_1d
categories_indexer = self.get_indexer(target_as_index.categories)
indexer = take_1d(categories_indexer, target_as_index.codes, fill_value=-1)
elif not is_object_dtype(target_as_index):
# homogeneous scalar index: use IntervalTree
target_as_index = self._maybe_convert_i8(target_as_index)
indexer = self._engine.get_indexer(target_as_index.values)
else:
# heterogeneous scalar index: defer elementwise to get_loc
            # (non-overlapping so get_loc guarantees scalar or KeyError)
indexer = []
for key in target_as_index:
try:
loc = self.get_loc(key)
except KeyError:
loc = -1
except InvalidIndexError as err:
# i.e. non-scalar key
raise TypeError(key) from err
indexer.append(loc)
return ensure_platform_int(indexer)
@Appender(_index_shared_docs["get_indexer_non_unique"] % _index_doc_kwargs)
def get_indexer_non_unique(
self, target: AnyArrayLike
) -> Tuple[np.ndarray, np.ndarray]:
target_as_index = ensure_index(target)
# check that target_as_index IntervalIndex is compatible
if isinstance(target_as_index, IntervalIndex):
common_subtype = find_common_type(
[self.dtype.subtype, target_as_index.dtype.subtype]
)
if self.closed != target_as_index.closed or is_object_dtype(common_subtype):
# different closed or incompatible subtype -> no matches
return (
np.repeat(-1, len(target_as_index)),
np.arange(len(target_as_index)),
)
if is_object_dtype(target_as_index) or isinstance(
target_as_index, IntervalIndex
):
# target_as_index might contain intervals: defer elementwise to get_loc
indexer, missing = [], []
for i, key in enumerate(target_as_index):
try:
locs = self.get_loc(key)
if isinstance(locs, slice):
locs = np.arange(locs.start, locs.stop, locs.step, dtype="intp")
locs = np.array(locs, ndmin=1)
except KeyError:
missing.append(i)
locs = np.array([-1])
indexer.append(locs)
indexer = np.concatenate(indexer)
else:
target_as_index = self._maybe_convert_i8(target_as_index)
indexer, missing = self._engine.get_indexer_non_unique(
target_as_index.values
)
return ensure_platform_int(indexer), ensure_platform_int(missing)
def get_indexer_for(self, target: AnyArrayLike, **kwargs) -> np.ndarray:
"""
Guaranteed return of an indexer even when overlapping.
This dispatches to get_indexer or get_indexer_non_unique
as appropriate.
Returns
-------
numpy.ndarray
List of indices.
"""
if self.is_overlapping:
return self.get_indexer_non_unique(target)[0]
return self.get_indexer(target, **kwargs)
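    # A hedged usage sketch, not taken from the pandas docs (the index below is
    # illustrative only): with an overlapping index such as
    #   ii = pd.IntervalIndex.from_tuples([(0, 2), (1, 3)])
    # ``ii.get_indexer([1.5])`` raises InvalidIndexError, whereas
    # ``ii.get_indexer_for([1.5])`` falls back to get_indexer_non_unique and
    # returns the positions of every containing interval, e.g. array([0, 1]).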
def _convert_slice_indexer(self, key: slice, kind: str):
if not (key.step is None or key.step == 1):
# GH#31658 if label-based, we require step == 1,
# if positional, we disallow float start/stop
msg = "label-based slicing with step!=1 is not supported for IntervalIndex"
if kind == "loc":
raise ValueError(msg)
elif kind == "getitem":
if not is_valid_positional_slice(key):
# i.e. this cannot be interpreted as a positional slice
raise ValueError(msg)
return super()._convert_slice_indexer(key, kind)
@Appender(Index.where.__doc__)
def where(self, cond, other=None):
if other is None:
other = self._na_value
values = np.where(cond, self._values, other)
result = IntervalArray(values)
return self._shallow_copy(result)
def delete(self, loc):
"""
Return a new IntervalIndex with passed location(-s) deleted
Returns
-------
IntervalIndex
"""
new_left = self.left.delete(loc)
new_right = self.right.delete(loc)
result = self._data._shallow_copy(new_left, new_right)
return self._shallow_copy(result)
def insert(self, loc, item):
"""
Return a new IntervalIndex inserting new item at location. Follows
Python list.append semantics for negative values. Only Interval
objects and NA can be inserted into an IntervalIndex
Parameters
----------
loc : int
item : object
Returns
-------
IntervalIndex
"""
if isinstance(item, Interval):
if item.closed != self.closed:
raise ValueError(
"inserted item must be closed on the same side as the index"
)
left_insert = item.left
right_insert = item.right
elif is_scalar(item) and isna(item):
# GH 18295
left_insert = right_insert = item
else:
raise ValueError(
"can only insert Interval objects and NA into an IntervalIndex"
)
new_left = self.left.insert(loc, left_insert)
new_right = self.right.insert(loc, right_insert)
result = self._data._shallow_copy(new_left, new_right)
return self._shallow_copy(result)
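    # A hedged usage sketch (example values only): with
    #   ii = pd.IntervalIndex.from_breaks([0, 1, 2], closed="right")
    # ``ii.insert(1, pd.Interval(5, 6))`` yields IntervalIndex([(0, 1], (5, 6], (1, 2]]),
    # while inserting an Interval closed on a different side, or any non-NA scalar,
    # raises ValueError as described above.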
@Appender(_index_shared_docs["take"] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs):
result = self._data.take(
indices, axis=axis, allow_fill=allow_fill, fill_value=fill_value, **kwargs
)
return self._shallow_copy(result)
# --------------------------------------------------------------------
# Rendering Methods
# __repr__ associated methods are based on MultiIndex
def _format_with_header(self, header, **kwargs):
return header + list(self._format_native_types(**kwargs))
def _format_native_types(self, na_rep="NaN", quoting=None, **kwargs):
# GH 28210: use base method but with different default na_rep
return super()._format_native_types(na_rep=na_rep, quoting=quoting, **kwargs)
def _format_data(self, name=None):
# TODO: integrate with categorical and make generic
# name argument is unused here; just for compat with base / categorical
n = len(self)
max_seq_items = min((get_option("display.max_seq_items") or n) // 10, 10)
formatter = str
if n == 0:
summary = "[]"
elif n == 1:
first = formatter(self[0])
summary = f"[{first}]"
elif n == 2:
first = formatter(self[0])
last = formatter(self[-1])
summary = f"[{first}, {last}]"
else:
if n > max_seq_items:
n = min(max_seq_items // 2, 10)
head = [formatter(x) for x in self[:n]]
tail = [formatter(x) for x in self[-n:]]
head_joined = ", ".join(head)
tail_joined = ", ".join(tail)
summary = f"[{head_joined} ... {tail_joined}]"
else:
tail = [formatter(x) for x in self]
joined = ", ".join(tail)
summary = f"[{joined}]"
return summary + "," + self._format_space()
def _format_attrs(self):
attrs = [("closed", repr(self.closed))]
if self.name is not None:
attrs.append(("name", default_pprint(self.name)))
attrs.append(("dtype", f"'{self.dtype}'"))
return attrs
def _format_space(self) -> str:
space = " " * (len(type(self).__name__) + 1)
return f"\n{space}"
# --------------------------------------------------------------------
def argsort(self, *args, **kwargs) -> np.ndarray:
return np.lexsort((self.right, self.left))
def equals(self, other) -> bool:
"""
Determines if two IntervalIndex objects contain the same elements.
"""
if self.is_(other):
return True
# if we can coerce to an II
# then we can compare
if not isinstance(other, IntervalIndex):
if not is_interval_dtype(other):
return False
other = Index(other)
return (
self.left.equals(other.left)
and self.right.equals(other.right)
and self.closed == other.closed
)
@Appender(Index.intersection.__doc__)
@SetopCheck(op_name="intersection")
def intersection(
self, other: "IntervalIndex", sort: bool = False
) -> "IntervalIndex":
if self.left.is_unique and self.right.is_unique:
taken = self._intersection_unique(other)
elif other.left.is_unique and other.right.is_unique and self.isna().sum() <= 1:
# Swap other/self if other is unique and self does not have
# multiple NaNs
taken = other._intersection_unique(self)
else:
# duplicates
taken = self._intersection_non_unique(other)
if sort is None:
taken = taken.sort_values()
return taken
def _intersection_unique(self, other: "IntervalIndex") -> "IntervalIndex":
"""
Used when the IntervalIndex does not have any common endpoint,
        no matter left or right.
Return the intersection with another IntervalIndex.
Parameters
----------
other : IntervalIndex
Returns
-------
IntervalIndex
"""
lindexer = self.left.get_indexer(other.left)
rindexer = self.right.get_indexer(other.right)
match = (lindexer == rindexer) & (lindexer != -1)
indexer = lindexer.take(match.nonzero()[0])
return self.take(indexer)
def _intersection_non_unique(self, other: "IntervalIndex") -> "IntervalIndex":
"""
Used when the IntervalIndex does have some common endpoints,
        on either side.
Return the intersection with another IntervalIndex.
Parameters
----------
other : IntervalIndex
Returns
-------
IntervalIndex
"""
mask = np.zeros(len(self), dtype=bool)
if self.hasnans and other.hasnans:
first_nan_loc = np.arange(len(self))[self.isna()][0]
mask[first_nan_loc] = True
other_tups = set(zip(other.left, other.right))
for i, tup in enumerate(zip(self.left, self.right)):
if tup in other_tups:
mask[i] = True
return self[mask]
def _setop(op_name: str, sort=None):
@SetopCheck(op_name=op_name)
def func(self, other, sort=sort):
result = getattr(self._multiindex, op_name)(other._multiindex, sort=sort)
result_name = get_op_result_name(self, other)
# GH 19101: ensure empty results have correct dtype
if result.empty:
result = result._values.astype(self.dtype.subtype)
else:
result = result._values
return type(self).from_tuples(result, closed=self.closed, name=result_name)
return func
@property
def is_all_dates(self) -> bool:
"""
This is False even when left/right contain datetime-like objects,
as the check is done on the Interval itself
"""
return False
union = _setop("union")
difference = _setop("difference")
symmetric_difference = _setop("symmetric_difference")
# TODO: arithmetic operations
# GH#30817 until IntervalArray implements inequalities, get them from Index
def __lt__(self, other):
return Index.__lt__(self, other)
def __le__(self, other):
return Index.__le__(self, other)
def __gt__(self, other):
return Index.__gt__(self, other)
def __ge__(self, other):
return Index.__ge__(self, other)
IntervalIndex._add_logical_methods_disabled()
def _is_valid_endpoint(endpoint) -> bool:
"""
Helper for interval_range to check if start/end are valid types.
"""
return any(
[
is_number(endpoint),
isinstance(endpoint, Timestamp),
isinstance(endpoint, Timedelta),
endpoint is None,
]
)
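# Hedged examples of the accepted endpoint types (illustrative only):
# _is_valid_endpoint(5), _is_valid_endpoint(Timestamp("2020-01-01")) and
# _is_valid_endpoint(None) are all True, while a plain string such as
# _is_valid_endpoint("2020-01-01") is False.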
def _is_type_compatible(a, b) -> bool:
"""
Helper for interval_range to check type compat of start/end/freq.
"""
is_ts_compat = lambda x: isinstance(x, (Timestamp, DateOffset))
is_td_compat = lambda x: isinstance(x, (Timedelta, DateOffset))
return (
(is_number(a) and is_number(b))
or (is_ts_compat(a) and is_ts_compat(b))
or (is_td_compat(a) and is_td_compat(b))
or com.any_none(a, b)
)
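# Likewise, a hedged sketch of the compatibility rules: numeric with numeric
# (e.g. _is_type_compatible(0, 2.5)) and Timestamp with DateOffset both pass,
# whereas a Timestamp start combined with a numeric freq fails unless one of the
# two values is None.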
def interval_range(
start=None, end=None, periods=None, freq=None, name=None, closed="right"
):
"""
Return a fixed frequency IntervalIndex.
Parameters
----------
start : numeric or datetime-like, default None
Left bound for generating intervals.
end : numeric or datetime-like, default None
Right bound for generating intervals.
periods : int, default None
Number of periods to generate.
freq : numeric, str, or DateOffset, default None
The length of each interval. Must be consistent with the type of start
and end, e.g. 2 for numeric, or '5H' for datetime-like. Default is 1
for numeric and 'D' for datetime-like.
name : str, default None
Name of the resulting IntervalIndex.
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both
or neither.
Returns
-------
IntervalIndex
See Also
--------
IntervalIndex : An Index of intervals that are all closed on the same side.
Notes
-----
Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
exactly three must be specified. If ``freq`` is omitted, the resulting
``IntervalIndex`` will have ``periods`` linearly spaced elements between
``start`` and ``end``, inclusively.
To learn more about datetime-like frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
Examples
--------
Numeric ``start`` and ``end`` is supported.
>>> pd.interval_range(start=0, end=5)
IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]],
closed='right', dtype='interval[int64]')
Additionally, datetime-like input is also supported.
>>> pd.interval_range(start=pd.Timestamp('2017-01-01'),
... end=pd.Timestamp('2017-01-04'))
IntervalIndex([(2017-01-01, 2017-01-02], (2017-01-02, 2017-01-03],
(2017-01-03, 2017-01-04]],
closed='right', dtype='interval[datetime64[ns]]')
    The ``freq`` parameter specifies the frequency between the left and right
endpoints of the individual intervals within the ``IntervalIndex``. For
numeric ``start`` and ``end``, the frequency must also be numeric.
>>> pd.interval_range(start=0, periods=4, freq=1.5)
IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]],
closed='right', dtype='interval[float64]')
Similarly, for datetime-like ``start`` and ``end``, the frequency must be
convertible to a DateOffset.
>>> pd.interval_range(start=pd.Timestamp('2017-01-01'),
... periods=3, freq='MS')
IntervalIndex([(2017-01-01, 2017-02-01], (2017-02-01, 2017-03-01],
(2017-03-01, 2017-04-01]],
closed='right', dtype='interval[datetime64[ns]]')
Specify ``start``, ``end``, and ``periods``; the frequency is generated
automatically (linearly spaced).
>>> pd.interval_range(start=0, end=6, periods=4)
IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]],
closed='right',
dtype='interval[float64]')
The ``closed`` parameter specifies which endpoints of the individual
intervals within the ``IntervalIndex`` are closed.
>>> pd.interval_range(end=5, periods=4, closed='both')
IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]],
closed='both', dtype='interval[int64]')
"""
start = com.maybe_box_datetimelike(start)
end = com.maybe_box_datetimelike(end)
endpoint = start if start is not None else end
if freq is None and com.any_none(periods, start, end):
freq = 1 if is_number(endpoint) else "D"
if com.count_not_none(start, end, periods, freq) != 3:
raise ValueError(
"Of the four parameters: start, end, periods, and "
"freq, exactly three must be specified"
)
if not _is_valid_endpoint(start):
raise ValueError(f"start must be numeric or datetime-like, got {start}")
elif not _is_valid_endpoint(end):
raise ValueError(f"end must be numeric or datetime-like, got {end}")
if is_float(periods):
periods = int(periods)
elif not is_integer(periods) and periods is not None:
raise TypeError(f"periods must be a number, got {periods}")
if freq is not None and not is_number(freq):
try:
freq = to_offset(freq)
except ValueError as err:
raise ValueError(
f"freq must be numeric or convertible to DateOffset, got {freq}"
) from err
# verify type compatibility
if not all(
[
_is_type_compatible(start, end),
_is_type_compatible(start, freq),
_is_type_compatible(end, freq),
]
):
raise TypeError("start, end, freq need to be type compatible")
# +1 to convert interval count to breaks count (n breaks = n-1 intervals)
if periods is not None:
periods += 1
if is_number(endpoint):
# force consistency between start/end/freq (lower end if freq skips it)
if com.all_not_none(start, end, freq):
end -= (end - start) % freq
# compute the period/start/end if unspecified (at most one)
if periods is None:
periods = int((end - start) // freq) + 1
elif start is None:
start = end - (periods - 1) * freq
elif end is None:
end = start + (periods - 1) * freq
breaks = np.linspace(start, end, periods)
if all(is_integer(x) for x in com.not_none(start, end, freq)):
# np.linspace always produces float output
breaks = maybe_downcast_to_dtype(breaks, "int64")
else:
# delegate to the appropriate range function
if isinstance(endpoint, Timestamp):
range_func = date_range
else:
range_func = timedelta_range
breaks = range_func(start=start, end=end, periods=periods, freq=freq)
return IntervalIndex.from_breaks(breaks, name=name, closed=closed)
|
the-stack_0_16119 | """add discovery
Revision ID: 5a05464c07ae
Revises: c4a0292785e6
Create Date: 2017-09-06 21:55:21.193584
"""
# revision identifiers, used by Alembic.
revision = "5a05464c07ae"
down_revision = "c4a0292785e6"
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column("elements", sa.Column("discoverers", sa.String))
op.add_column("elements", sa.Column("discovery_year", sa.Integer))
op.add_column("elements", sa.Column("discovery_location", sa.String))
def downgrade():
with op.batch_alter_table("elements") as batch_op:
batch_op.drop_column("discoverers")
batch_op.drop_column("discovery_year")
batch_op.drop_column("discovery_location")
|
the-stack_0_16120 | # PySpice example code
import matplotlib.pyplot as plt
import PySpice.Logging.Logging as Logging
from PySpice.Doc.ExampleTools import find_libraries
from PySpice.Probe.Plot import plot
from PySpice.Spice.Library import SpiceLibrary
from PySpice.Spice.Netlist import Circuit
from PySpice.Unit import *
logger = Logging.setup_logging()
libraries_path = find_libraries()
spice_library = SpiceLibrary(libraries_path)
circuit = Circuit('NMOS Transistor')
circuit.include(spice_library['ptm65nm_nmos'])
# Define the DC supply voltage value
Vdd = 1.1
# Instanciate circuit elements
Vgate = circuit.V('gate', 'gatenode', circuit.gnd, 0@u_V)
Vdrain = circuit.V('drain', 'vdd', circuit.gnd, u_V(Vdd))
# M <name> <drain node> <gate node> <source node> <bulk/substrate node>
circuit.MOSFET(1, 'vdd', 'gatenode', circuit.gnd, circuit.gnd, model='ptm65nm_nmos')
#r# We plot the characteristics :math:`Id = f(Vgs)` using a DC sweep simulation.
simulator = circuit.simulator(temperature=25, nominal_temperature=25)
analysis = simulator.dc(Vgate=slice(0, Vdd, .01))
figure, ax = plt.subplots(figsize=(20, 10))
ax.plot(analysis['gatenode'], u_mA(-analysis.Vdrain))
ax.legend(['NMOS characteristic'])
ax.grid()
ax.set_xlabel('Vgs [V]')
ax.set_ylabel('Id [mA]')
plt.tight_layout()
plt.show()
#f# save_figure('figure', 'transistor-nmos-plot.png')
|
the-stack_0_16123 | import hashlib
import itertools
import re
import sys
import warnings
from collections import defaultdict
from importlib import import_module
from types import ModuleType
from content_editor.models import Type
from django.conf import settings
from django.core.checks import Warning
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models import Q, signals
from django.urls import NoReverseMatch, include, re_path, reverse
from django.utils.translation import get_language, gettext_lazy as _
from feincms3.mixins import ChoicesCharField
__all__ = (
"PageTypeMixin",
"TemplateType",
"ApplicationType",
"apps_middleware",
"apps_urlconf",
"page_for_app_request",
"reverse_any",
"reverse_app",
"reverse_fallback",
)
_APPS_MODEL = None
def reverse_any(viewnames, urlconf=None, args=None, kwargs=None, *fargs, **fkwargs):
"""
Tries reversing a list of viewnames with the same arguments, and returns
the first result where no ``NoReverseMatch`` exception is raised.
Usage::
url = reverse_any(
("blog:article-detail", "articles:article-detail"),
kwargs={"slug": "article-slug"},
)
"""
for viewname in viewnames:
try:
return reverse(viewname, urlconf, args, kwargs, *fargs, **fkwargs)
except NoReverseMatch:
pass
raise NoReverseMatch(
"Reverse for any of '%s' with arguments '%s' and keyword arguments"
" '%s' not found." % ("', '".join(viewnames), args or [], kwargs or {})
)
def reverse_app(namespaces, viewname, *args, languages=None, **kwargs):
"""
Reverse app URLs, preferring the active language.
``reverse_app`` first generates a list of viewnames and passes them on
to ``reverse_any``.
Assuming that we're trying to reverse the URL of an article detail view,
    that the project is configured with German, English and French as available
    languages, French as the active language and that the current article is a
publication, the viewnames are:
- ``apps-fr.publications.article-detail``
- ``apps-fr.articles.article-detail``
- ``apps-de.publications.article-detail``
- ``apps-de.articles.article-detail``
- ``apps-en.publications.article-detail``
- ``apps-en.articles.article-detail``
    reverse_app tries harder to return a URL in the correct language than to
    return a URL for the correct instance namespace.
Example::
url = reverse_app(
("category-1", "blog"),
"post-detail",
kwargs={"year": 2016, "slug": "my-cat"},
)
"""
if languages is None:
current = get_language()
languages = sorted(
(row[0] for row in settings.LANGUAGES), key=lambda lang: lang != current
)
viewnames = [
":".join(r)
for r in itertools.product(
(
f"{_APPS_MODEL.LANGUAGE_CODES_NAMESPACE}-{language}"
for language in languages
),
(namespaces if isinstance(namespaces, (list, tuple)) else (namespaces,)),
(viewname,),
)
]
return reverse_any(viewnames, *args, **kwargs)
def reverse_fallback(fallback, fn, *args, **kwargs):
"""
Returns the result of ``fn(*args, **kwargs)``, or ``fallback`` if the
former raises a ``NoReverseMatch`` exception. This is especially useful for
    reversing app URLs from outside the app when you do not want crashes if the
app isn't available anywhere.
The following two examples are equivalent, choose whichever you like best::
reverse_fallback(
"/",
lambda: reverse_app(
("articles",),
"article-detail",
kwargs={"slug": self.slug},
),
)
reverse_fallback(
"/",
reverse_app
("articles",),
"article-detail",
kwargs={"slug": self.slug},
)
"""
try:
return fn(*args, **kwargs)
except NoReverseMatch:
return fallback
def apps_urlconf(*, apps=None):
"""
Generates a dynamic URLconf Python module including all application page
types in their assigned place and adding the ``urlpatterns`` from
``ROOT_URLCONF`` at the end. Returns the value of ``ROOT_URLCONF`` directly
if there are no active application page types.
Since Django uses an LRU cache for URL resolvers, we try hard to only
generate a changed URLconf when application URLs actually change.
The application URLconfs are put in nested namespaces:
- The outer application namespace is ``apps`` by default. This value can be
overridden by setting the ``LANGUAGE_CODES_NAMESPACE`` class attribute of
the page class to a different value. The instance namespaces consist of
the ``LANGUAGE_CODES_NAMESPACE`` value with a language added at the end.
As long as you're always using ``reverse_app`` you do not have to know
the specifics.
- The inner namespace is the app namespace, where the application
namespace is defined by the app itself (assign ``app_name`` in the
same module as ``urlpatterns``) and the instance namespace is defined
by the application name (from ``TYPES``).
Modules stay around as long as the Python (most of the time WSGI) process
lives. Unloading modules is tricky and probably not worth it since the
URLconf modules shouldn't gobble up much memory.
The set of applications can be overridden by passing a list of
``(path, page_type, app_namespace, language_code)`` tuples.
"""
if apps is None:
fields = ("path", "page_type", "app_namespace", "language_code")
apps = (
_APPS_MODEL._default_manager.active()
.with_tree_fields(False)
.exclude(app_namespace="")
.values_list(*fields)
.order_by(*fields)
)
if not apps:
# No point wrapping ROOT_URLCONF if there are no additional URLs
return settings.ROOT_URLCONF
key = ",".join(itertools.chain.from_iterable(apps))
module_name = "urlconf_%s" % hashlib.md5(key.encode("utf-8")).hexdigest()
if module_name not in sys.modules:
types = {app.key: app for app in _APPS_MODEL.TYPES if app.get("urlconf")}
m = ModuleType(module_name)
mapping = defaultdict(list)
for path, page_type, app_namespace, language_code in apps:
if page_type not in types:
continue
mapping[language_code].append(
re_path(
r"^%s" % re.escape(path.lstrip("/")),
include(types[page_type]["urlconf"], namespace=app_namespace),
)
)
m.urlpatterns = [
re_path(
r"",
include(
(instances, _APPS_MODEL.LANGUAGE_CODES_NAMESPACE),
namespace="%s-%s"
% (_APPS_MODEL.LANGUAGE_CODES_NAMESPACE, language_code),
),
)
for language_code, instances in mapping.items()
]
# Append patterns from ROOT_URLCONF instead of including them because
# i18n_patterns only work in the root URLconf.
urlconf = import_module(settings.ROOT_URLCONF)
m.urlpatterns += urlconf.urlpatterns
for attribute in ["handler400", "handler403", "handler404", "handler500"]:
if hasattr(urlconf, attribute):
setattr(m, attribute, getattr(urlconf, attribute))
sys.modules[module_name] = m
return module_name
def page_for_app_request(request, *, queryset=None):
"""
Returns the current page if we're inside an app. Should only be called
while processing app views. Will pass along exceptions caused by
non-existing or duplicated apps (this should never happen inside an app
because :func:`~feincms3.applications.apps_urlconf` wouldn't have added the app
    in the first place if a matching page didn't exist, but still.)
Example::
def article_detail(request, slug):
page = page_for_app_request(request)
page.activate_language(request)
instance = get_object_or_404(Article, slug=slug)
return render(
request,
"articles/article_detail.html",
{"article": article, "page": page},
)
It is possible to override the queryset used to fetch a page instance. The
default implementation simply uses the first concrete subclass of
:class:`~feincms3.applications.PageTypeMixin`.
"""
if queryset is None:
queryset = _APPS_MODEL._default_manager.active().with_tree_fields()
# Unguarded - if this fails, we shouldn't even be here.
return queryset.get(
language_code=request.resolver_match.namespaces[0][
len(_APPS_MODEL.LANGUAGE_CODES_NAMESPACE) + 1 :
],
app_namespace=request.resolver_match.namespaces[1],
)
def apps_middleware(get_response):
"""
This middleware must be put in ``MIDDLEWARE``; it simply assigns
the return value of :func:`~feincms3.applications.apps_urlconf` to
``request.urlconf``. This middleware should probably be one of the first
since it has to run before any resolving happens.
"""
def middleware(request):
request.urlconf = apps_urlconf()
return get_response(request)
return middleware
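# A hedged configuration sketch (the settings shown here are illustrative, not part
# of this module): in a Django settings file the middleware is referenced by its
# dotted path, e.g.
#   MIDDLEWARE = [
#       "feincms3.applications.apps_middleware",
#       "django.middleware.common.CommonMiddleware",
#       # ...
#   ]
# keeping it near the top so request.urlconf is set before any URL resolving happens.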
class TemplateType(Type):
_REQUIRED = {"key", "title", "template_name", "regions", "app_namespace"}
def __init__(self, **kwargs):
kwargs.setdefault("app_namespace", lambda instance: "")
super().__init__(**kwargs)
class ApplicationType(Type):
_REQUIRED = {"key", "title", "urlconf", "app_namespace"}
def __init__(self, **kwargs):
kwargs.setdefault("template_name", "")
kwargs.setdefault("regions", [])
kwargs.setdefault("app_namespace", lambda instance: instance.page_type)
super().__init__(**kwargs)
class PageTypeMixin(models.Model):
"""
The page class should inherit this mixin. It adds a ``page_type`` field
containing the selected page type, and an ``app_namespace`` field which
contains the instance namespace of the application, if the type of the page
is an application type. The field is empty e.g. for template page types.
Note that currently the :class:`~feincms3.mixins.LanguageMixin` is a
required dependency of :mod:`feincms3.applications`.
``TYPES`` contains a list of page type instances, either
:class:`~feincms3.applications.TemplateType` or
:class:`~feincms3.applications.ApplicationType` and maybe others in the
future. The configuration values are specific to each type, common to all
of them are a key (stored in the ``page_type`` field) and a user-visible
title.
Template types additionally require a ``template_name`` and a ``regions``
value.
Application types require a ``urlconf`` value and support the following
options:
- ``urlconf``: The path to the URLconf module for the application. Besides
the ``urlpatterns`` list the module should probably also specify a
``app_name``.
- ``required_fields``: A list of page class fields which must be non-empty
for the application to work. The values are checked in
``PageTypeMixin.clean_fields``.
- ``app_namespace``: A callable which receives the page instance
as its only argument and returns a string suitable for use as an
instance namespace.
Usage::
from content_editor.models import Region
from django.utils.translation import gettext_lazy as _
from feincms3.applications import PageTypeMixin
from feincms3.mixins import LanguageMixin
from feincms3.pages import AbstractPage
class Page(AbstractPage, PageTypeMixin, LanguageMixin):
TYPES = [
# It is recommended to always put a TemplateType type first
# because it will be the default type:
TemplateType(
key="standard",
title=_("Standard"),
template_name="pages/standard.html",
regions=[Region(key="main", title=_("Main"))],
),
ApplicationType(
key="publications",
title=_("publications"),
urlconf="app.articles.urls",
),
ApplicationType(
key="blog",
title=_("blog"),
urlconf="app.articles.urls",
),
ApplicationType(
key="contact",
title=_("contact form"),
urlconf="app.forms.contact_urls",
),
ApplicationType(
key="teams",
title=_("teams"),
urlconf="app.teams.urls",
app_namespace=lambda page: f"{page.page_type}-{page.team_id}",
required_fields=["team"],
),
]
"""
#: Override this to set a different name for the outer namespace.
LANGUAGE_CODES_NAMESPACE = "apps"
page_type = ChoicesCharField(_("page type"), max_length=100)
app_namespace = models.CharField(
("app instance namespace"), max_length=100, blank=True, editable=False
)
class Meta:
abstract = True
@property
def type(self):
"""
Returns the appropriate page type instance, either the selected type or
the first type in the list of ``TYPES`` if no type is selected or if
the type does not exist anymore.
"""
return self.TYPES_DICT.get(self.page_type, self.TYPES[0])
@property
def regions(self):
return self.type.regions
def save(self, *args, **kwargs):
"""
Updates ``app_namespace``.
"""
self.app_namespace = self.type.app_namespace(self)
super().save(*args, **kwargs)
save.alters_data = True
def clean_fields(self, exclude=None):
"""
Checks that required fields are given and that an app namespace only
exists once per site and language.
"""
exclude = [] if exclude is None else exclude
super().clean_fields(exclude)
type = self.type
if type and type.get("required_fields"):
missing = [
field for field in type["required_fields"] if not getattr(self, field)
]
if missing:
error = _('This field is required for the page type "%s".') % (
self.get_page_type_display(),
)
errors = {}
for field in missing:
if field in exclude:
errors.setdefault("__all__", []).append(f"{field}: {error}")
else:
errors[field] = error
raise ValidationError(errors)
if type and type.app_namespace(self):
if self.__class__._default_manager.filter(
Q(app_namespace=type.app_namespace(self)),
Q(language_code=self.language_code),
~Q(pk=self.pk),
).exists():
fields = ["__all__", "page_type"]
fields.extend(type.get("required_fields", ()))
raise ValidationError(
{
field: _("This exact app already exists.")
for field in fields
if field not in exclude
}
)
@staticmethod
def fill_page_type_choices(sender, **kwargs):
"""
Fills in the choices for ``page_type`` from the ``TYPES``
class variable. This method is a receiver of Django's
``class_prepared`` signal.
"""
if issubclass(sender, PageTypeMixin) and not sender._meta.abstract:
field = sender._meta.get_field("page_type")
field.choices = [(app.key, app.title) for app in sender.TYPES]
field.default = sender.TYPES[0].key
sender.TYPES_DICT = {app.key: app for app in sender.TYPES}
global _APPS_MODEL
_APPS_MODEL = sender
@classmethod
def check(cls, **kwargs):
errors = super().check(**kwargs)
errors.extend(cls._check_feincms3_appsmixin_templatemixin_clash(**kwargs))
return errors
@classmethod
def _check_feincms3_appsmixin_templatemixin_clash(cls, **kwargs):
from feincms3.mixins import TemplateMixin
if not cls._meta.abstract and issubclass(cls, TemplateMixin):
return [
Warning(
f"The model {cls._meta.label} extends both"
" PageTypeMixin and TemplateMixin. The new PageTypeMixin includes"
" the functionality of the TemplateMixin, please remove"
" the latter, fill in ``page_type`` fields either from"
" ``application`` (if non-empty) or from ``template_key``,"
" and rename ``app_instance_namespace`` to ``app_namespace``.",
obj=cls,
id="feincms3.W002",
)
]
return []
@property
def application(self):
warnings.warn(
"AppsMixin.application is PageTypeMixin.page_type now.",
DeprecationWarning,
stacklevel=2,
)
return self.page_type
@property
def app_instance_namespace(self):
warnings.warn(
"AppsMixin.app_instance_namespace is PageTypeMixin.app_namespace now.",
DeprecationWarning,
stacklevel=2,
)
return self.app_namespace
signals.class_prepared.connect(PageTypeMixin.fill_page_type_choices)
|
the-stack_0_16125 | import ipaddress, subprocess
from flask import request, Response
from flask_restful import Resource
from wgpt.models import db, Client, Server, Cluster, ClientSchema, ServerSchema, ClusterSchema
from wgpt.wg_ssh_update import send_ssh_command
clients_schema = ClientSchema(many=True)
client_schema = ClientSchema()
def generate_client_keypair():
p_genkey = subprocess.Popen(["wg", "genkey"], stdout=subprocess.PIPE)
privkey = p_genkey.communicate()[0].decode().strip()
p_pubkey = subprocess.Popen(["wg", "pubkey"], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
p_pubkey.stdin.write(privkey.encode("ascii"))
pubkey = p_pubkey.communicate()[0].decode().strip()
return privkey, pubkey
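# The subprocess pipeline above mirrors the usual WireGuard shell idiom (shown only
# as a hedged illustration):
#   wg genkey | tee privatekey | wg pubkey > publickey
# i.e. the private key is generated first and the public key is derived from it.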
def get_next_availabe_ip(network, client_ips):
hosts_iterator = (host for host in network if host not in client_ips)
try:
client_ip = str(next(hosts_iterator))
return client_ip
    except StopIteration:
return False
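# A hedged usage sketch (addresses are examples only): for a 10.0.0.0/29 network in
# which 10.0.0.1 is already assigned,
#   get_next_availabe_ip(ipaddress.ip_network("10.0.0.0/29").hosts(),
#                        [ipaddress.ip_address("10.0.0.1")])
# returns "10.0.0.2"; once every host address is taken it returns False.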
class AddClientToServer(Resource):
def post(self, server_id):
json_data = request.get_json(force=True)
if not json_data:
return {'message':'No input data provided'}, 400
server = Server.query.filter_by(id=server_id).first()
if not server:
return {'status':'failure', 'message':'Server not found'}
# Fetch client information for server to obtain free IP addresses
clients = Client.query.filter_by(server_id=json_data['server_id'])
client_ipv4 = None
client_ipv6 = None
if server.server_networkv4:
server_networkv4 = ipaddress.ip_network(server.server_networkv4).hosts()
clients_ipv4 = []
for client in clients:
clients_ipv4.append(ipaddress.ip_address(client.client_ipv4))
client_ipv4 = get_next_availabe_ip(server_networkv4, clients_ipv4)
if server.server_networkv6:
server_networkv6 = ipaddress.ip_network(server.server_networkv6).hosts()
clients_ipv6 = []
for client in clients:
clients_ipv6.append(ipaddress.ip_address(client.client_ipv6))
client_ipv6 = get_next_availabe_ip(server_networkv6, clients_ipv6)
# Generate keys
if client_ipv4 or client_ipv6:
client_keypair = generate_client_keypair()
client_privkey = client_keypair[0]
client_pubkey = client_keypair[1]
else:
return {'status':'failure','message':'Could not assign IP to client. Network full?'}
# Create the client through SSH on server if configured
if server.server_ssh_key:
if client_ipv4:
ssh_success = send_ssh_command('add', server.server_ip + ' ' + server.server_ssh_key, client_pubkey, client_ipv4)
if not ssh_success:
return {'status':'failure', 'message':'There was a problem with the ssh provisioning.'}
if client_ipv6:
ssh_success = send_ssh_command('add', server.server_ip + ' ' + server.server_ssh_key, client_pubkey, client_ipv6)
if not ssh_success:
return {'status':'failure', 'message':'There was a problem with the ssh provisioning.'}
# Store the client information in the database
new_client = Client(
client_ipv4 = client_ipv4,
client_ipv6 = client_ipv6,
client_pubkey = client_pubkey,
server_id = json_data['server_id'],
cluster_id = None,
client_description = json_data['client_description']
)
db.session.add(new_client)
db.session.commit()
data= {}
data['client_id'] = new_client.id
data['client_ipv4'] = client_ipv4
data['client_ipv6'] = client_ipv6
data['client_pubkey'] = client_pubkey
data['client_privkey'] = client_privkey
data['server_ip'] = server.server_ip
data['server_port'] = server.server_port
data['server_pubkey'] = server.server_pubkey
if server.server_dns is not None:
data['server_dns'] = server.server_dns
data['server_id'] = json_data['server_id']
return {'status': 'success', 'data': data}
class AddClientToCluster(Resource):
def post(self, cluster_id):
json_data = request.get_json(force=True)
if not json_data:
return {'message':'No input data provided'}, 400
cluster = Cluster.query.filter_by(id=cluster_id).first()
if not cluster:
return {'status':'failure', 'message':'Cluster not found'}
# Get relevant server data
# Get occupied client ip addresses on server
clients = Client.query.filter_by(cluster_id=cluster_id)
client_ipv4 = None
client_ipv6 = None
if cluster.cluster_networkv4:
cluster_networkv4 = ipaddress.ip_network(cluster.cluster_networkv4).hosts()
clients_ipv4 = []
for client in clients:
clients_ipv4.append(ipaddress.ip_address(client.client_ipv4))
client_ipv4 = get_next_availabe_ip(cluster_networkv4, clients_ipv4)
if cluster.cluster_networkv6:
cluster_networkv6 = ipaddress.ip_network(cluster.cluster_networkv6).hosts()
clients_ipv6 = []
for client in clients:
clients_ipv6.append(ipaddress.ip_address(client.client_ipv6))
client_ipv6 = get_next_availabe_ip(cluster_networkv6, clients_ipv6)
# Generate keys
if client_ipv4 or client_ipv6:
client_keypair = generate_client_keypair()
client_privkey = client_keypair[0]
client_pubkey = client_keypair[1]
else:
return {'status':'failure','message':'Could not assign IP to client. Network full?'}
# Store the client information in the database
new_client = Client(
client_ipv4 = client_ipv4,
client_ipv6 = client_ipv6,
client_description = json_data['client_description'],
client_pubkey = client_pubkey,
cluster_id = json_data['cluster_id'],
server_id = None,
)
db.session.add(new_client)
db.session.commit()
return {'status': 'success', 'data': {
'client_id': new_client.id,
'client_ipv4': client_ipv4,
'client_ipv6': client_ipv6,
'client_pubkey': client_pubkey,
'client_privkey': client_privkey,
'cluster_pubkey': cluster.cluster_pubkey,
'cluster_dns': cluster.cluster_dns,
'cluster_id': cluster.id
}
}
class GetClient(Resource):
def get(self, client_id):
clients = Client.query.filter_by(id=client_id).first()
if not clients:
return {'status':'failure', 'message':'No client with that id found'}, 200
clients = client_schema.dump(clients)
return {'status':'success', 'data':clients}, 200
class GetClients(Resource):
def get(self):
clients = Client.query.all()
clients = clients_schema.dump(clients)
return {'status':'success', 'data':clients}, 200
class GetClientsByServerId(Resource):
def get(self, server_id):
clients = Client.query.filter_by(server_id=server_id).all()
if not clients:
return {'status':'failure', 'message':'no clients found for that server id'}
clients = clients_schema.dump(clients)
return {'status':'success', 'data':clients}, 200
class GetClientsByClusterId(Resource):
def get(self, cluster_id):
clients = Client.query.filter_by(cluster_id=cluster_id).all()
if not clients:
return {'status':'failure', 'message':'no clients found for that cluster id'}
clients = clients_schema.dump(clients)
return {'status':'success', 'data':clients}, 200
class DeleteClient(Resource):
def delete(self, client_id):
if client_id:
client = Client.query.filter_by(id=client_id).first()
if not client:
return {'status':'failure', 'message':'no clients found for that id'}
server = Server.query.filter_by(id=client.server_id).first()
            # Remove the client through SSH on server if configured
if server.server_ssh_key:
if client.client_ipv4:
ssh_success = send_ssh_command('remove', server.server_ip + ' ' + server.server_ssh_key, client.client_pubkey, client.client_ipv4)
if not ssh_success:
return {'status':'failure', 'message':'There was a problem with the ssh provisioning.'}
if client.client_ipv6:
ssh_success = send_ssh_command('remove', server.server_ip + ' ' + server.server_ssh_key, client.client_pubkey, client.client_ipv6)
if not ssh_success:
return {'status':'failure', 'message':'There was a problem with the ssh provisioning.'}
db.session.delete(client)
db.session.commit()
return { 'status' : 'success', 'data':{'client_id':client_id}}, 200
|
the-stack_0_16127 | # -*- coding: utf-8 -*-
import lemoncheesecake.api as lcc
from lemoncheesecake.matching import check_that, check_that_in, is_, is_str, is_list, is_integer, require_that, \
require_that_in, has_length
from common.base_test import BaseTest
SUITE = {
"description": "Method 'get_account_history'"
}
@lcc.prop("main", "type")
@lcc.prop("positive", "type")
@lcc.tags("api", "history_api", "get_account_history")
@lcc.suite("Check work of method 'get_account_history'", rank=1)
class GetAccountHistory(BaseTest):
def __init__(self):
super().__init__()
self.__database_api_identifier = None
self.__registration_api_identifier = None
self.__history_api_identifier = None
self.echo_acc0 = None
def setup_suite(self):
super().setup_suite()
lcc.set_step("Setup for {}".format(self.__class__.__name__))
self.__database_api_identifier = self.get_identifier("database")
self.__registration_api_identifier = self.get_identifier("registration")
self.__history_api_identifier = self.get_identifier("history")
lcc.log_info(
"API identifiers are: database='{}', registration='{}', "
"history='{}'".format(self.__database_api_identifier, self.__registration_api_identifier,
self.__history_api_identifier))
self.echo_acc0 = self.get_account_id(self.accounts[0], self.__database_api_identifier,
self.__registration_api_identifier)
lcc.log_info("Echo account is '{}'".format(self.echo_acc0))
@lcc.test("Simple work of method 'get_account_history'")
def method_main_check(self):
operation_history_obj = "{}0".format(self.get_object_type(self.echo.config.object_types.OPERATION_HISTORY))
stop, start = operation_history_obj, operation_history_obj
limit = 1
lcc.set_step("Get account history")
params = [self.echo_acc0, stop, limit, start]
response_id = self.send_request(self.get_request("get_account_history", params), self.__history_api_identifier)
response = self.get_response(response_id)
lcc.log_info(
"Call method 'get_account_history' with: account='{}', stop='{}', limit='{}', start='{}' parameters".format(
self.echo_acc0, stop, limit, start))
lcc.set_step("Check response from method 'get_account_history'")
results = response["result"]
check_that(
"'number of history results'",
results, has_length(limit)
)
for result in results:
if not self.validator.is_operation_history_id(result["id"]):
lcc.log_error("Wrong format of 'operation id', got: {}".format(result["id"]))
else:
lcc.log_info("'operation_id' has correct format: operation_history_id")
check_that_in(
result,
"op", is_list(),
"result", is_list(),
"block_num", is_integer(),
"trx_in_block", is_integer(),
"op_in_trx", is_integer(),
"virtual_op", is_integer(),
quiet=True
)
@lcc.prop("positive", "type")
@lcc.tags("api", "history_api", "get_account_history")
@lcc.suite("Positive testing of method 'get_account_history'", rank=2)
class PositiveTesting(BaseTest):
def __init__(self):
super().__init__()
self.__database_api_identifier = None
self.__registration_api_identifier = None
self.__history_api_identifier = None
self.echo_acc0 = None
self.echo_acc1 = None
def get_account_history(self, account, stop, limit, start, negative=False):
lcc.log_info("Get '{}' account history".format(account))
params = [account, stop, limit, start]
response_id = self.send_request(self.get_request("get_account_history", params), self.__history_api_identifier)
return self.get_response(response_id, negative=negative)
def setup_suite(self):
super().setup_suite()
self._connect_to_echopy_lib()
lcc.set_step("Setup for {}".format(self.__class__.__name__))
self.__database_api_identifier = self.get_identifier("database")
self.__registration_api_identifier = self.get_identifier("registration")
self.__history_api_identifier = self.get_identifier("history")
lcc.log_info(
"API identifiers are: database='{}', registration='{}', "
"history='{}'".format(self.__database_api_identifier, self.__registration_api_identifier,
self.__history_api_identifier))
self.echo_acc0 = self.get_account_id(self.accounts[0], self.__database_api_identifier,
self.__registration_api_identifier)
self.echo_acc1 = self.get_account_id(self.accounts[1], self.__database_api_identifier,
self.__registration_api_identifier)
lcc.log_info("Echo accounts are: #1='{}', #2='{}'".format(self.echo_acc0, self.echo_acc1))
def teardown_suite(self):
self._disconnect_to_echopy_lib()
super().teardown_suite()
@lcc.test("Check new account history")
@lcc.depends_on("HistoryApi.GetAccountHistory.GetAccountHistory.method_main_check")
def new_account_history(self, get_random_valid_account_name):
new_account = get_random_valid_account_name
operation_history_obj = "{}0".format(self.get_object_type(self.echo.config.object_types.OPERATION_HISTORY))
stop, start = operation_history_obj, operation_history_obj
limit = 100
lcc.set_step("Create and get new account")
new_account = self.get_account_id(new_account, self.__database_api_identifier,
self.__registration_api_identifier)
lcc.log_info("New Echo account created, account_id='{}'".format(new_account))
lcc.set_step("Get new account history")
response = self.get_account_history(new_account, stop, limit, start)
lcc.set_step("Check new account history")
expected_number_of_operations = 1
require_that(
"'new account history'",
response["result"], has_length(expected_number_of_operations)
)
check_that(
"'id single operation'",
response["result"][0]["op"][0],
is_(self.echo.config.operation_ids.ACCOUNT_CREATE)
)
@lcc.test("Check limit number of operations to retrieve")
@lcc.depends_on("HistoryApi.GetAccountHistory.GetAccountHistory.method_main_check")
def limit_operations_to_retrieve(self, get_random_valid_account_name, get_random_integer_up_to_hundred):
new_account = get_random_valid_account_name
operation_history_obj = "{}0".format(self.get_object_type(self.echo.config.object_types.OPERATION_HISTORY))
stop, start = operation_history_obj, operation_history_obj
min_limit = 1
max_limit = 100
default_account_create_operation, default_get_assets_operation = 1, 1
operation_count = get_random_integer_up_to_hundred
lcc.set_step("Create and get new account")
new_account = self.get_account_id(new_account, self.__database_api_identifier,
self.__registration_api_identifier)
lcc.log_info("New Echo account created, account_id='{}'".format(new_account))
lcc.set_step("Perform operations using a new account. Operation count equal to limit")
self.utils.perform_transfer_operations(self, new_account, self.echo_acc0, self.__database_api_identifier,
operation_count=operation_count, only_in_history=True)
lcc.log_info("Fill account history with '{}' number of transfer operations".format(operation_count))
lcc.set_step(
"Check that count of new account history with the maximum limit is equal to operation_count")
response = self.get_account_history(new_account, stop, max_limit, start)
if operation_count == 1:
operation_count = operation_count + default_get_assets_operation
check_that(
"'number of history results'",
response["result"], has_length(operation_count + default_account_create_operation)
)
lcc.set_step("Check minimum list length account history")
response = self.get_account_history(new_account, stop, min_limit, start)
check_that(
"'number of history results'",
response["result"], has_length(min_limit)
)
lcc.set_step("Perform operations using a new account to create max_limit operations")
operation_count = max_limit - operation_count - default_account_create_operation
self.utils.perform_transfer_operations(self, new_account, self.echo_acc0, self.__database_api_identifier,
operation_count=operation_count, only_in_history=True)
lcc.log_info(
"Fill account history with '{}' number of transfer operations".format(operation_count))
lcc.set_step(
"Check that count of new account history with the limit = max_limit is equal to max_limit")
response = self.get_account_history(new_account, stop, max_limit, start)
check_that(
"'number of history results'",
response["result"], has_length(max_limit)
)
@lcc.test("Check stop and start IDs of the operations in account history")
@lcc.depends_on("HistoryApi.GetAccountHistory.GetAccountHistory.method_main_check")
def stop_and_start_operations(self, get_random_integer, get_random_integer_up_to_hundred):
transfer_amount_1 = get_random_integer
transfer_amount_2 = get_random_integer_up_to_hundred
operation_history_obj = "{}0".format(self.get_object_type(self.echo.config.object_types.OPERATION_HISTORY))
stop, start = operation_history_obj, operation_history_obj
operations = []
operation_ids = []
lcc.set_step("Perform one operation")
operation_count = 1
broadcast_result = self.utils.perform_transfer_operations(self, self.echo_acc0, self.echo_acc1,
self.__database_api_identifier,
transfer_amount=transfer_amount_1,
operation_count=operation_count, only_in_history=True)
lcc.log_info("Fill account history with '{}' number of transfer operations".format(operation_count))
operations.append(broadcast_result["trx"]["operations"][0])
limit = operation_count
lcc.set_step("Get account history. Limit: '{}'".format(limit))
response = self.get_account_history(self.echo_acc0, stop, limit, start)
lcc.set_step("Check account history to see added operation and store operation id")
require_that(
"'account history'",
response["result"][0]["op"], is_list(operations[0])
)
operation_id = response["result"][0]["id"]
lcc.set_step("Perform another operations")
operation_count = 5
broadcast_result = self.utils.perform_transfer_operations(self, self.echo_acc0, self.echo_acc1,
self.__database_api_identifier,
transfer_amount=transfer_amount_2,
operation_count=operation_count, only_in_history=True)
lcc.log_info("Fill account history with '{}' number of transfer operations".format(operation_count))
for i in range(operation_count):
operations.append(broadcast_result["trx"]["operations"][i])
limit = operation_count
stop = operation_id
lcc.set_step("Get account history. Stop: '{}', limit: '{}'".format(stop, limit))
response = self.get_account_history(self.echo_acc0, stop, limit, start)
lcc.set_step("Check account history to see added operations and store operation ids")
operations.reverse()
for i in range(limit):
require_that(
"'account history'",
response["result"][i]["op"], is_list(operations[i])
)
operation_ids.append(response["result"][i]["id"])
limit = operation_count + 1
stop = operation_id
start = operation_ids[0]
lcc.set_step("Get account history. Stop: '{}', limit: '{}' and start: '{}'".format(stop, limit, start))
results = self.get_account_history(self.echo_acc0, stop, limit, start)["result"]
lcc.set_step("Check account history to see operations from the selected ids interval")
for i, result in enumerate(results):
lcc.log_info("Check operation #{}:".format(i))
require_that_in(
result,
["id"], is_str(operation_ids[i]),
["op"], is_list(operations[i])
)
|
the-stack_0_16129 | # Sourced from here: https://python-forum.io/Thread-Learning-Python-with-a-Caesar-cipher?pid=131456#pid131456
# Modified by Drone4four
import string
def encrypt(message, shift=0, replace='', alphabet=string.ascii_letters):
reply = ''
for letter in message:
try:
position = alphabet.index(letter) # Get ord value of letter in alphabet
position = (position+shift) % len(alphabet) # Shift ord value
reply += alphabet[position] # Convert shifted ord value back to a letter
        except ValueError:
reply += replace # Use replace for letters not found in alphabet
return reply
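# A few hedged sanity checks with the default alphabet (string.ascii_letters):
#   encrypt("abc", shift=1)              -> "bcd"
#   encrypt("xyz", shift=1)              -> "yzA"  (lowercase wraps into uppercase)
#   encrypt("a b", shift=1, replace=" ") -> "b c"  (space is not in the alphabet)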
message = "Hello World"
encrypted = encrypt(message, shift=1, replace=' ')
decrypted = encrypt(encrypted, shift=-1, replace=' ')
print(encrypted)
print(decrypted) |
the-stack_0_16132 | import streamlit as st # streamlit run Location100_RF_streamlit.py
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
import warnings
from sklearn.model_selection import train_test_split, GridSearchCV, learning_curve, cross_val_score
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn import tree
from sklearn import preprocessing
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.svm import SVR
from sklearn.tree import DecisionTreeRegressor
from scipy import stats
from sklearn.neighbors import KNeighborsRegressor
# import config
st.title('Pepper ML for Chicago area (location 100) using random forest')
# df = pd.read_csv("C:\PepperPepper\pepperProject.csv", encoding = 'unicode_escape', engine ='python')
url = f'https://raw.githubusercontent.com/LeonZly90/myData/main/pepperProject.csv?token=AG6BQ7M2G3HRK4IT4IU5ZALBD7M3S'
df = pd.read_csv(url, encoding='unicode_escape', engine='python')
df_data = df.copy()
new_sheet = pd.DataFrame(df_data,
columns=['OMOP_COMP_CODE', 'CTRL_JOB', 'STAGE_CODE', 'MARKET_TYPE', 'POTENTIAL_REV_AMT',
'TOTAL_HOURS'])
new_sheet = new_sheet[~new_sheet['MARKET_TYPE'].isin(['Select Market', 'Self Performed Work', 'Self Performed Direct'])]
new_sheet = new_sheet[new_sheet['POTENTIAL_REV_AMT'] > 0]
location_100 = new_sheet[new_sheet.OMOP_COMP_CODE == 100]
location_100 = location_100.drop('OMOP_COMP_CODE', axis=1)
# st.write('location_100:\n', location_100)
JobHour_by_StageMarket = location_100.groupby(['CTRL_JOB', 'STAGE_CODE', 'MARKET_TYPE'])[
    ['POTENTIAL_REV_AMT', 'TOTAL_HOURS']].sum().reset_index()
# st.write('JobHour_by_StageMarket:\n', JobHour_by_StageMarket) # [474 rows x 5 columns]
revAmt_Hour0 = JobHour_by_StageMarket.iloc[:, -2:].abs()
# st.write(revAmt_Hour0)
# with st.echo(code_location='below'):
# fig1 = plt.figure(1)
# plt.scatter(revAmt_Hour0['POTENTIAL_REV_AMT'], revAmt_Hour0['TOTAL_HOURS'])
# plt.xlabel('POTENTIAL_REV_AMT')
# plt.ylabel('TOTAL_HOURS')
# plt.show()
# st.write(fig1)
# clean outlier [469 rows x 5 columns]
z_scores = stats.zscore(revAmt_Hour0)
abs_z_scores = np.abs(z_scores)
revAmt_Hour1 = revAmt_Hour0[(abs_z_scores < 3).all(axis=1)]
# st.write(revAmt_Hour1)
# with st.echo(code_location='below'):
# fig2=plt.figure(2)
# plt.scatter(revAmt_Hour1['POTENTIAL_REV_AMT'], revAmt_Hour1['TOTAL_HOURS'])
# plt.xlabel('POTENTIAL_REV_AMT1')
# plt.ylabel('TOTAL_HOURS1')
# plt.show()
# st.write(fig2)
rest = JobHour_by_StageMarket.iloc[:, :-2]
JobHour_by_StageMarket = rest.join(revAmt_Hour1, how='outer')
# @st.cache # 👈 This function will be cached
JobHour_by_StageMarket = JobHour_by_StageMarket.dropna()
# st.write('Now JobHour_by_StageMarket:\n', JobHour_by_StageMarket) # [469 rows x 5 columns]
# @st.cache # 👈 This function will be cached
standardscaler = preprocessing.StandardScaler()
numer_feature = standardscaler.fit_transform(JobHour_by_StageMarket["POTENTIAL_REV_AMT"].values.reshape(-1, 1))
numer_feature = pd.DataFrame(numer_feature, columns=["POTENTIAL_REV_AMT"])
# st.write('numer_feature\n', numer_feature)
# @st.cache # 👈 This function will be cached
ohe = preprocessing.OneHotEncoder(categories='auto')
feature_arr = ohe.fit_transform(JobHour_by_StageMarket[['STAGE_CODE', 'MARKET_TYPE']]).toarray()
feature_labels = ohe.get_feature_names()
# st.write(feature_labels)
feature_labels = np.array(feature_labels, dtype=object).ravel()
# st.write('feature_labels\n', feature_labels)
features = pd.DataFrame(feature_arr, columns=feature_labels)
# st.write('features\n', features)
predictors = np.concatenate([features, numer_feature], axis=1)
# st.write('predictors:\n', predictors)
target = JobHour_by_StageMarket['TOTAL_HOURS']
# st.write('target:\n', target)
X_train, X_test, y_train, y_test = train_test_split(predictors, target, test_size=0.20, random_state=37)
# st.write(X_train.shape)
# st.write(X_test.shape)
# st.write(y_train.shape)
# st.write(y_test.shape)
# (328, 14)
# (141, 14)
# (328,)
# (141,)
# Random Forest # 0.7806525157351498 initial
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import RandomizedSearchCV
import time
start_time = time.time()
# reg = RandomForestRegressor(n_estimators=1000, criterion="mse")
# reg.fit(X_train, y_train)
# y_pred = reg.predict(X_test)
# r2_scoreE = r2_score(y_test, y_pred)
# st.write('\nRandom Forest\n')
# st.write("r2_score: {0}".format(r2_scoreE))
# rmse = mean_squared_error(y_test, y_pred, squared=False)
# st.write("RMSE: {0}".format(rmse))
####################################################################################
# # Number of trees in random forest
# n_estimators = [int(x) for x in np.linspace(start=200, stop=2000, num=10)]
# # Number of features to consider at every split
# max_features = ['auto', 'sqrt']
# # Maximum number of levels in tree
# max_depth = [int(x) for x in np.linspace(10, 110, num=11)]
# max_depth.append(None)
# # Minimum number of samples required to split a node
# min_samples_split = [2, 5, 10]
# # Minimum number of samples required at each leaf node
# min_samples_leaf = [1, 2, 4]
# # Method of selecting samples for training each tree
# bootstrap = [True, False]
# # Create the random grid
#
# random_grid = {'n_estimators': n_estimators,
# 'max_features': max_features,
# 'max_depth': max_depth,
# 'min_samples_split': min_samples_split,
# 'min_samples_leaf': min_samples_leaf,
# 'bootstrap': bootstrap}
#
# rf = RandomForestRegressor()
# rf_random = RandomizedSearchCV(estimator=rf, param_distributions=random_grid, n_iter=100, cv=3, verbose=2,
# random_state=42, n_jobs=-1)
# # Fit the random search model
# rf_random.fit(X_train, y_train)
#
# # search.best_params_ {'n_estimators': 400, 'min_samples_split': 5, 'min_samples_leaf': 1, 'max_features': 'sqrt', 'max_depth': 100, 'bootstrap': True}
# # search.fit(X_train, y_train)
# st.write('\nsearch.best_params_', rf_random.best_params_)
# end_time = time.time() # time 304.75399446487427
# st.write('time', end_time - start_time)
#
#
# best_search = rf_random.best_estimator_
# st.write('best_search\n', best_search)
# reg = best_search
####################################################################################
# search.best_params_ {'n_estimators': 200, 'min_samples_split': 5, 'min_samples_leaf': 4, 'max_features': 'auto', 'max_depth': 80, 'bootstrap': True}
reg = RandomForestRegressor(n_estimators=200, min_samples_split=5, min_samples_leaf=4, max_features='auto',
max_depth=80, bootstrap='True')
# reg = RandomForestRegressor()
# r2_score: 0.7872974759353466
# MSE: 1107.7595622634976
# @st.cache # 👈 This function will be cached
reg.fit(X_train, y_train)
y_pred = reg.predict(X_test)
r2_scoreF = r2_score(y_test, y_pred)
# st.write('\nRF RF RF RF RF RF RF RF RF RF RF RF RF RF RF RF RF RF RF')
# st.write("accur2_score: {0}".format(r2_scoreF)) # r2_score:
mse = mean_squared_error(y_test, y_pred, squared=False)
# st.write("MSE: {0}".format(mse))
x_ax = range(len(y_test))
# with st.echo(code_location='below'):
fig3 = plt.figure(3)
plt.scatter(x_ax, y_test, s=5, color="blue", label="original")
plt.plot(x_ax, y_pred, lw=0.8, color="red", label="predicted")
plt.xlabel('Trained model')
plt.ylabel('HOURS')
plt.legend()
plt.show()
st.write(fig3)
# @st.cache # 👈 This function will be cached
def predict_new_data(test_data):
test_dataframe = pd.DataFrame(columns=JobHour_by_StageMarket.columns[1:3])
# st.write('test_dataframe:\n', test_dataframe)
for index, column in enumerate(test_dataframe.columns):
test_dataframe[column] = [test_data[index]]
# st.write('test_dataframe:\n', test_dataframe)
cate_test_one_hot = ohe.transform(test_dataframe).toarray()
# st.write('cate_test_one_hot\n', cate_test_one_hot)
numer_feature = standardscaler.transform(np.array(test_data[-1]).reshape(-1, 1))
# st.write('numer_test_stand:\n', numer_feature)
test = np.concatenate([cate_test_one_hot, numer_feature], axis=1)
# st.write('test:\n', test)
return reg.predict(test)
# ['STAGE_CODE','MARKET_TYPE',"POTENTIAL_REV_AMT"]
test_data_1 = ["BO", "Higher Education", 30000000] # 355
test_data_2 = ["SALE", "Healthcare", 20236036] # 909
test_data_3 = ["SALE", "Healthcare", 65172520] # 1180
test_data_4 = ["BR", "Healthcare", 297000] # 52
# st.write("For new data forecast1:", str(round(predict_new_data(test_data_1)[0], 2))) # 355 127.86
# st.write("For new data forecast2:", str(round(predict_new_data(test_data_2)[0], 2))) # 909 1536.94
# st.write("For new data forecast3:", str(round(predict_new_data(test_data_3)[0], 2))) # 1180 1385.98
# st.write("For new data forecast4:", str(round(predict_new_data(test_data_4)[0], 2))) # 52 42.82
STAGE_CODE = np.unique(JobHour_by_StageMarket['STAGE_CODE'])
MARKET_TYPE = np.unique(JobHour_by_StageMarket['MARKET_TYPE'])
r2_scoreF = r2_scoreF*100
st.write("Accuracy rate(r2_score): {0}%".format(round(r2_scoreF, 2)))
option1 = st.sidebar.selectbox(
'Choose your STAGE_CODE:',
STAGE_CODE)
st.write('You selected: ', option1)
option2 = st.sidebar.selectbox(
'Choose your MARKET_TYPE:',
MARKET_TYPE)
st.write('You selected: ', option2)
option3 = st.sidebar.number_input(
'Put your POTENTIAL_REV_AMT:',
)
st.write('You selected: $', option3)
test_data = [option1, option2, option3]
if float(test_data[2]) <= 0.00:
res = 0
else:
# st.sidebar.write('You want to predict:', test_data)
res = round(predict_new_data(test_data)[0], 2)
st.sidebar.write("Estimate:", res, 'hours.')
st.write('Estimate:', res, 'project hours.')
|
the-stack_0_16133 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# PapersBot
#
# purpose: read journal RSS feeds and tweet selected entries
# license: MIT License
# author: Nina Miolane
# e-mail: [email protected]
# inspired by: https://github.com/fxcoudert/PapersBot
import imghdr
import json
import os
import random
import re
import sys
import tempfile
import time
import urllib
import yaml
import bs4
import feedparser
import tweepy
# This is the regular expression that selects the papers of interest
regex = re.compile(r"""( geometric.deep.learning |
geometric.machine.learning |
geometric.neural.net |
geometric.statistics |
geomstats |
geoopt |
hyperbolic.data |
non-euclidean.data |
(?=.*non-euclidean)(?=.*deep.learning) |
(?=.*non-euclidean)(?=.*machine.learning) |
(?=.*non-euclidean)(?=.*neural.net) |
(?=.*non-euclidean)(?=.*statistics) |
manopt |
mctorch |
(?=.*riemannian)(?=.*data) |
(?=.*riemannian)(?=.*deep.learning) |
(?=.*riemannian)(?=.*statistic) |
(?=.*riemannian)(?=.*machine.learning) |
(?=.*riemannian)(?=.*neural.net) |
theanogeometry
)
""", re.IGNORECASE | re.VERBOSE)
# We select entries based on title or summary (abstract, for some feeds)
def entryMatches(entry):
# Malformed entry
if "title" not in entry:
return False
if regex.search(entry.title):
return True
if "summary" in entry:
return regex.search(entry.summary)
else:
return False
# Find the URL for an image associated with the entry
def findImage(entry):
if "description" not in entry:
return
soup = bs4.BeautifulSoup(entry.description, "html.parser")
img = soup.find("img")
if img:
img = img["src"]
if len(img) == 0:
return
# If address is relative, append root URL
if img[0] == "/":
p = urllib.parse.urlparse(entry.id)
img = f"{p.scheme}://{p.netloc}" + img
return img
# Convert string from HTML to plain text
def htmlToText(s):
return bs4.BeautifulSoup(s, "html.parser").get_text()
def downloadImage(url):
if not url:
return None
try:
img, _ = urllib.request.urlretrieve(url)
except Exception:
return None
    ext = imghdr.what(img)
    # imghdr may not recognize the format; give up quietly rather than crash
    if ext is None:
        os.remove(img)
        return None
    res = img + "." + ext
os.rename(img, res)
# Images smaller than 4 KB have a problem, and Twitter will complain
if os.path.getsize(res) < 4096:
os.remove(res)
return None
return res
# Connect to Twitter and authenticate
# Credentials are passed in the environment,
# or stored in "credentials.yml" which contains four lines:
# CONSUMER_KEY: "x1F3s..."
# CONSUMER_SECRET: "3VNg..."
# ACCESS_KEY: "7109..."
# ACCESS_SECRET: "AdnA..."
#
def initTwitter():
if 'CONSUMER_KEY' in os.environ:
cred = {'CONSUMER_KEY': os.environ['CONSUMER_KEY'],
'CONSUMER_SECRET': os.environ['CONSUMER_SECRET'],
'ACCESS_KEY': os.environ['ACCESS_KEY'],
'ACCESS_SECRET': os.environ['ACCESS_SECRET']}
else:
with open("credentials.yml", "r") as f:
cred = yaml.safe_load(f)
auth = tweepy.OAuthHandler(cred["CONSUMER_KEY"], cred["CONSUMER_SECRET"])
auth.set_access_token(cred["ACCESS_KEY"], cred["ACCESS_SECRET"])
return tweepy.API(auth)
def getTwitterConfig(api):
# Check for cached configuration, no more than a day old
if os.path.isfile("twitter_config.dat"):
mtime = os.stat("twitter_config.dat").st_mtime
if time.time() - mtime < 24 * 60 * 60:
with open("twitter_config.dat", "r") as f:
return json.load(f)
# Otherwise, query the Twitter API and cache the result
config = api.configuration()
with open("twitter_config.dat", "w") as f:
json.dump(config, f)
return config
# Read our list of feeds from file
def readFeedsList():
with open("feeds.txt", "r") as f:
feeds = [s.partition("#")[0].strip() for s in f]
return [s for s in feeds if s]
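# A hypothetical feeds.txt consistent with the parsing above (anything after
# "#" is stripped, blank lines are skipped; the URLs are only examples):
#
#   http://export.arxiv.org/rss/stat.ML   # arXiv stat.ML
#   https://journals.aps.org/prl/feed     # some journal feed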
# Remove unwanted text some journals insert into the feeds
def cleanText(s):
# Annoying ASAP tags
s = s.replace("[ASAP]", "")
    # Some feeds have LF characters
s = s.replace("\x0A", "")
# Remove (arXiv:1903.00279v1 [cond-mat.mtrl-sci])
s = re.sub(r"\(arXiv:.+\)", "", s)
# Remove multiple spaces, leading and trailing space
return re.sub("\\s\\s+", " ", s).strip()
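# For instance (input invented for illustration), cleanText maps
#   "[ASAP] A Title  (arXiv:1903.00279v1 [cond-mat.mtrl-sci])"
# to
#   "A Title"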
# Read list of feed items already posted
def readPosted():
try:
with open("posted.dat", "r") as f:
return f.read().splitlines()
except Exception:
return []
class PapersBot:
posted = []
n_seen = 0
n_tweeted = 0
def __init__(self, doTweet=True):
self.feeds = readFeedsList()
self.posted = readPosted()
# Read parameters from configuration file
try:
with open("config.yml", "r") as f:
config = yaml.safe_load(f)
        except Exception:
config = {}
self.throttle = config.get("throttle", 0)
self.wait_time = config.get("wait_time", 5)
self.shuffle_feeds = config.get("shuffle_feeds", True)
self.blacklist = config.get("blacklist", [])
self.blacklist = [re.compile(s) for s in self.blacklist]
# Shuffle feeds list
if self.shuffle_feeds:
random.shuffle(self.feeds)
# Connect to Twitter, unless requested not to
if doTweet:
self.api = initTwitter()
else:
self.api = None
# Determine maximum tweet length
if doTweet:
twconfig = getTwitterConfig(self.api)
urllen = max(twconfig["short_url_length"], twconfig["short_url_length_https"])
imglen = twconfig["characters_reserved_per_media"]
else:
urllen = 23
imglen = 24
self.maxlength = 280 - (urllen + 1) - imglen
# Start-up banner
print(f"This is PapersBot running at {time.strftime('%Y-%m-%d %H:%M:%S %Z')}")
if self.api:
timeline = self.api.user_timeline(count=1)
if len(timeline) > 0:
print(f"Last tweet was posted at {timeline[0].created_at} (UTC)")
else:
print(f"No tweets posted yet? Welcome, new user!")
print(f"Feed list has {len(self.feeds)} feeds\n")
# Add to tweets posted
def addToPosted(self, url):
with open("posted.dat", "a+") as f:
print(url, file=f)
self.posted.append(url)
# Send a tweet for a given feed entry
def sendTweet(self, entry):
title = cleanText(htmlToText(entry.title))
length = self.maxlength
# Usually the ID is the canonical URL, but not always
if entry.id[:8] == "https://" or entry.id[:7] == "http://":
url = entry.id
else:
url = entry.link
# URL may be malformed
if not (url[:8] == "https://" or url[:7] == "http://"):
print(f"INVALID URL: {url}\n")
return
tweet_body = title[:length] + " " + url
# URL may match our blacklist
for regexp in self.blacklist:
if regexp.search(url):
print(f"BLACKLISTED: {tweet_body}\n")
self.addToPosted(entry.id)
return
media = None
image = findImage(entry)
image_file = downloadImage(image)
if image_file:
print(f"IMAGE: {image}")
if self.api:
media = [self.api.media_upload(image_file).media_id]
os.remove(image_file)
print(f"TWEET: {tweet_body}\n")
if self.api:
try:
self.api.update_status(tweet_body, media_ids=media)
except tweepy.error.TweepError as e:
if e.api_code == 187:
print("ERROR: Tweet refused as duplicate\n")
else:
print(f"ERROR: Tweet refused, {e.reason}\n")
sys.exit(1)
self.addToPosted(entry.id)
self.n_tweeted += 1
if self.api:
time.sleep(self.wait_time)
# Main function, iterating over feeds and posting new items
def run(self):
for feed in self.feeds:
parsed_feed = feedparser.parse(feed)
for entry in parsed_feed.entries:
if entryMatches(entry):
self.n_seen += 1
# If no ID provided, use the link as ID
if "id" not in entry:
entry.id = entry.link
if entry.id not in self.posted:
self.sendTweet(entry)
# Bail out if we have reached max number of tweets
if self.throttle > 0 and self.n_tweeted >= self.throttle:
print(f"Max number of papers met ({self.throttle}), stopping now")
return
# Print statistics of a given run
def printStats(self):
print(f"Number of relevant papers: {self.n_seen}")
print(f"Number of papers tweeted: {self.n_tweeted}")
# Print out the n top tweets (most liked and RT'ed)
def printTopTweets(self, count=20):
tweets = self.api.user_timeline(count=200)
oldest = tweets[-1].created_at
print(f"Top {count} recent tweets, by number of RT and likes, since {oldest}:\n")
tweets = [(t.retweet_count + t.favorite_count, t.id, t) for t in tweets]
tweets.sort(reverse=True)
for _, _, t in tweets[0:count]:
url = f"https://twitter.com/{t.user.screen_name}/status/{t.id}"
print(f"{t.retweet_count} RT {t.favorite_count} likes: {url}")
print(f" {t.created_at}")
print(f" {t.text}\n")
def main():
# Make sure all options are correctly typed
options_allowed = ["--do-not-tweet", "--top-tweets"]
for arg in sys.argv[1:]:
if arg not in options_allowed:
print(f"Unknown option: {arg}")
sys.exit(1)
# Initialize our bot
doTweet = "--do-not-tweet" not in sys.argv
bot = PapersBot(doTweet)
# We can print top tweets
if "--top-tweets" in sys.argv:
bot.printTopTweets()
sys.exit(0)
bot.run()
bot.printStats()
if __name__ == "__main__":
main()
|
the-stack_0_16134 | """
Copyright 2017 Robin Verschueren, 2017 Akshay Agrawal
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cvxpy.settings as s
from cvxpy.constraints import SOC, ExpCone, PSD, Zero, NonNeg
from cvxpy.reductions.cvx_attr2constr import convex_attributes
from cvxpy.reductions.dcp2cone.cone_matrix_stuffing import ParamConeProg
from cvxpy.reductions.solution import Solution, failure_solution
from cvxpy.reductions.solvers.solver import Solver
from cvxpy.reductions.solvers import utilities
import numpy as np
import scipy.sparse as sp
# NOTE(akshayka): Small changes to this file can lead to drastic
# performance regressions. If you are making a change to this file,
# make sure to run cvxpy/tests/test_benchmarks.py to ensure that you have
# not introduced a regression.
class LinearOperator(object):
"""A wrapper for linear operators."""
def __init__(self, linear_op, shape):
if sp.issparse(linear_op):
self._matmul = lambda X: linear_op @ X
else:
self._matmul = linear_op
self.shape = shape
def __call__(self, X):
return self._matmul(X)
def as_linear_operator(linear_op):
if isinstance(linear_op, LinearOperator):
return linear_op
elif sp.issparse(linear_op):
return LinearOperator(linear_op, linear_op.shape)
def as_block_diag_linear_operator(matrices):
"""Block diag of SciPy sparse matrices or linear operators."""
linear_operators = [as_linear_operator(op) for op in matrices]
nrows = [op.shape[0] for op in linear_operators]
ncols = [op.shape[1] for op in linear_operators]
m, n = sum(nrows), sum(ncols)
col_indices = np.append(0, np.cumsum(ncols))
def matmul(X):
outputs = []
for i, op in enumerate(linear_operators):
Xi = X[col_indices[i]:col_indices[i + 1]]
outputs.append(op(Xi))
return sp.vstack(outputs)
return LinearOperator(matmul, (m, n))
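# Sketch of the intended behaviour (shapes chosen for illustration): for A of
# shape (2, 3) and B of shape (4, 5), as_block_diag_linear_operator([A, B])
# acts like sp.block_diag([A, B]); it maps an X with 3 + 5 = 8 rows to
# sp.vstack([A @ X[:3], B @ X[3:]]) with 2 + 4 = 6 rows, without ever
# materializing the block-diagonal matrix.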
class ConicSolver(Solver):
"""Conic solver class with reduction semantics
"""
# The key that maps to ConeDims in the data returned by apply().
DIMS = "dims"
# Every conic solver must support Zero and NonNeg constraints.
SUPPORTED_CONSTRAINTS = [Zero, NonNeg]
# Some solvers cannot solve problems that do not have constraints.
# For such solvers, REQUIRES_CONSTR should be set to True.
REQUIRES_CONSTR = False
EXP_CONE_ORDER = None
def accepts(self, problem):
return (isinstance(problem, ParamConeProg)
and (self.MIP_CAPABLE or not problem.is_mixed_integer())
and not convex_attributes([problem.x])
and (len(problem.constraints) > 0 or not self.REQUIRES_CONSTR)
and all(type(c) in self.SUPPORTED_CONSTRAINTS for c in
problem.constraints))
@staticmethod
def get_spacing_matrix(shape, spacing, streak, num_blocks, offset):
"""Returns a sparse matrix that spaces out an expression.
Parameters
----------
shape : tuple
(rows in matrix, columns in matrix)
spacing : int
The number of rows between the start of each non-zero block.
streak: int
The number of elements in each block.
num_blocks : int
The number of non-zero blocks.
offset : int
The number of zero rows at the beginning of the matrix.
Returns
-------
SciPy CSC matrix
A sparse matrix
"""
num_values = num_blocks * streak
val_arr = np.ones(num_values, dtype=np.float64)
streak_plus_spacing = streak + spacing
row_arr = np.arange(0, num_blocks * streak_plus_spacing).reshape(
num_blocks, streak_plus_spacing)[:, :streak].flatten() + offset
col_arr = np.arange(num_values)
return sp.csc_matrix((val_arr, (row_arr, col_arr)), shape)
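    # Worked example (values chosen for illustration): shape=(6, 2),
    # spacing=2, streak=1, num_blocks=2, offset=0 gives nonzeros at
    # (0, 0) and (3, 1), i.e. the dense matrix
    #   [[1, 0],
    #    [0, 0],
    #    [0, 0],
    #    [0, 1],
    #    [0, 0],
    #    [0, 0]]
    # so the two 1-element blocks start three rows apart.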
def psd_format_mat(self, constr):
"""Return a matrix to multiply by PSD constraint coefficients.
"""
# Default is identity.
return sp.eye(constr.size, format='csc')
def format_constraints(self, problem, exp_cone_order):
"""
Returns a ParamConeProg whose problem data tensors will yield the
coefficient "A" and offset "b" for the constraint in the following
formats:
Linear equations: (A, b) such that A * x + b == 0,
Linear inequalities: (A, b) such that A * x + b >= 0,
Second order cone: (A, b) such that A * x + b in SOC,
Exponential cone: (A, b) such that A * x + b in EXP,
Semidefinite cone: (A, b) such that A * x + b in PSD,
The CVXPY standard for the exponential cone is:
K_e = closure{(x,y,z) | z >= y * exp(x/y), y>0}.
Whenever a solver uses this convention, EXP_CONE_ORDER should be
[0, 1, 2].
The CVXPY standard for the second order cone is:
SOC(n) = { x : x[0] >= norm(x[1:n], 2) }.
All currently supported solvers use this convention.
Args:
problem : ParamConeProg
The problem that is the provenance of the constraint.
exp_cone_order: list
A list indicating how the exponential cone arguments are ordered.
Returns:
ParamConeProg with structured A.
"""
# Create a matrix to reshape constraints, then replicate for each
# variable entry.
restruct_mat = [] # Form a block diagonal matrix.
for constr in problem.constraints:
total_height = sum([arg.size for arg in constr.args])
if type(constr) == Zero:
restruct_mat.append(-sp.eye(constr.size, format='csr'))
elif type(constr) == NonNeg:
restruct_mat.append(sp.eye(constr.size, format='csr'))
elif type(constr) == SOC:
# Group each t row with appropriate X rows.
assert constr.axis == 0, 'SOC must be lowered to axis == 0'
# Interleave the rows of coeffs[0] and coeffs[1]:
# coeffs[0][0, :]
# coeffs[1][0:gap-1, :]
# coeffs[0][1, :]
# coeffs[1][gap-1:2*(gap-1), :]
t_spacer = ConicSolver.get_spacing_matrix(
shape=(total_height, constr.args[0].size),
spacing=constr.args[1].shape[0],
streak=1,
num_blocks=constr.args[0].size,
offset=0,
)
X_spacer = ConicSolver.get_spacing_matrix(
shape=(total_height, constr.args[1].size),
spacing=1,
streak=constr.args[1].shape[0],
num_blocks=constr.args[0].size,
offset=1,
)
restruct_mat.append(sp.hstack([t_spacer, X_spacer]))
elif type(constr) == ExpCone:
arg_mats = []
for i, arg in enumerate(constr.args):
space_mat = ConicSolver.get_spacing_matrix(
shape=(total_height, arg.size),
spacing=len(exp_cone_order) - 1,
streak=1,
num_blocks=arg.size,
offset=exp_cone_order[i],
)
arg_mats.append(space_mat)
restruct_mat.append(sp.hstack(arg_mats))
elif type(constr) == PSD:
restruct_mat.append(self.psd_format_mat(constr))
else:
raise ValueError("Unsupported constraint type.")
# Form new ParamConeProg
if restruct_mat:
# TODO(akshayka): profile to see whether using linear operators
# or bmat is faster
restruct_mat = as_block_diag_linear_operator(restruct_mat)
# this is equivalent to but _much_ faster than:
# restruct_mat_rep = sp.block_diag([restruct_mat]*(problem.x.size + 1))
# restruct_A = restruct_mat_rep * problem.A
reshaped_A = problem.A.reshape(restruct_mat.shape[1], -1, order='F').tocsr()
restructured_A = restruct_mat(reshaped_A).tocoo()
# Because of a bug in scipy versions < 1.20, `reshape`
# can overflow if indices are int32s.
restructured_A.row = restructured_A.row.astype(np.int64)
restructured_A.col = restructured_A.col.astype(np.int64)
restructured_A = restructured_A.reshape(
restruct_mat.shape[0] * (problem.x.size + 1),
problem.A.shape[1], order='F')
else:
restructured_A = problem.A
new_param_cone_prog = ParamConeProg(problem.c,
problem.x,
restructured_A,
problem.variables,
problem.var_id_to_col,
problem.constraints,
problem.parameters,
problem.param_id_to_col,
formatted=True)
return new_param_cone_prog
def invert(self, solution, inverse_data):
"""Returns the solution to the original problem given the inverse_data.
"""
status = solution['status']
if status in s.SOLUTION_PRESENT:
opt_val = solution['value']
primal_vars = {inverse_data[self.VAR_ID]: solution['primal']}
eq_dual = utilities.get_dual_values(
solution['eq_dual'],
utilities.extract_dual_value,
inverse_data[Solver.EQ_CONSTR])
leq_dual = utilities.get_dual_values(
solution['ineq_dual'],
utilities.extract_dual_value,
inverse_data[Solver.NEQ_CONSTR])
eq_dual.update(leq_dual)
dual_vars = eq_dual
return Solution(status, opt_val, primal_vars, dual_vars, {})
else:
return failure_solution(status)
|
the-stack_0_16137 | import torch.nn as nn
import torch
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 3, 3)
self.relu1 = nn.ReLU()
self.mp1 = nn.MaxPool2d(2)
self.conv2 = nn.Conv2d(3, 3, 3)
self.relu2 = nn.ReLU()
self.mp2 = nn.MaxPool2d(2)
self.flatten = nn.Flatten()
self.fc1 = nn.Linear(108, 64)
self.fc2 = nn.Linear(64, 32)
self.fc3 = nn.Linear(32, 10)
def forward(self, x):
out = self.conv1(x)
out = self.relu1(out)
out = self.mp1(out)
out = self.conv2(out)
out = self.relu2(out)
out = self.mp2(out)
out = self.flatten(out)
out = self.fc1(out)
out = self.fc2(out)
out = self.fc3(out)
return out
if __name__ == "__main__":
model = Model()
x = torch.ones((64, 3, 32, 32))
print(model.forward(x))
|
the-stack_0_16140 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Type
from typing import Union
from types import ModuleType
from typing import TYPE_CHECKING
from libcloud.container.types import Provider
from libcloud.common.providers import get_driver as _get_provider_driver
from libcloud.common.providers import set_driver as _set_provider_driver
if TYPE_CHECKING:
# NOTE: This is needed to avoid having setup.py depend on requests
from libcloud.container.base import ContainerDriver
DRIVERS = {
Provider.DUMMY:
('libcloud.container.drivers.dummy', 'DummyContainerDriver'),
Provider.DOCKER:
('libcloud.container.drivers.docker', 'DockerContainerDriver'),
Provider.JOYENT:
('libcloud.container.drivers.joyent', 'JoyentContainerDriver'),
Provider.ECS:
('libcloud.container.drivers.ecs', 'ElasticContainerDriver'),
Provider.KUBERNETES:
('libcloud.container.drivers.kubernetes', 'KubernetesContainerDriver'),
Provider.LXD:
('libcloud.container.drivers.lxd', 'LXDContainerDriver'),
Provider.RANCHER:
('libcloud.container.drivers.rancher', 'RancherContainerDriver'),
Provider.GKE:
('libcloud.container.drivers.gke', 'GKEContainerDriver')
}
def get_driver(provider):
# type: (Union[Provider, str]) -> Type[ContainerDriver]
return _get_provider_driver(drivers=DRIVERS, provider=provider)
def set_driver(provider, module, klass):
# type: (Union[Provider, str], ModuleType, type) -> Type[ContainerDriver]
return _set_provider_driver(drivers=DRIVERS, provider=provider,
module=module, klass=klass)
|
the-stack_0_16141 | """Mock Server for simple calls the cli and public api make"""
from flask import Flask, request, g, jsonify
import os
import sys
from datetime import datetime, timedelta
import json
import yaml
import six
# HACK: restore first two entries of sys path after wandb load
save_path = sys.path[:2]
import wandb
sys.path[0:0] = save_path
import logging
from six.moves import urllib
import threading
from tests.utils.mock_requests import RequestsMock, InjectRequestsParse
def default_ctx():
return {
"fail_graphql_count": 0, # used via "fail_graphql_times"
"fail_storage_count": 0, # used via "fail_storage_times"
"rate_limited_count": 0, # used via "rate_limited_times"
"page_count": 0,
"page_times": 2,
"requested_file": "weights.h5",
"current_run": None,
"files": {},
"k8s": False,
"resume": False,
"file_bytes": {},
"manifests_created": [],
"artifacts_by_id": {},
"upsert_bucket_count": 0,
}
def mock_server(mocker):
ctx = default_ctx()
app = create_app(ctx)
mock = RequestsMock(app, ctx)
# We mock out all requests libraries, couldn't find a way to mock the core lib
sdk_path = "wandb.sdk"
mocker.patch("gql.transport.requests.requests", mock)
mocker.patch("wandb.wandb_sdk.internal.file_stream.requests", mock)
mocker.patch("wandb.wandb_sdk.internal.internal_api.requests", mock)
mocker.patch("wandb.wandb_sdk.internal.update.requests", mock)
mocker.patch("wandb.wandb_sdk.internal.sender.requests", mock)
mocker.patch("wandb.apis.internal_runqueue.requests", mock)
mocker.patch("wandb.apis.public.requests", mock)
mocker.patch("wandb.util.requests", mock)
mocker.patch("wandb.wandb_sdk.wandb_artifacts.requests", mock)
print("Patched requests everywhere", os.getpid())
return mock
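# In a test suite this is typically wired up through a pytest fixture along
# the lines of the sketch below (the fixture name is an assumption, not part
# of this module):
#
#   @pytest.fixture
#   def mocked_backend(mocker):
#       return mock_server(mocker)
#
# after which any HTTP call made through the patched modules above is served
# by create_app() below instead of the real backend.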
def run(ctx):
if ctx["resume"]:
now = datetime.now()
created_at = (now - timedelta(days=1)).isoformat()
else:
created_at = datetime.now().isoformat()
stopped = ctx.get("stopped", False)
# for wandb_tests::wandb_restore_name_not_found
# if there is a fileName query, and this query is for nofile.h5
# return an empty file. otherwise, return the usual weights.h5
if ctx.get("graphql"):
fileNames = ctx["graphql"][-1]["variables"].get("fileNames")
else:
fileNames = None
if fileNames == ["nofile.h5"]:
fileNode = {
"id": "file123",
"name": "nofile.h5",
"sizeBytes": 0,
"md5": "0",
"url": request.url_root + "/storage?file=nofile.h5",
}
else:
fileNode = {
"id": "file123",
"name": ctx["requested_file"],
"sizeBytes": 20,
"md5": "XXX",
"url": request.url_root + "/storage?file=%s" % ctx["requested_file"],
"directUrl": request.url_root
+ "/storage?file=%s&direct=true" % ctx["requested_file"],
}
return {
"id": "test",
"name": "test",
"displayName": "beast-bug-33",
"state": "running",
"config": '{"epochs": {"value": 10}}',
"group": "A",
"jobType": "test",
"description": "",
"systemMetrics": '{"cpu": 100}',
"summaryMetrics": '{"acc": 100, "loss": 0}',
"fileCount": 1,
"history": [
'{"acc": 10, "loss": 90}',
'{"acc": 20, "loss": 80}',
'{"acc": 30, "loss": 70}',
],
"events": ['{"cpu": 10}', '{"cpu": 20}', '{"cpu": 30}'],
"files": {
# Special weights url by default, if requesting upload we set the name
"edges": [{"node": fileNode,}]
},
"sampledHistory": [[{"loss": 0, "acc": 100}, {"loss": 1, "acc": 0}]],
"shouldStop": False,
"failed": False,
"stopped": stopped,
"running": True,
"tags": [],
"notes": None,
"sweepName": None,
"createdAt": created_at,
"updatedAt": datetime.now().isoformat(),
}
def artifact(
ctx,
collection_name="mnist",
state="COMMITTED",
request_url_root="",
id_override=None,
):
_id = str(ctx["page_count"]) if id_override is None else id_override
return {
"id": _id,
"digest": "abc123",
"description": "",
"state": state,
"size": 10000,
"createdAt": datetime.now().isoformat(),
"updatedAt": datetime.now().isoformat(),
"versionIndex": ctx["page_count"],
"labels": [],
"metadata": "{}",
"aliases": [
{
"artifactCollectionName": collection_name,
"alias": "v%i" % ctx["page_count"],
}
],
"artifactSequence": {"name": collection_name,},
"currentManifest": {
"file": {
"directUrl": request_url_root
+ "/storage?file=wandb_manifest.json&id={}".format(_id)
}
},
}
def paginated(node, ctx, extra={}):
next_page = False
ctx["page_count"] += 1
if ctx["page_count"] < ctx["page_times"]:
next_page = True
edge = {"node": node, "cursor": "abc123"}
edge.update(extra)
return {
"edges": [edge],
"pageInfo": {"endCursor": "abc123", "hasNextPage": next_page},
}
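# For example, paginated({"id": "1"}, ctx) returns
#   {"edges": [{"node": {"id": "1"}, "cursor": "abc123"}],
#    "pageInfo": {"endCursor": "abc123", "hasNextPage": True}}
# with hasNextPage becoming False once ctx["page_count"] has reached
# ctx["page_times"].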
class CTX(object):
"""This is a silly threadsafe wrapper for getting ctx into the server
NOTE: This will stop working for live_mock_server if we make pytest run
in parallel.
"""
lock = threading.Lock()
STATE = None
def __init__(self, ctx):
self.ctx = ctx
def get(self):
return self.ctx
def set(self, ctx):
self.ctx = ctx
CTX.persist(self)
return self.ctx
@classmethod
def persist(cls, instance):
with cls.lock:
cls.STATE = instance.ctx
@classmethod
def load(cls, default):
with cls.lock:
if cls.STATE is not None:
return CTX(cls.STATE)
else:
return CTX(default)
def get_ctx():
if "ctx" not in g:
g.ctx = CTX.load(default_ctx())
return g.ctx.get()
def set_ctx(ctx):
get_ctx()
g.ctx.set(ctx)
def _bucket_config():
return {
"commit": "HEAD",
"github": "https://github.com/vanpelt",
"config": '{"foo":{"value":"bar"}}',
"files": {
"edges": [
{
"node": {
"directUrl": request.url_root
+ "/storage?file=wandb-metadata.json",
"name": "wandb-metadata.json",
}
},
{
"node": {
"directUrl": request.url_root + "/storage?file=diff.patch",
"name": "diff.patch",
}
},
]
},
}
class HttpException(Exception):
status_code = 500
def __init__(self, message, status_code=None, payload=None):
Exception.__init__(self)
self.message = message
if status_code is not None:
self.status_code = status_code
self.payload = payload
def to_dict(self):
rv = dict(self.payload or ())
rv["error"] = self.message
return rv
def create_app(user_ctx=None):
app = Flask(__name__)
# When starting in live mode, user_ctx is a fancy object
if isinstance(user_ctx, dict):
with app.app_context():
set_ctx(user_ctx)
@app.teardown_appcontext
def persist_ctx(exc):
if "ctx" in g:
CTX.persist(g.ctx)
@app.errorhandler(HttpException)
def handle_http_exception(error):
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
@app.route("/ctx", methods=["GET", "PUT", "DELETE"])
def update_ctx():
"""Updating context for live_mock_server"""
ctx = get_ctx()
body = request.get_json()
if request.method == "GET":
return json.dumps(ctx)
elif request.method == "DELETE":
app.logger.info("reseting context")
set_ctx(default_ctx())
return json.dumps(get_ctx())
else:
ctx.update(body)
# TODO: tests in CI failed on this
set_ctx(ctx)
app.logger.info("updated context %s", ctx)
return json.dumps(get_ctx())
@app.route("/graphql", methods=["POST"])
def graphql():
# TODO: in tests wandb-username is set to the test name, lets scope ctx to it
ctx = get_ctx()
test_name = request.headers.get("X-WANDB-USERNAME")
if test_name:
app.logger.info("Test request from: %s", test_name)
app.logger.info("graphql post")
if "fail_graphql_times" in ctx:
if ctx["fail_graphql_count"] < ctx["fail_graphql_times"]:
ctx["fail_graphql_count"] += 1
return json.dumps({"errors": ["Server down"]}), 500
if "rate_limited_times" in ctx:
if ctx["rate_limited_count"] < ctx["rate_limited_times"]:
ctx["rate_limited_count"] += 1
return json.dumps({"error": "rate limit exceeded"}), 429
body = request.get_json()
app.logger.info("graphql post body: %s", body)
if body["variables"].get("run"):
ctx["current_run"] = body["variables"]["run"]
if "mutation UpsertBucket(" in body["query"]:
param_config = body["variables"].get("config")
if param_config:
ctx.setdefault("config", []).append(json.loads(param_config))
param_summary = body["variables"].get("summaryMetrics")
if param_summary:
ctx.setdefault("summary", []).append(json.loads(param_summary))
ctx["upsert_bucket_count"] += 1
if body["variables"].get("files"):
requested_file = body["variables"]["files"][0]
ctx["requested_file"] = requested_file
url = request.url_root + "/storage?file={}&run={}".format(
urllib.parse.quote(requested_file), ctx["current_run"]
)
return json.dumps(
{
"data": {
"model": {
"bucket": {
"id": "storageid",
"files": {
"uploadHeaders": [],
"edges": [
{
"node": {
"name": requested_file,
"url": url,
"directUrl": url + "&direct=true",
}
}
],
},
}
}
}
}
)
if "historyTail" in body["query"]:
if ctx["resume"] is True:
hist_tail = '["{\\"_step\\": 15, \\"acc\\": 1, \\"_runtime\\": 60}"]'
return json.dumps(
{
"data": {
"model": {
"bucket": {
"name": "test",
"displayName": "funky-town-13",
"id": "test",
"config": '{"epochs": {"value": 10}}',
"summaryMetrics": '{"acc": 10, "best_val_loss": 0.5}',
"logLineCount": 14,
"historyLineCount": 15,
"eventsLineCount": 0,
"historyTail": hist_tail,
"eventsTail": '["{\\"_runtime\\": 70}"]',
}
}
}
}
)
else:
return json.dumps({"data": {"model": {"bucket": None}}})
if "query Runs(" in body["query"]:
return json.dumps(
{
"data": {
"project": {
"runCount": 4,
"readOnly": False,
"runs": paginated(run(ctx), ctx),
}
}
}
)
if "query Run(" in body["query"]:
return json.dumps({"data": {"project": {"run": run(ctx)}}})
if "query Model(" in body["query"]:
if "project(" in body["query"]:
project_field_name = "project"
run_field_name = "run"
else:
project_field_name = "model"
run_field_name = "bucket"
if "commit" in body["query"]:
run_config = _bucket_config()
else:
run_config = run(ctx)
return json.dumps(
{"data": {project_field_name: {run_field_name: run_config}}}
)
if "query Models(" in body["query"]:
return json.dumps(
{
"data": {
"models": {
"edges": [
{
"node": {
"id": "123",
"name": "myname",
"project": "myproj",
}
}
]
}
}
}
)
if "query Projects(" in body["query"]:
return json.dumps(
{
"data": {
"models": paginated(
{
"id": "1",
"name": "test-project",
"entityName": body["variables"]["entity"],
"createdAt": "now",
"isBenchmark": False,
},
ctx,
)
}
}
)
if "query Viewer " in body["query"]:
return json.dumps(
{
"data": {
"viewer": {
"entity": "mock_server_entity",
"flags": '{"code_saving_enabled": true}',
"teams": {
"edges": [] # TODO make configurable for cli_test
},
}
}
}
)
if "query Sweep(" in body["query"]:
return json.dumps(
{
"data": {
"project": {
"sweep": {
"id": "1234",
"name": "fun-sweep-10",
"state": "running",
"bestLoss": 0.33,
"config": yaml.dump(
{"metric": {"name": "loss", "value": "minimize"}}
),
"createdAt": datetime.now().isoformat(),
"heartbeatAt": datetime.now().isoformat(),
"updatedAt": datetime.now().isoformat(),
"earlyStopJobRunning": False,
"controller": None,
"scheduler": None,
"runs": paginated(run(ctx), ctx),
}
}
}
}
)
if "mutation UpsertSweep(" in body["query"]:
return json.dumps(
{
"data": {
"upsertSweep": {
"sweep": {
"name": "test",
"project": {
"id": "1234",
"name": "test",
"entity": {"id": "1234", "name": "test"},
},
}
}
}
}
)
if "mutation CreateAgent(" in body["query"]:
return json.dumps(
{"data": {"createAgent": {"agent": {"id": "mock-server-agent-93xy",}}}}
)
if "mutation Heartbeat(" in body["query"]:
return json.dumps(
{
"data": {
"agentHeartbeat": {
"agent": {"id": "mock-server-agent-93xy",},
"commands": json.dumps(
[
{
"type": "run",
"run_id": "mocker-sweep-run-x9",
"args": {"learning_rate": {"value": 0.99124}},
}
]
),
}
}
}
)
if "mutation UpsertBucket(" in body["query"]:
response = {
"data": {
"upsertBucket": {
"bucket": {
"id": "storageid",
"name": body["variables"].get("name", "abc123"),
"displayName": "lovely-dawn-32",
"project": {
"name": "test",
"entity": {"name": "mock_server_entity"},
},
},
"inserted": ctx["resume"] is False,
}
}
}
if body["variables"].get("name") == "mocker-sweep-run-x9":
response["data"]["upsertBucket"]["bucket"][
"sweepName"
] = "test-sweep-id"
return json.dumps(response)
if "mutation DeleteRun(" in body["query"]:
return json.dumps({"data": {}})
if "mutation CreateAnonymousApiKey " in body["query"]:
return json.dumps(
{
"data": {
"createAnonymousEntity": {"apiKey": {"name": "ANONYMOOSE" * 4}}
}
}
)
if "mutation DeleteFiles(" in body["query"]:
return json.dumps({"data": {"deleteFiles": {"success": True}}})
if "mutation PrepareFiles(" in body["query"]:
nodes = []
for i, file_spec in enumerate(body["variables"]["fileSpecs"]):
url = request.url_root + "/storage?file=%s" % file_spec["name"]
nodes.append(
{
"node": {
"id": str(i),
"name": file_spec["name"],
"displayName": file_spec["name"],
"digest": "null",
"uploadUrl": url,
"uploadHeaders": "",
}
}
)
return json.dumps({"data": {"prepareFiles": {"files": {"edges": nodes}}}})
if "mutation CreateArtifact(" in body["query"]:
collection_name = body["variables"]["artifactCollectionNames"][0]
ctx["artifacts"] = ctx.get("artifacts", {})
ctx["artifacts"][collection_name] = ctx["artifacts"].get(
collection_name, []
)
ctx["artifacts"][collection_name].append(body["variables"])
_id = body.get("variables", {}).get("digest", "")
if _id != "":
ctx.get("artifacts_by_id")[_id] = body["variables"]
return {
"data": {
"createArtifact": {
"artifact": artifact(
ctx,
collection_name,
id_override=_id,
state="COMMITTED"
if "PENDING" not in collection_name
else "PENDING",
)
}
}
}
if "mutation CreateArtifactManifest(" in body["query"]:
manifest = {
"id": 1,
"type": "INCREMENTAL"
if "incremental" in body.get("variables", {}).get("name", "")
else "FULL",
"file": {
"id": 1,
"directUrl": request.url_root
+ "/storage?file=wandb_manifest.json&name={}".format(
body.get("variables", {}).get("name", "")
),
"uploadUrl": request.url_root + "/storage?file=wandb_manifest.json",
"uploadHeaders": "",
},
}
ctx["manifests_created"].append(manifest)
return {"data": {"createArtifactManifest": {"artifactManifest": manifest,}}}
if "mutation UpdateArtifactManifest(" in body["query"]:
manifest = {
"id": 1,
"type": "INCREMENTAL"
if "incremental" in body.get("variables", {}).get("name", "")
else "FULL",
"file": {
"id": 1,
"directUrl": request.url_root
+ "/storage?file=wandb_manifest.json&name={}".format(
body.get("variables", {}).get("name", "")
),
"uploadUrl": request.url_root + "/storage?file=wandb_manifest.json",
"uploadHeaders": "",
},
}
return {"data": {"updateArtifactManifest": {"artifactManifest": manifest,}}}
if "mutation CreateArtifactFiles" in body["query"]:
return {
"data": {
"files": [
                        {
                            "node": {
                                "id": idx,
                                "name": file["name"],
                                "uploadUrl": "",
                                "uploadheaders": [],
                                "artifact": {"id": file["artifactID"]},
                            }
                        }
                        for idx, file in enumerate(
                            body["variables"]["artifactFiles"]
                        )
],
}
}
if "mutation CommitArtifact(" in body["query"]:
return {
"data": {
"commitArtifact": {
"artifact": {"id": 1, "digest": "0000===================="}
}
}
}
if "mutation UseArtifact(" in body["query"]:
return {"data": {"useArtifact": {"artifact": artifact(ctx)}}}
if "query ProjectArtifactType(" in body["query"]:
return {
"data": {
"project": {
"artifactType": {
"id": "1",
"name": "dataset",
"description": "",
"createdAt": datetime.now().isoformat(),
}
}
}
}
if "query ProjectArtifacts(" in body["query"]:
return {
"data": {
"project": {
"artifactTypes": paginated(
{
"id": "1",
"name": "dataset",
"description": "",
"createdAt": datetime.now().isoformat(),
},
ctx,
)
}
}
}
if "query ProjectArtifactCollections(" in body["query"]:
return {
"data": {
"project": {
"artifactType": {
"artifactSequences": paginated(
{
"id": "1",
"name": "mnist",
"description": "",
"createdAt": datetime.now().isoformat(),
},
ctx,
)
}
}
}
}
if "query RunArtifacts(" in body["query"]:
if "inputArtifacts" in body["query"]:
key = "inputArtifacts"
else:
key = "outputArtifacts"
artifacts = paginated(artifact(ctx), ctx)
artifacts["totalCount"] = ctx["page_times"]
return {"data": {"project": {"run": {key: artifacts}}}}
if "query Artifacts(" in body["query"]:
version = "v%i" % ctx["page_count"]
artifacts = paginated(artifact(ctx), ctx, {"version": version})
artifacts["totalCount"] = ctx["page_times"]
return {
"data": {
"project": {
"artifactType": {
"artifactSequence": {
"name": "mnist",
"artifacts": artifacts,
}
}
}
}
}
if "query Artifact(" in body["query"]:
art = artifact(ctx, request_url_root=request.url_root)
if "id" in body.get("variables", {}):
art = artifact(
ctx,
request_url_root=request.url_root,
id_override=body.get("variables", {}).get("id"),
)
art["artifactType"] = {"id": 1, "name": "dataset"}
return {"data": {"artifact": art}}
# code artifacts use source-RUNID names, we return the code type
art["artifactType"] = {"id": 2, "name": "code"}
if "source" not in body["variables"]["name"]:
art["artifactType"] = {"id": 1, "name": "dataset"}
if "logged_table" in body["variables"]["name"]:
art["artifactType"] = {"id": 3, "name": "run_table"}
if "run-" in body["variables"]["name"]:
art["artifactType"] = {"id": 4, "name": "run_table"}
if "wb_validation_data" in body["variables"]["name"]:
art["artifactType"] = {"id": 4, "name": "validation_dataset"}
return {"data": {"project": {"artifact": art}}}
if "query ArtifactManifest(" in body["query"]:
art = artifact(ctx)
art["currentManifest"] = {
"id": 1,
"file": {
"id": 1,
"directUrl": request.url_root
+ "/storage?file=wandb_manifest.json&name={}".format(
body.get("variables", {}).get("name", "")
),
},
}
return {"data": {"project": {"artifact": art}}}
if "stopped" in body["query"]:
return json.dumps(
{
"data": {
"Model": {
"project": {"run": {"stopped": ctx.get("stopped", False)}}
}
}
}
)
print("MISSING QUERY, add me to tests/mock_server.py", body["query"])
error = {"message": "Not implemented in tests/mock_server.py", "body": body}
return json.dumps({"errors": [error]})
@app.route("/storage", methods=["PUT", "GET"])
def storage():
ctx = get_ctx()
if "fail_storage_times" in ctx:
if ctx["fail_storage_count"] < ctx["fail_storage_times"]:
ctx["fail_storage_count"] += 1
return json.dumps({"errors": ["Server down"]}), 500
file = request.args.get("file")
_id = request.args.get("id", "")
run = request.args.get("run", "unknown")
ctx["storage"] = ctx.get("storage", {})
ctx["storage"][run] = ctx["storage"].get(run, [])
ctx["storage"][run].append(request.args.get("file"))
size = ctx["files"].get(request.args.get("file"))
if request.method == "GET" and size:
return os.urandom(size), 200
# make sure to read the data
request.get_data()
if request.method == "PUT":
curr = ctx["file_bytes"].get(file)
if curr is None:
ctx["file_bytes"].setdefault(file, 0)
ctx["file_bytes"][file] += request.content_length
else:
ctx["file_bytes"][file] += request.content_length
if file == "wandb_manifest.json":
if _id in ctx.get("artifacts_by_id"):
art = ctx["artifacts_by_id"][_id]
if "-validation_predictions" in art["artifactCollectionNames"][0]:
return {
"version": 1,
"storagePolicy": "wandb-storage-policy-v1",
"storagePolicyConfig": {},
"contents": {
"validation_predictions.table.json": {
"digest": "3aaaaaaaaaaaaaaaaaaaaa==",
"size": 81299,
}
},
}
if "wb_validation_data" in art["artifactCollectionNames"][0]:
return {
"version": 1,
"storagePolicy": "wandb-storage-policy-v1",
"storagePolicyConfig": {},
"contents": {
"validation_data.table.json": {
"digest": "3aaaaaaaaaaaaaaaaaaaaa==",
"size": 81299,
},
"media/tables/5aac4cea.table.json": {
"digest": "3aaaaaaaaaaaaaaaaaaaaa==",
"size": 81299,
},
},
}
if request.args.get("name") == "my-test_reference_download:latest":
return {
"version": 1,
"storagePolicy": "wandb-storage-policy-v1",
"storagePolicyConfig": {},
"contents": {
"StarWars3.wav": {
"digest": "a90eb05f7aef652b3bdd957c67b7213a",
"size": 81299,
"ref": "https://wandb-artifacts-refs-public-test.s3-us-west-2.amazonaws.com/StarWars3.wav",
},
"file1.txt": {
"digest": "0000====================",
"size": 81299,
},
},
}
elif (
_id == "bb8043da7d78ff168a695cff097897d2"
or _id == "ad4d74ac0e4167c6cf4aaad9d59b9b44"
):
return {
"version": 1,
"storagePolicy": "wandb-storage-policy-v1",
"storagePolicyConfig": {},
"contents": {
"t1.table.json": {
"digest": "3aaaaaaaaaaaaaaaaaaaaa==",
"size": 81299,
}
},
}
elif _id == "b89758a7e7503bdb021e0534fe444d9a":
return {
"version": 1,
"storagePolicy": "wandb-storage-policy-v1",
"storagePolicyConfig": {},
"contents": {
"logged_table.table.json": {
"digest": "3aaaaaaaaaaaaaaaaaaaaa==",
"size": 81299,
}
},
}
elif _id == "b9a598178557aed1d89bd93ec0db989b":
return {
"version": 1,
"storagePolicy": "wandb-storage-policy-v1",
"storagePolicyConfig": {},
"contents": {
"logged_table_2.table.json": {
"digest": "3aaaaaaaaaaaaaaaaaaaaa==",
"size": 81299,
}
},
}
elif _id in [
"2d9a7e0aa8407f0730e19e5bc55c3a45",
"c541de19b18331a4a33b282fc9d42510",
"6f3d6ed5417d2955afbc73bff0ed1609",
"7d797e62834a7d72538529e91ed958e2",
"03d3e221fd4da6c5fccb1fbd75fe475e",
"464aa7e0d7c3f8230e3fe5f10464a2e6",
"8ef51aeabcfcd89b719822de64f6a8bf",
]:
return {
"version": 1,
"storagePolicy": "wandb-storage-policy-v1",
"storagePolicyConfig": {},
"contents": {
"validation_data.table.json": {
"digest": "3aaaaaaaaaaaaaaaaaaaaa==",
"size": 81299,
},
"media/tables/e14239fe.table.json": {
"digest": "3aaaaaaaaaaaaaaaaaaaaa==",
"size": 81299,
},
},
}
elif (
len(ctx.get("graphql", [])) >= 3
and ctx["graphql"][2].get("variables", {}).get("name", "") == "dummy:v0"
) or request.args.get("name") == "dummy:v0":
return {
"version": 1,
"storagePolicy": "wandb-storage-policy-v1",
"storagePolicyConfig": {},
"contents": {
"dataset.partitioned-table.json": {
"digest": "0aaaaaaaaaaaaaaaaaaaaa==",
"size": 81299,
},
"parts/1.table.json": {
"digest": "1aaaaaaaaaaaaaaaaaaaaa==",
"size": 81299,
},
"t.table.json": {
"digest": "2aaaaaaaaaaaaaaaaaaaaa==",
"size": 123,
},
},
}
else:
return {
"version": 1,
"storagePolicy": "wandb-storage-policy-v1",
"storagePolicyConfig": {},
"contents": {
"digits.h5": {
"digest": "TeSJ4xxXg0ohuL5xEdq2Ew==",
"size": 81299,
},
},
}
elif file == "wandb-metadata.json":
return {
"docker": "test/docker",
"program": "train.py",
"args": ["--test", "foo"],
"git": ctx.get("git", {}),
}
elif file == "diff.patch":
# TODO: make sure the patch is valid for windows as well,
# and un skip the test in test_cli.py
return r"""
diff --git a/patch.txt b/patch.txt
index 30d74d2..9a2c773 100644
--- a/patch.txt
+++ b/patch.txt
@@ -1 +1 @@
-test
\ No newline at end of file
+testing
\ No newline at end of file
"""
return "", 200
@app.route("/artifacts/<entity>/<digest>", methods=["GET", "POST"])
def artifact_file(entity, digest):
if entity == "entity":
if (
digest == "d1a69a69a69a69a69a69a69a69a69a69"
): # "dataset.partitioned-table.json"
return (
json.dumps({"_type": "partitioned-table", "parts_path": "parts"}),
200,
)
elif digest == "d5a69a69a69a69a69a69a69a69a69a69": # "parts/1.table.json"
return (
json.dumps(
{
"_type": "table",
"column_types": {
"params": {
"type_map": {
"A": {
"params": {
"allowed_types": [
{"wb_type": "none"},
{"wb_type": "number"},
]
},
"wb_type": "union",
},
"B": {
"params": {
"allowed_types": [
{"wb_type": "none"},
{"wb_type": "number"},
]
},
"wb_type": "union",
},
"C": {
"params": {
"allowed_types": [
{"wb_type": "none"},
{"wb_type": "number"},
]
},
"wb_type": "union",
},
}
},
"wb_type": "dictionary",
},
"columns": ["A", "B", "C"],
"data": [[0, 0, 1]],
"ncols": 3,
"nrows": 1,
}
),
200,
)
elif digest == "d9a69a69a69a69a69a69a69a69a69a69": # "t.table.json"
return (
json.dumps(
{
"_type": "table",
"column_types": {
"params": {"type_map": {}},
"wb_type": "dictionary",
},
"columns": [],
"data": [],
"ncols": 0,
"nrows": 0,
}
),
200,
)
if digest == "dda69a69a69a69a69a69a69a69a69a69":
return (
json.dumps({"_type": "table-file", "columns": [], "data": []}),
200,
)
return "ARTIFACT %s" % digest, 200
@app.route("/files/<entity>/<project>/<run>/file_stream", methods=["POST"])
def file_stream(entity, project, run):
ctx = get_ctx()
ctx["file_stream"] = ctx.get("file_stream", [])
ctx["file_stream"].append(request.get_json())
response = json.dumps({"exitcode": None, "limits": {}})
inject = InjectRequestsParse(ctx).find(request=request)
if inject:
if inject.response:
response = inject.response
if inject.http_status:
# print("INJECT", inject, inject.http_status)
raise HttpException("some error", status_code=inject.http_status)
return response
@app.route("/api/v1/namespaces/default/pods/test")
def k8s_pod():
ctx = get_ctx()
image_id = b"docker-pullable://test@sha256:1234"
ms = b'{"status":{"containerStatuses":[{"imageID":"%s"}]}}' % image_id
if ctx.get("k8s"):
return ms, 200
else:
return b"", 500
@app.route("/api/sessions")
def jupyter_sessions():
return json.dumps(
[
{
"kernel": {"id": "12345"},
"notebook": {"path": "test.ipynb", "name": "test.ipynb"},
}
]
)
@app.route("/wandb_url", methods=["PUT"])
def spell_url():
ctx = get_ctx()
ctx["spell_data"] = request.get_json()
return json.dumps({"success": True})
@app.route("/pypi/<library>/json")
def pypi(library):
version = getattr(wandb, "__hack_pypi_latest_version__", wandb.__version__)
return json.dumps(
{
"info": {"version": version},
"releases": {
"88.1.2rc2": [],
"88.1.2rc12": [],
"88.1.2rc3": [],
"88.1.2rc4": [],
"0.0.8rc6": [],
"0.0.8rc2": [],
"0.0.8rc3": [],
"0.0.8rc8": [],
"0.0.2": [{"yanked": True}],
"0.0.3": [{"yanked": True, "yanked_reason": "just cuz"}],
"0.0.7": [],
"0.0.5": [],
"0.0.6": [],
},
}
)
@app.errorhandler(404)
def page_not_found(e):
print("Got request to: %s (%s)" % (request.url, request.method))
return "Not Found", 404
return app
class ParseCTX(object):
def __init__(self, ctx):
self._ctx = ctx
def get_filestream_file_updates(self):
data = {}
file_stream_updates = self._ctx["file_stream"]
for update in file_stream_updates:
files = update.get("files")
if not files:
continue
for k, v in six.iteritems(files):
data.setdefault(k, []).append(v)
return data
def get_filestream_file_items(self):
data = {}
fs_file_updates = self.get_filestream_file_updates()
for k, v in six.iteritems(fs_file_updates):
l = []
for d in v:
offset = d.get("offset")
content = d.get("content")
assert offset is not None
assert content is not None
assert offset == 0 or offset == len(l), (k, v, l, d)
if not offset:
l = []
if k == u"output.log":
lines = [content]
else:
lines = map(json.loads, content)
l.extend(lines)
data[k] = l
return data
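    # Roughly, given file-stream updates such as (values invented)
    #   [{"offset": 0, "content": ['{"a": 1}']}, {"offset": 1, "content": ['{"a": 2}']}]
    # recorded for "wandb-history.jsonl", this yields
    #   {"wandb-history.jsonl": [{"a": 1}, {"a": 2}]}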
@property
def summary(self):
fs_files = self.get_filestream_file_items()
summary = fs_files["wandb-summary.json"][-1]
return summary
@property
def history(self):
fs_files = self.get_filestream_file_items()
history = fs_files["wandb-history.jsonl"]
return history
@property
def config(self):
return self._ctx["config"][-1]
@property
def config_wandb(self):
return self.config["_wandb"]["value"]
@property
def telemetry(self):
return self.config.get("_wandb", {}).get("value", {}).get("t")
@property
def metrics(self):
return self.config.get("_wandb", {}).get("value", {}).get("m")
@property
def manifests_created(self):
return self._ctx.get("manifests_created") or []
if __name__ == "__main__":
app = create_app()
app.logger.setLevel(logging.INFO)
app.run(debug=False, port=int(os.environ.get("PORT", 8547)))
|
the-stack_0_16143 | # Copyright 2019 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from impala.dbapi import connect
class TestHttpConnect(object):
def test_simple_connect(self):
con = connect("localhost", 28000, use_http_transport=True)
cur = con.cursor()
cur.execute('select 1')
rows = cur.fetchall()
assert rows == [(1,)]
|
the-stack_0_16144 | # -*- coding: utf-8 -*-
import datetime
import logging
import os.path
import sqlite3
import string
from nmj.tables import ALL_TABLES, DbVersion, ScanDirs, ScanSystem, ShowGroups
_LOGGER = logging.getLogger(__name__)
INDEXES = [
"CREATE INDEX IDX_PHOTOS_TITLE ON PHOTOS(TITLE ASC);",
"CREATE INDEX IDX_PHOTOS_SEARCH_TITLE ON PHOTOS(SEARCH_TITLE ASC);",
"CREATE INDEX IDX_PHOTO_ALBUMS_PHOTOS_PHOTO_ALBUMS_ID ON PHOTO_ALBUMS_PHOTOS(PHOTO_ALBUMS_ID ASC);",
"CREATE INDEX IDX_PHOTO_ALBUMS_PHOTOS_PHOTOS_ID ON PHOTO_ALBUMS_PHOTOS(PHOTOS_ID ASC);",
"CREATE INDEX IDX_PHOTO_DATE_CAPTURE_TIME ON PHOTO_DATE(CAPTURE_TIME ASC);",
"CREATE INDEX IDX_SHOWS_CONTENT_TTID ON SHOWS(CONTENT_TTID ASC);",
"CREATE INDEX IDX_SHOWS_TITLE ON SHOWS(TITLE ASC);",
"CREATE INDEX IDX_SHOWS_SEARCH_TITLE ON SHOWS(SEARCH_TITLE ASC);",
"CREATE INDEX IDX_SHOWS_YEAR ON SHOWS(YEAR ASC);",
"CREATE INDEX IDX_SHOWS_RATING ON SHOWS(RATING ASC);",
"CREATE INDEX IDX_SHOWS_PARENTAL_CONTROL ON SHOWS(PARENTAL_CONTROL ASC);",
"CREATE INDEX IDX_SONGS_TITLE ON SONGS(TITLE ASC);",
"CREATE INDEX IDX_SONGS_SEARCH_TITLE ON SONGS(SEARCH_TITLE ASC);",
"CREATE INDEX IDX_SONGS_RATING ON SONGS(RATING ASC);",
"CREATE INDEX IDX_SONGS_RELEASE_DATE ON SONGS(RELEASE_DATE ASC);",
"CREATE INDEX IDX_SONG_ALBUMS_TITLE ON SONG_ALBUMS(TITLE ASC);",
"CREATE INDEX IDX_SONG_ALBUMS_SEARCH_TITLE ON SONG_ALBUMS(SEARCH_TITLE ASC);",
"CREATE INDEX IDX_SONG_ALBUMS_RELEASE_DATE ON SONG_ALBUMS(RELEASE_DATE ASC);",
"CREATE INDEX IDX_SONG_ALBUM_SONGS_ALBUMS_ID ON SONG_ALBUMS_SONGS(ALBUMS_ID ASC);",
"CREATE INDEX IDX_SONG_ALBUM_SONGS_SONGS_ID ON SONG_ALBUMS_SONGS(SONGS_ID ASC);",
"CREATE INDEX IDX_SONG_GENRES_SONGS_GENRES_ID ON SONG_GENRES_SONGS(GENRES_ID ASC);",
"CREATE INDEX IDX_SONG_GENRES_SONGS_SONGS_ID ON SONG_GENRES_SONGS(SONGS_ID ASC);",
"CREATE INDEX IDX_SONG_GENRES_SONG_ALBUMS_ALBUMS_ID ON SONG_GENRES_SONG_ALBUMS(ALBUMS_ID ASC);",
"CREATE INDEX IDX_SONG_GENRES_SONG_ALBUMS_GENRES_ID ON SONG_GENRES_SONG_ALBUMS(GENRES_ID ASC);",
"CREATE INDEX IDX_SONG_GROUPS_SONG_ALBUMS_GROUPS_ID ON SONG_GROUPS_SONG_ALBUMS(GROUPS_ID ASC);",
"CREATE INDEX IDX_SONG_GROUPS_SONG_ALBUMS_ALBUMS_ID ON SONG_GROUPS_SONG_ALBUMS(ALBUMS_ID ASC);",
"CREATE INDEX IDX_SONG_PERSONS_SONGS_PERSONS_ID ON SONG_PERSONS_SONGS(PERSONS_ID ASC);",
"CREATE INDEX IDX_SONG_PERSONS_SONGS_SONGS_ID ON SONG_PERSONS_SONGS(SONGS_ID ASC);",
"CREATE INDEX IDX_SONG_PERSONS_SONG_ALBUMS_PERSONS_ID ON SONG_PERSONS_SONG_ALBUMS(PERSONS_ID ASC);",
"CREATE INDEX IDX_SONG_PERSONS_SONG_ALBUMS_ALBUMS_ID ON SONG_PERSONS_SONG_ALBUMS(ALBUMS_ID ASC);",
"CREATE INDEX IDX_VIDEO_SUBTITLES_VIDEOS_ID ON VIDEO_SUBTITLES(VIDEOS_ID ASC);",
]
class DBProxy(object):
isolation_level = "DEFERRED"
def __init__(self, root_path, popcorn_path=""):
self.root_path = root_path
self.popcorn_path = popcorn_path
self.media_db_path = os.path.join(root_path, "nmj_database", "media.db")
if not os.path.isfile(self.media_db_path):
self.create()
self.connection, self.cursor = self.get_connection_and_cursor()
def get_connection_and_cursor(self):
if not os.path.isdir(os.path.join(self.root_path, "nmj_database")):
os.makedirs(os.path.dirname(self.media_db_path))
connection = sqlite3.connect(self.media_db_path)
connection.isolation_level = self.isolation_level
connection.text_factory = str
cursor = connection.cursor()
return connection, cursor
def create(self):
_LOGGER.info("Creating database...")
connection, cursor = self.get_connection_and_cursor()
for table in ALL_TABLES:
_LOGGER.debug("create table %s", table)
table().create(cursor)
DbVersion.insert(cursor, version="2.0.0")
ScanDirs.insert(cursor, directory="", name=self.popcorn_path, scan_time="", size=1807172, category=3, status=3)
ScanSystem.insert(cursor, type="RUNNING_STATUS", value="0")
ScanSystem.insert(cursor, type="HISTORY_SCAN_VIDEOS", value=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), custom1="1", custom2="89", custom3="0")
for group in ["0-9",] + [letter for letter in string.ascii_uppercase]:
ShowGroups.insert(cursor, name=group, language="FR")
for request in INDEXES:
cursor.execute(request)
connection.commit()
cursor.close()
connection.close()
_LOGGER.info("Database creation done")
def contains(self, table, **kwargs):
items = self.get_tables_items(table, **kwargs)
return bool(items)
def get_first(self, table, **kwargs):
try:
return self.get_tables_items(table, **kwargs)[0]
except IndexError:
return None
def get_tables_items(self, *tables, **kwargs):
result = []
for table in tables:
try:
result += table.load(self.cursor, **kwargs)
            except Exception:
_LOGGER.exception("Getting items in table %s", table)
return result
def insert(self, table, **kwargs):
return table.insert(self.cursor, **kwargs)
def commit(self):
self.connection.commit()
def delete(self, to_remove):
to_remove.delete(self.cursor)
def update(self, table, item_id, **kwargs):
item = self.get_tables_items(table, id=item_id)[0]
item.update(self.cursor, **kwargs)
self.commit()
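# Sketch of typical use (paths and values are hypothetical; the ScanDirs
# columns follow the insert performed in create() above):
#
#   db = DBProxy("/share/NMJ", popcorn_path="HDD1")
#   if not db.contains(ScanDirs, name="HDD1"):
#       db.insert(ScanDirs, directory="", name="HDD1", scan_time="",
#                 size=0, category=3, status=3)
#   db.commit()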
|
the-stack_0_16146 | #!/usr/bin/env python3
import telnetlib
import time
# yum install python3 (centos7.9)
# 要请求的IP和端口号
Host = '192.168.89.135'
Port = '22'
def do_telnet(Host, Port):
try:
tn = telnetlib.Telnet(Host, Port, timeout=5)
tn.close()
except:
return False
return True
while True:
time.sleep(5)
res = do_telnet(Host, Port)
print(str(Host) + ':' + str(Port) + ' ' + str(res))
|
the-stack_0_16152 | import json
import requests
from web import config
CATEGORY = 1 # Magic the Gathering
PAGE_LENGTH = 100 # API's max items per page limit is 100
class TCGPlayerException(Exception):
pass
class NoResults(TCGPlayerException):
pass
def _send(
method: str,
endpoint: str,
params: any = None,
data: any = None,
token: str = None
) -> any:
"""Send a request to the TCGplayer API.
:param method: HTTP method to use
:type method: str
:param endpoint: API endpoint to send request to
:type endpoint: str
:param params: URL parameters, defaults to None
:type params: any, optional
:param data: Request data, defaults to None
:type data: any, optional
:param token: Bearer token, defaults to None
:type token: str, optional
:raises TCGPlayerException: An HTTP error occurred
:return: The response returned, decoded into an object
:rtype: any
"""
headers = {}
if token:
headers['Authorization'] = f'bearer {token}'
response = requests.request(
method,
f'https://api.tcgplayer.com{endpoint}',
headers=headers,
params=params,
data=data
)
try:
response.raise_for_status()
except requests.HTTPError as e:
code = e.response.status_code
if code == 404:
raise NoResults from e
raise TCGPlayerException from e
resp = json.loads(response.text)
return resp
def _get(endpoint, **kwargs):
"""Wrapper function for sending GET requests."""
return _send('GET', endpoint, **kwargs)
def _post(endpoint, **kwargs):
"""Wrapper function for sending POST requests."""
return _send('POST', endpoint, **kwargs)
def login() -> str:
data = {
'grant_type': 'client_credentials',
'client_id': config.TCGPLAYER_PUBLICKEY,
'client_secret': config.TCGPLAYER_PRIVATEKEY
}
token = _post(
'/token',
data=data
)['access_token']
return token
def _offset(page):
"""Convert a page number into an offset."""
return (page - 1) * PAGE_LENGTH
def _all_pages(callback, **kwargs):
"""Get all pages available for a given endpoint.
:param callback: Callback function getting the pages
:type callback: function
"""
page = 1
results = []
token = login()
while True:
try:
resp = callback(page, token, **kwargs)
except NoResults:
# 404, meaning no more results
break
if len(resp) == 0:
# Backup to prevent infinite loop, in case API stops 404-ing
break
results += resp
page += 1
return results
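# For example, fetching every group and then that group's products reduces to
# (illustrative only; the 'groupId' key is an assumption about the payload):
#
#   groups = _all_pages(get_groups)          # what get_all_groups() does
#   products = _all_pages(get_products, groupid=groups[0]['groupId'])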
def get_groups(page, token):
params = {
'offset': _offset(page),
'limit': PAGE_LENGTH,
}
resp = _get(
f'/catalog/categories/{CATEGORY}/groups',
params=params,
token=token
)
groups = resp['results']
return groups
def get_all_groups():
return _all_pages(get_groups)
def get_products(page, token, groupid=None):
params = {
'productTypes': 'Cards',
'categoryId': CATEGORY, # Only MTG cards
'groupid': groupid,
'offset': _offset(page),
'limit': PAGE_LENGTH,
'getExtendedFields': True
}
resp = _get(
'/catalog/products',
params=params,
token=token
)
products = resp['results']
for p in products:
# Convert extended data to dictionary
extras = {}
for data in p['extendedData']:
extras[data['name']] = data['value']
p['extendedData'] = extras
return products
def get_all_products(groupid):
return _all_pages(get_products, groupid=groupid)
def get_product_prices(products):
productids = ','.join([str(p) for p in products])
token = login()
return _get(f'/pricing/product/{productids}', token=token)['results']
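# Rough usage sketch; the 'groupId'/'productId' field names follow the typical
# TCGplayer response shape and are assumptions, not verified here:
#   groups = get_all_groups()
#   products = get_all_products(groups[0]['groupId'])
#   prices = get_product_prices([p['productId'] for p in products])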
|
the-stack_0_16157 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from collections import defaultdict
from . import Command
from ..benchmarks import Benchmarks
from ..machine import iter_machine_files
from ..results import iter_results_for_machine, iter_results_for_machine_and_hash
from ..runner import format_benchmark_result
from ..repo import get_repo, NoSuchNameError
from ..util import load_json
from ..console import log, color_print
from ..environment import get_environments
from .. import util
from . import common_args
class Show(Command):
@classmethod
def setup_arguments(cls, subparsers):
parser = subparsers.add_parser(
"show", help="Print recorded data.",
description="""Print saved benchmark results.""")
parser.add_argument(
'commit', nargs='?', default=None,
help="""The commit to show data for.""")
parser.add_argument(
'--details', action='store_true', default=False,
help="""Show all result details.""")
common_args.add_bench(parser)
common_args.add_machine(parser)
common_args.add_environment(parser)
parser.set_defaults(func=cls.run_from_args)
return parser
@classmethod
def run_from_conf_args(cls, conf, args, **kwargs):
return cls.run(
conf=conf, commit=args.commit, bench=args.bench,
machine=args.machine, env_spec=args.env_spec,
details=args.details, **kwargs
)
@classmethod
def run(cls, conf, commit=None, bench=None, machine=None, env_spec=None,
details=False):
if env_spec:
env_names = ([env.name for env in get_environments(conf, env_spec, verbose=False)]
+ list(env_spec))
else:
env_names = None
machines = []
for path in iter_machine_files(conf.results_dir):
d = load_json(path)
machines.append(d['machine'])
if len(machines) == 0:
raise util.UserError("No results found")
elif machine is None:
pass
elif machine in machines:
machines = [machine]
else:
raise util.UserError(
"Results for machine '{0} not found".format(machine))
benchmarks = Benchmarks.load(conf, regex=bench)
if commit is None:
cls._print_commits(conf, machines, env_names, benchmarks)
else:
cls._print_results(conf, commit, machines, env_names, benchmarks,
show_details=details)
@classmethod
def _print_commits(cls, conf, machines, env_names, benchmarks):
commits = defaultdict(lambda: {})
for machine in machines:
for result in iter_results_for_machine(
conf.results_dir, machine):
if env_names is not None and result.env_name not in env_names:
continue
if result.get_result_keys(benchmarks):
commits[(machine, result.env_name)][result.commit_hash] = result.date
log.flush()
color_print("Commits with results:")
color_print("")
for machine, env_name in sorted(commits.keys()):
color_print("Machine : {}".format(machine))
color_print("Environment: {}".format(env_name))
color_print("")
cur_commits = commits[(machine, env_name)]
commit_order = list(cur_commits.keys())
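            # list commits in chronological order of their recorded result dates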
commit_order.sort(key=lambda x: cur_commits[x])
for commit in commit_order:
color_print(" {}".format(commit[:conf.hash_length]))
color_print("")
@classmethod
def _print_results(cls, conf, commit_hash, machines, env_names, benchmarks,
show_details=False):
repo = get_repo(conf)
try:
commit_hash = repo.get_hash_from_name(commit_hash)
except NoSuchNameError:
pass
def results_iter():
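            # Yield (machine, result) pairs for the requested commit, skipping
            # results whose environment was not requested.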
for machine in sorted(machines):
for result in iter_results_for_machine_and_hash(
conf.results_dir, machine, commit_hash):
if env_names is not None and result.env_name not in env_names:
continue
yield machine, result
color_print("Commit: {}".format(repo.get_decorated_hash(commit_hash,
conf.hash_length)),
"blue")
color_print("")
for machine, result in results_iter():
for name in sorted(result.get_result_keys(benchmarks)):
cls._print_benchmark(machine, result, benchmarks[name],
show_details=show_details)
@classmethod
def _print_benchmark(cls, machine, result, benchmark, show_details=False):
color_print("{} [{}/{}]".format(benchmark['name'],
machine,
result.env_name),
'green')
info, details = format_benchmark_result(result, benchmark)
color_print(" {}".format(info), 'red')
if details:
color_print(" " + details.replace("\n", "\n "))
started_at = result.started_at.get(benchmark['name'])
ended_at = result.ended_at.get(benchmark['name'])
if started_at and ended_at:
started_at = util.js_timestamp_to_datetime(started_at)
ended_at = util.js_timestamp_to_datetime(ended_at)
color_print(' started: {}, duration: {}'.format(
started_at.strftime('%Y-%m-%d %H:%M:%S'),
util.human_time((ended_at - started_at).total_seconds())))
if not show_details:
color_print("")
return
stats = result.get_result_stats(benchmark['name'], benchmark['params'])
def get_stat_info(key):
return [x.get(key) if x is not None else None for x in stats]
for key in ['repeat', 'number', 'ci_99', 'mean', 'std', 'min', 'max']:
values = get_stat_info(key)
if key == 'ci_99':
values = ["({}, {})".format(util.human_value(x[0], benchmark['unit']),
util.human_value(x[1], benchmark['unit']))
if x is not None else None
for x in values]
elif any(isinstance(x, float) for x in values):
values = [util.human_value(x, benchmark['unit']) if x is not None else None
for x in values]
if not all(x is None for x in values):
color_print(" {}: {}".format(key, ", ".join(map(str, values))))
samples = result.get_result_samples(benchmark['name'], benchmark['params'])
if not all(x is None for x in samples):
color_print(" samples: {}".format(samples))
color_print("")
|
the-stack_0_16159 | #!/usr/bin/env python
__author__ = "Mari Wahl"
__copyright__ = "Copyright 2014, The Cogent Project"
__credits__ = ["Mari Wahl"]
__license__ = "GPL"
__version__ = "4.1"
__maintainer__ = "Mari Wahl"
__email__ = "[email protected]"
from helpers import running, constants
# change here for type of net:
NETWORK_FILES = constants.NETWORK_FILES_DIR_COMMUNICATION + constants.NETWORK_FILES_UN_COMMUNICATION
TYPE_NET_DIR = "communication/"
def main():
running.sampling(NETWORK_FILES, TYPE_NET_DIR)
print("All graphs for " + TYPE_NET_DIR + " were processed. The end! \n")
if __name__ == '__main__':
main()
|
the-stack_0_16160 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.mturk.core.worlds import MTurkOnboardWorld, MTurkTaskWorld
from parlai.mturk.core.agents import (
MTURK_DISCONNECT_MESSAGE,
RETURN_MESSAGE,
TIMEOUT_MESSAGE,
)
import threading
import time
def is_disconnected(act):
return 'text' in act and act['text'] in [
MTURK_DISCONNECT_MESSAGE,
RETURN_MESSAGE,
TIMEOUT_MESSAGE,
]
class LightChatOnboardingWorld(MTurkOnboardWorld):
"""
Example onboarding world.
Sends a message from the world to the worker and then exits as complete after the
worker uses the interface
"""
instruction_act = {
'id': 'System',
'text': 'Please attempt to take a turn given the setting and persona '
'on the left. This is where your information will appear in '
'the main task.',
'task_data': {
'base_name': 'bandit',
'persona': (
"I am an unforgiving bandit. Orcs have stolen my family away,"
" and they treat me like trash. I one day want to have my "
"revenge, be it through blood or gold. I'll try to get back at"
" them any chance I get."
),
'setting': (
"You are in a bar. There isn't anything really special about "
"it, and it's relatively quiet tonight. The air is somewhat "
"tense. A sign above the bar says 'No Fighting' and something "
"tells you that rule is taken pretty seriously. "
"There is a beer here. There is an orc here. "
),
'actions': [
'wave at orc',
'steal coin purse from orc',
'hit orc',
'give beer to orc',
'hug orc',
'get beer',
],
},
}
bad_choice_act = {
'id': 'System',
'text': "Are you sure that's an appropriate action to take given your "
"persona and the current setting? Try again.",
}
too_short_act = {
'id': 'System',
'text': "Please generally speak in full sentences unless your persona "
"implies that your character isn't able to.",
}
block_act = {
'id': 'System',
'text': "Sorry, you've exceeded the maximum amount of tries to get the "
"correct actions given your persona and the setting, and thus we "
"don't believe you can complete the task correctly. Please return "
"the HIT.",
}
complete_act = {
'id': 'System',
'text': "Passed - We'll be pairing you with a partner. Hold on tight.",
}
def block_loop(self):
print('Worker {} failed onboarding'.format(self.mturk_agent.worker_id))
self.mturk_agent.observe(self.block_act)
self.mturk_agent.mturk_manager.soft_block_worker(self.mturk_agent.worker_id)
act = self.mturk_agent.act()
while not is_disconnected(act):
self.mturk_agent.observe(self.block_act)
act = self.mturk_agent.act()
return True
def parley(self):
self.turns = 0
self.mturk_agent.update_agent_id('Bandit')
self.mturk_agent.observe(self.instruction_act)
act = self.mturk_agent.act() # first attempt, turns = 0
data = act.get('task_data', {'action': None})
while data['action'] != 'steal coin purse from orc' or len(act['text']) < 4:
if self.turns >= 2: # if 3rd attempt wasn't correct, block worker
self.block_loop()
self.episodeDone = True
return
if is_disconnected(act):
self.episodeDone = True
return
if data['action'] != 'steal coin purse from orc':
self.mturk_agent.observe(self.bad_choice_act)
else:
self.mturk_agent.observe(self.too_short_act)
self.turns += 1
act = self.mturk_agent.act()
data = act.get('task_data', {'action': None})
self.mturk_agent.observe(self.complete_act)
self.mturk_agent.onboarding_turns = self.turns
self.episodeDone = True
time.sleep(3)
class LightChatTaskWorld(MTurkTaskWorld):
"""
    World to demonstrate workers with asymmetric roles.
This task amounts to three rounds and then an evaluation step. It is purposefully
created as a task to demo multiple views and has no other purpose.
"""
collector_agent_id = 'Moderator'
def __init__(self, opt, mturk_agents, graph, room, characters):
self.mturk_agents = mturk_agents
self.graph = graph
self.room = room
self.characters = characters
# Extract the character names
self.c_names = [characters[0][0].lower(), characters[1][0].lower()]
self.graph_copy = graph.copy()
self.mturk_agents[0].update_agent_id(self.c_names[0].capitalize())
self.mturk_agents[1].update_agent_id(self.c_names[1].capitalize())
self.episodeDone = False
self.turns = 0
self.acts = []
self.graph.freeze(True)
def get_context_actions_for(self, agent_name):
self.graph.parse_exec(agent_name, 'look')
self.graph.parse_exec(agent_name, 'inv')
context = self.graph.get_text(agent_name).rstrip('\n')
use_actions = [
'get',
'put',
'drink',
'eat',
'steal',
'hit',
'hug',
'wear',
'wield',
'drop',
'give',
'remove',
]
actions = self.graph.get_possible_actions(agent_name, use_actions=use_actions)
return context, actions
def parley(self):
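        # turns == 0: also push the setting/persona context to both workers.
        # turns < 7: alternate acts between the two workers, executing any chosen
        #            graph action and relaying the resulting world text.
        # otherwise: send a closing message and mark the episode done.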
if self.turns == 0:
# Settings for both
for i in [0, 1]:
agent_name = self.c_names[i]
self.graph.get_text(agent_name).rstrip('\n')
context, actions = self.get_context_actions_for(agent_name)
ad = {
'id': 'System',
'text': "Your chat partner is: {}. "
"Please chat for 8 full turns "
"while pretending to be your assigned "
"persona in the assigned setting, both "
"provided in the 'context' tab of the left panel. "
"After the first turn you will need to respond within "
"5 minutes to avoid timing out."
"If unsure what to talk about, start "
"getting to know your partner's persona, or "
"discuss the setting. Take actions when/if it "
"feels appropriate to. "
"Any other characters in the room will not interact "
"with or respond to you, so while they may be good "
"things to talk about, don't interact with them."
"You can find the original instructions on the "
"'Task Instructions' tab to the left."
"".format(self.c_names[1 - i]),
'task_data': {
'base_name': self.c_names[i],
'persona': self.characters[i][1]['personas'][0],
'setting': context,
'actions': actions,
},
}
self.mturk_agents[i].observe(ad)
if self.turns < 7:
for i in [0, 1]:
cur_agent = self.mturk_agents[i]
other_agent = self.mturk_agents[1 - i]
cur_agent_name = self.c_names[i]
other_agent_name = self.c_names[1 - i]
if self.turns == 0 and i == 0:
a = cur_agent.act()
else:
a = cur_agent.act(timeout=5 * 60)
self.acts.append(a)
if is_disconnected(a):
self.episodeDone = True
return
graph_action = a.get('task_data', {'action': ''})['action']
observe_action = {
'id': cur_agent_name.capitalize(),
'text': a['text'],
'task_data': {},
}
if graph_action.startswith('gesture'):
observe_action['task_data']['action'] = graph_action
elif graph_action != '':
# execute graph action
status, c_acts_text = self.graph.parse_exec(
cur_agent_name, graph_action
)
if status:
self.graph.update_world()
# send new setting and actions to the actor
return_act_text = self.graph.get_text(cur_agent_name).rstrip('\n')
if status:
observe_action['task_data']['action'] = self.graph.get_text(
other_agent_name
).rstrip('\n')
context, actions = self.get_context_actions_for(cur_agent_name)
reflex_action = {
'id': 'System',
'text': return_act_text,
'task_data': {'setting': context, 'actions': actions},
}
cur_agent.observe(reflex_action)
# Set the viewer context change and new actions
context, actions = self.get_context_actions_for(other_agent_name)
observe_action['task_data']['setting'] = context
observe_action['task_data']['actions'] = actions
other_agent.observe(observe_action)
self.turns += 1
else:
# evaluate
ad = {
'id': 'System',
'text': "Thank you for the talk, the chat is complete.",
}
for agent in self.mturk_agents:
agent.observe(ad)
self.episodeDone = True
def episode_done(self):
return self.episodeDone
def shutdown(self):
# Parallel shutdown of agents
def shutdown_agent(agent):
try:
agent.shutdown(timeout=None)
except Exception:
agent.shutdown() # not MTurkAgent
threads = []
for agent in self.mturk_agents:
t = threading.Thread(target=shutdown_agent, args=(agent,))
t.start()
threads.append(t)
for t in threads:
t.join()
def review_work(self):
# Can review the work here to accept or reject it
pass
def get_custom_task_data(self):
# brings important data together for the task, to later be used for
# creating the dataset. If data requires pickling, put it in a field
# called 'needs-pickle'.
return {
'acts': self.acts,
'room': self.room,
'characters': self.characters,
'needs-pickle': self.graph_copy,
}
|
the-stack_0_16161 | #!/usr/bin/env python
# Copyright 2017 Calico LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from __future__ import print_function
from optparse import OptionParser
import gc
import json
import pdb
import pickle
import os
from queue import Queue
import sys
from threading import Thread
import time
import h5py
import numpy as np
import pandas as pd
import pysam
import tensorflow as tf
if tf.__version__[0] == '1':
tf.compat.v1.enable_eager_execution()
from basenji import seqnn
from basenji import stream
from basenji import vcf as bvcf
'''
basenji_sad.py
Compute SNP Activity Difference (SAD) scores for SNPs in a VCF file.
'''
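# Example invocation (file names are illustrative only):
#   basenji_sad.py -f data/hg38.fa -o sad_out --stats SAD,SADR params.json model.h5 snps.vcf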
################################################################################
# main
################################################################################
def main():
usage = 'usage: %prog [options] <params_file> <model_file> <vcf_file>'
parser = OptionParser(usage)
parser.add_option('-f', dest='genome_fasta',
default='%s/data/hg38.fa' % os.environ['BASENJIDIR'],
help='Genome FASTA for sequences [Default: %default]')
parser.add_option('-n', dest='norm_file',
default=None,
help='Normalize SAD scores')
parser.add_option('-o',dest='out_dir',
default='sad',
help='Output directory for tables and plots [Default: %default]')
parser.add_option('-p', dest='processes',
default=None, type='int',
help='Number of processes, passed by multi script')
parser.add_option('--pseudo', dest='log_pseudo',
default=1, type='float',
help='Log2 pseudocount [Default: %default]')
parser.add_option('--rc', dest='rc',
default=False, action='store_true',
help='Average forward and reverse complement predictions [Default: %default]')
parser.add_option('--shifts', dest='shifts',
default='0', type='str',
help='Ensemble prediction shifts [Default: %default]')
parser.add_option('--stats', dest='sad_stats',
default='SAD',
help='Comma-separated list of stats to save. [Default: %default]')
parser.add_option('-t', dest='targets_file',
default=None, type='str',
help='File specifying target indexes and labels in table format')
parser.add_option('--ti', dest='track_indexes',
default=None, type='str',
help='Comma-separated list of target indexes to output BigWig tracks')
parser.add_option('--threads', dest='threads',
default=False, action='store_true',
help='Run CPU math and output in a separate thread [Default: %default]')
# parser.add_option('-u', dest='penultimate',
# default=False, action='store_true',
# help='Compute SED in the penultimate layer [Default: %default]')
(options, args) = parser.parse_args()
if len(args) == 3:
# single worker
params_file = args[0]
model_file = args[1]
vcf_file = args[2]
elif len(args) == 4:
# multi separate
options_pkl_file = args[0]
params_file = args[1]
model_file = args[2]
vcf_file = args[3]
# save out dir
out_dir = options.out_dir
# load options
options_pkl = open(options_pkl_file, 'rb')
options = pickle.load(options_pkl)
options_pkl.close()
# update output directory
options.out_dir = out_dir
elif len(args) == 5:
# multi worker
options_pkl_file = args[0]
params_file = args[1]
model_file = args[2]
vcf_file = args[3]
worker_index = int(args[4])
# load options
options_pkl = open(options_pkl_file, 'rb')
options = pickle.load(options_pkl)
options_pkl.close()
# update output directory
options.out_dir = '%s/job%d' % (options.out_dir, worker_index)
else:
parser.error('Must provide parameters and model files and QTL VCF file')
if not os.path.isdir(options.out_dir):
os.mkdir(options.out_dir)
if options.track_indexes is None:
options.track_indexes = []
else:
options.track_indexes = [int(ti) for ti in options.track_indexes.split(',')]
if not os.path.isdir('%s/tracks' % options.out_dir):
os.mkdir('%s/tracks' % options.out_dir)
options.shifts = [int(shift) for shift in options.shifts.split(',')]
options.sad_stats = options.sad_stats.split(',')
#################################################################
# read parameters and targets
# read model parameters
with open(params_file) as params_open:
params = json.load(params_open)
params_model = params['model']
params_train = params['train']
if options.targets_file is None:
target_slice = None
else:
targets_df = pd.read_csv(options.targets_file, sep='\t', index_col=0)
target_ids = targets_df.identifier
target_labels = targets_df.description
target_slice = targets_df.index
#################################################################
# setup model
# can we sum on GPU?
length_stats = set(['SAX','SAXR','SAR','ALT','REF'])
sum_length = length_stats.isdisjoint(set(options.sad_stats))
sum_length = False # minimal influence
seqnn_model = seqnn.SeqNN(params_model)
seqnn_model.restore(model_file)
seqnn_model.build_slice(target_slice)
if sum_length:
seqnn_model.build_sad()
seqnn_model.build_ensemble(options.rc, options.shifts)
targets_length = seqnn_model.target_lengths[0]
num_targets = seqnn_model.num_targets()
if options.targets_file is None:
target_ids = ['t%d' % ti for ti in range(num_targets)]
target_labels = ['']*len(target_ids)
#################################################################
# load SNPs
# filter for worker SNPs
if options.processes is not None:
# determine boundaries
num_snps = bvcf.vcf_count(vcf_file)
worker_bounds = np.linspace(0, num_snps, options.processes+1, dtype='int')
    # read SNPs from VCF
snps = bvcf.vcf_snps(vcf_file, start_i=worker_bounds[worker_index],
end_i=worker_bounds[worker_index+1])
else:
    # read SNPs from VCF
snps = bvcf.vcf_snps(vcf_file)
num_snps = len(snps)
# open genome FASTA
genome_open = pysam.Fastafile(options.genome_fasta)
def snp_gen():
for snp in snps:
# get SNP sequences
snp_1hot_list = bvcf.snp_seq1(snp, params_model['seq_length'], genome_open)
for snp_1hot in snp_1hot_list:
yield snp_1hot
#################################################################
# setup output
sad_out = initialize_output_h5(options.out_dir, options.sad_stats,
snps, target_ids, target_labels, targets_length)
if options.threads:
snp_threads = []
snp_queue = Queue()
for i in range(1):
sw = SNPWorker(snp_queue, sad_out, options.sad_stats, options.log_pseudo)
sw.start()
snp_threads.append(sw)
#################################################################
# predict SNP scores, write output
# initialize predictions stream
preds_stream = stream.PredStreamGen(seqnn_model, snp_gen(), params_train['batch_size'])
# predictions index
pi = 0
for si in range(num_snps):
# get predictions
ref_preds = preds_stream[pi]
pi += 1
alt_preds = preds_stream[pi]
pi += 1
if options.threads:
# queue SNP
snp_queue.put((ref_preds, alt_preds, si))
else:
# process SNP
if sum_length:
print('Length summed')
write_snp(ref_preds, alt_preds, sad_out, si,
options.sad_stats, options.log_pseudo)
else:
write_snp_len(ref_preds, alt_preds, sad_out, si,
options.sad_stats, options.log_pseudo)
if options.threads:
# finish queue
print('Waiting for threads to finish.', flush=True)
snp_queue.join()
# close genome
genome_open.close()
###################################################
# compute SAD distributions across variants
write_pct(sad_out, options.sad_stats)
sad_out.close()
def initialize_output_h5(out_dir, sad_stats, snps, target_ids, target_labels, targets_length):
"""Initialize an output HDF5 file for SAD stats."""
num_targets = len(target_ids)
num_snps = len(snps)
sad_out = h5py.File('%s/sad.h5' % out_dir, 'w')
# write SNPs
snp_ids = np.array([snp.rsid for snp in snps], 'S')
sad_out.create_dataset('snp', data=snp_ids)
# write SNP chr
snp_chr = np.array([snp.chr for snp in snps], 'S')
sad_out.create_dataset('chr', data=snp_chr)
# write SNP pos
snp_pos = np.array([snp.pos for snp in snps], dtype='uint32')
sad_out.create_dataset('pos', data=snp_pos)
# check flips
snp_flips = [snp.flipped for snp in snps]
# write SNP reference allele
snp_refs = []
snp_alts = []
for snp in snps:
if snp.flipped:
snp_refs.append(snp.alt_alleles[0])
snp_alts.append(snp.ref_allele)
else:
snp_refs.append(snp.ref_allele)
snp_alts.append(snp.alt_alleles[0])
snp_refs = np.array(snp_refs, 'S')
snp_alts = np.array(snp_alts, 'S')
sad_out.create_dataset('ref_allele', data=snp_refs)
sad_out.create_dataset('alt_allele', data=snp_alts)
# write targets
sad_out.create_dataset('target_ids', data=np.array(target_ids, 'S'))
sad_out.create_dataset('target_labels', data=np.array(target_labels, 'S'))
# initialize SAD stats
for sad_stat in sad_stats:
if sad_stat in ['REF','ALT']:
sad_out.create_dataset(sad_stat,
shape=(num_snps, targets_length, num_targets),
dtype='float16')
else:
sad_out.create_dataset(sad_stat,
shape=(num_snps, num_targets),
dtype='float16')
return sad_out
def write_pct(sad_out, sad_stats):
"""Compute percentile values for each target and write to HDF5."""
# define percentiles
d_fine = 0.001
d_coarse = 0.01
percentiles_neg = np.arange(d_fine, 0.1, d_fine)
percentiles_base = np.arange(0.1, 0.9, d_coarse)
percentiles_pos = np.arange(0.9, 1, d_fine)
percentiles = np.concatenate([percentiles_neg, percentiles_base, percentiles_pos])
sad_out.create_dataset('percentiles', data=percentiles)
pct_len = len(percentiles)
for sad_stat in sad_stats:
if sad_stat not in ['REF','ALT']:
sad_stat_pct = '%s_pct' % sad_stat
# compute
sad_pct = np.percentile(sad_out[sad_stat], 100*percentiles, axis=0).T
sad_pct = sad_pct.astype('float16')
# save
sad_out.create_dataset(sad_stat_pct, data=sad_pct, dtype='float16')
def write_snp(ref_preds_sum, alt_preds_sum, sad_out, si, sad_stats, log_pseudo):
"""Write SNP predictions to HDF, assuming the length dimension has
been collapsed."""
# compare reference to alternative via mean subtraction
if 'SAD' in sad_stats:
sad = alt_preds_sum - ref_preds_sum
sad_out['SAD'][si,:] = sad.astype('float16')
# compare reference to alternative via mean log division
if 'SADR' in sad_stats:
sar = np.log2(alt_preds_sum + log_pseudo) \
- np.log2(ref_preds_sum + log_pseudo)
sad_out['SADR'][si,:] = sar.astype('float16')
def write_snp_len(ref_preds, alt_preds, sad_out, si, sad_stats, log_pseudo):
"""Write SNP predictions to HDF, assuming the length dimension has
been maintained."""
ref_preds = ref_preds.astype('float64')
alt_preds = alt_preds.astype('float64')
num_targets = ref_preds.shape[-1]
# sum across length
ref_preds_sum = ref_preds.sum(axis=0)
alt_preds_sum = alt_preds.sum(axis=0)
# compare reference to alternative via mean subtraction
if 'SAD' in sad_stats:
sad = alt_preds_sum - ref_preds_sum
sad_out['SAD'][si] = sad.astype('float16')
# compare reference to alternative via max subtraction
if 'SAX' in sad_stats:
sad_vec = (alt_preds - ref_preds)
max_i = np.argmax(np.abs(sad_vec), axis=0)
sax = sad_vec[max_i, np.arange(num_targets)]
sad_out['SAX'][si] = sax.astype('float16')
# compare reference to alternative via mean log division
if 'SADR' in sad_stats:
sar = np.log2(alt_preds_sum + log_pseudo) \
- np.log2(ref_preds_sum + log_pseudo)
sad_out['SADR'][si] = sar.astype('float16')
# compare reference to alternative via max subtraction
if 'SAXR' in sad_stats:
sar_vec = np.log2(alt_preds + log_pseudo) \
- np.log2(ref_preds + log_pseudo)
max_i = np.argmax(np.abs(sar_vec), axis=0)
saxr = sar_vec[max_i, np.arange(num_targets)]
sad_out['SAXR'][si] = saxr.astype('float16')
# compare geometric means
if 'SAR' in sad_stats:
sar_vec = np.log2(alt_preds + log_pseudo) \
- np.log2(ref_preds + log_pseudo)
geo_sad = sar_vec.sum(axis=0)
sad_out['SAR'][si] = geo_sad.astype('float16')
# predictions
if 'REF' in sad_stats:
sad_out['REF'][si] = ref_preds.astype('float16')
if 'ALT' in sad_stats:
sad_out['ALT'][si] = alt_preds.astype('float16')
class SNPWorker(Thread):
"""Compute summary statistics and write to HDF."""
def __init__(self, snp_queue, sad_out, stats, log_pseudo=1):
Thread.__init__(self)
self.queue = snp_queue
self.daemon = True
self.sad_out = sad_out
self.stats = stats
self.log_pseudo = log_pseudo
def run(self):
while True:
# unload predictions
ref_preds, alt_preds, szi = self.queue.get()
# write SNP
write_snp(ref_preds, alt_preds, self.sad_out, szi, self.stats, self.log_pseudo)
if szi % 32 == 0:
gc.collect()
# communicate finished task
self.queue.task_done()
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
main()
|
the-stack_0_16162 | import FWCore.ParameterSet.Config as cms
fftSimParam = cms.PSet(
NumOfFFT_Points = cms.int32(2048), # Length of signal, This should be an integer number with power of 2
SamplingRepetition = cms.int32(10) # FS: Sampling repetition per ns [1/ns]
)
TofCharge_Test = cms.PSet(
TofVector = cms.vdouble(0.0, 35.0),
ChargeVect = cms.vdouble(3*6242, 3*6242),
TestSensorSize= cms.double(0.04) # cm2
)
SiHitPulseShapeParam =cms.PSet(
HitPulseParam = cms.vdouble(0.6294422, 99.999855, 40.371655, 1.0, 3.5/2.2) # 0.6294422, 99.999855, 40.371655, 1.0, 3.5/2.2
)
SiPadFrontEndBlock0 = cms.PSet(
GoodForSizeRange = cms.vdouble(0.0,0.0255), # cm2, range from minimum size through maximum size
MaxFEOutputVoltage = cms.double(700.0), # mV
#LimmiterEdgeCorrFactor = cms.double(1.5), # unitless, by default should be 1.5
ZCComp_LowerTsh = cms.double(-5.0), # mV
ZCComp_UpperTsh = cms.double(0.0), # mV
ArmingComp_LowerTsh = cms.double(5.0), # mV
ArmingComp_UpperTsh = cms.double(20.0), # mV
TIA_Shaper_Gain = cms.double(28.0), # V/V (the amplifier gain after TIA and Shaper1)
Tia_Rf = cms.double(5.0), # kOhm
Tia_Cf = cms.double(0.25), # pF
Tia_Cin_gs = cms.double(0.4), # pf (just the TIA input Capacitance), the SiPad Capacitance will be added to this
Tia_Co = cms.double(0.4), # pf
Tia_gin = cms.double(3.0), # mS
SensorCouplingCapacitance = cms.double(315.0), # pF
SensorCapPerCm2 = cms.double(86.207), # pF/cm2
Shaper1_Tau = cms.double(0.9), # ns
CFD_Delay = cms.double(2.0), # ns
CfdShaper_Gain = cms.double(1.5), # V/V
CfdShaper_Tau = cms.double(0.25), # ns
DelayModel = cms.string('FirstOrderPadeDelay'), # 'IdealDelay' or 'FirstOrderPadeDelay'
CFD_Fraction = cms.double(0.5), # between 0-1, typically around 0.5
lpGBT_AlignerDelay = cms.double(5.2), # ns
Bx_Duration = cms.double(25.0), # ns
ToAUpperCut = cms.double(30.0), # ns // for BIB study, more than one BX should be investigated
ToALowerCut = cms.double(-30.0), # ns // for BIB study, more than one BX should be investigated
BinLength = cms.double(6.26), # ns
BinOffset = cms.double(0.0), # ns
)
SiPadFrontEndBlock1 = SiPadFrontEndBlock0.clone();
SiPadFrontEndBlock2 = SiPadFrontEndBlock0.clone();
SiPadFrontEndBlock3 = SiPadFrontEndBlock0.clone();
SiPadFrontEndBlock4 = SiPadFrontEndBlock0.clone();
SiPadFrontEndBlock5 = SiPadFrontEndBlock0.clone();
SiPadFrontEndBlock6 = SiPadFrontEndBlock0.clone();
SiPadFrontEndBlock7 = SiPadFrontEndBlock0.clone();
SiPadFrontEndBlock8 = SiPadFrontEndBlock0.clone();
#----------------------------
SiPadFrontEndBlock0.GoodForSizeRange = cms.vdouble(0.0 , 0.0255) # cm2, range from minimum size through maximum size
SiPadFrontEndBlock1.GoodForSizeRange = cms.vdouble(0.0255, 0.0335) # cm2, range from minimum size through maximum size
SiPadFrontEndBlock2.GoodForSizeRange = cms.vdouble(0.0335, 0.046) # cm2, range from minimum size through maximum size
SiPadFrontEndBlock3.GoodForSizeRange = cms.vdouble(0.046 , 0.067) # cm2, range from minimum size through maximum size
SiPadFrontEndBlock4.GoodForSizeRange = cms.vdouble(0.067 , 0.1065) # cm2, range from minimum size through maximum size
SiPadFrontEndBlock5.GoodForSizeRange = cms.vdouble(0.1065, 0.1965) # cm2, range from minimum size through maximum size
SiPadFrontEndBlock6.GoodForSizeRange = cms.vdouble(0.1965, 0.491) # cm2, range from minimum size through maximum size
SiPadFrontEndBlock7.GoodForSizeRange = cms.vdouble(0.491 , 0.866) # cm2, range from minimum size through maximum size
SiPadFrontEndBlock8.GoodForSizeRange = cms.vdouble(0.866 , 100.0) # cm2, range from minimum size through maximum size
#-------------------------
SiPadFrontEndBlock1.CFD_Fraction = cms.double(0.55)
SiPadFrontEndBlock1.lpGBT_AlignerDelay = cms.double(5.5) # ns
SiPadFrontEndBlock1.TIA_Shaper_Gain = cms.double(28.0)
SiPadFrontEndBlock2.CFD_Fraction = cms.double(0.6)
SiPadFrontEndBlock2.lpGBT_AlignerDelay = cms.double(5.8) # ns
SiPadFrontEndBlock2.TIA_Shaper_Gain = cms.double(28.0)
SiPadFrontEndBlock3.CFD_Fraction = cms.double(0.65)
SiPadFrontEndBlock3.lpGBT_AlignerDelay = cms.double(6.2) # ns
SiPadFrontEndBlock3.TIA_Shaper_Gain = cms.double(31.0)
SiPadFrontEndBlock4.CFD_Fraction = cms.double(0.7)
SiPadFrontEndBlock4.lpGBT_AlignerDelay = cms.double(6.6) # ns
SiPadFrontEndBlock4.TIA_Shaper_Gain = cms.double(36.0)
SiPadFrontEndBlock5.CFD_Fraction = cms.double(0.75)
SiPadFrontEndBlock5.lpGBT_AlignerDelay = cms.double(7.1) # ns
SiPadFrontEndBlock5.TIA_Shaper_Gain = cms.double(46.0)
SiPadFrontEndBlock6.CFD_Fraction = cms.double(0.8)
SiPadFrontEndBlock6.lpGBT_AlignerDelay = cms.double(7.8) # ns
SiPadFrontEndBlock6.TIA_Shaper_Gain = cms.double(73.0)
SiPadFrontEndBlock7.CFD_Fraction = cms.double(0.85)
SiPadFrontEndBlock7.lpGBT_AlignerDelay = cms.double(8.6) # ns
SiPadFrontEndBlock7.TIA_Shaper_Gain = cms.double(167.0)
SiPadFrontEndBlock8.CFD_Fraction = cms.double(0.88)
SiPadFrontEndBlock8.lpGBT_AlignerDelay = cms.double(9.1) # ns
SiPadFrontEndBlock8.TIA_Shaper_Gain = cms.double(234.0)
|
the-stack_0_16163 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from gaebusiness.business import CommandExecutionException
from gaecookie.decorator import no_csrf
from gaepermission.decorator import login_not_required
from tekton.gae.middleware.json_middleware import JsonResponse, JsonUnsecureResponse
from aluno_app import facade
@login_not_required
@no_csrf
def index():
cmd = facade.list_alunos_cmd()
aluno_list = cmd()
short_form = facade.aluno_short_form()
aluno_short = [short_form.fill_with_model(m) for m in aluno_list]
return JsonResponse(aluno_short)
@login_not_required
@no_csrf
def save(_resp, **aluno_properties):
cmd = facade.save_aluno_cmd(**aluno_properties)
return _save_or_update_json_response(_resp, cmd)
@login_not_required
@no_csrf
def update(_resp, id, **aluno_properties):
cmd = facade.update_aluno_cmd(id, **aluno_properties)
return _save_or_update_json_response(_resp, cmd)
@login_not_required
@no_csrf
def delete(id):
facade.delete_aluno_cmd(id)()
def _save_or_update_json_response(_resp, cmd):
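    """Run the command; on error respond 500 with the command errors, otherwise return the saved aluno as JSON."""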
try:
aluno = cmd()
except CommandExecutionException:
_resp.status_code = 500
return JsonUnsecureResponse(cmd.errors)
short_form = facade.aluno_short_form()
return JsonResponse(short_form.fill_with_model(aluno))
|
the-stack_0_16164 | import json
from flask import Flask, request
from juggler import Juggler
app = Flask(__name__)
def apiresult(fn):
def _wrapper(*args, **kw):
result = None
error = None
try:
result = fn(*args, **kw)
except Exception as exception:
error = (
type(exception).__name__,
str(exception))
return json.dumps({
'result': result,
'error': error})
_wrapper.__name__ = fn.__name__
_wrapper.__doc__ = fn.__doc__
return _wrapper
@app.route('/submit/<path:command>', methods=['GET'])
@app.route('/submit', methods=['POST'])
@apiresult
def submit(command=None):
"""
Submit a job. This can be a string, which will be split into terms by " ".
It can also be a pre-split list, which is more robust.
"""
if command is None:
command = request.json
job_id = Juggler.submit_job(command)
return job_id
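# Hypothetical example (response shape follows the apiresult wrapper above;
# the returned job id value is illustrative):
#   curl -X POST -H 'Content-Type: application/json' \
#        -d '["echo", "hello"]' http://localhost:5000/submit
#   -> {"result": 3, "error": null}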
@app.route('/submit_many', methods=['POST'])
@apiresult
def submit_many():
"""
Everything is fired off at once. Think of it as a shotgun.
Commands are sent in as lists of lists containing terms.
[['echo', 'before this'],
['echo', 'this may happen']]
"""
commands = request.json
job_ids = list(map(Juggler.submit_job, commands))
return job_ids
@app.route('/submit_chain', methods=['POST'])
@apiresult
def submit_chain():
"""
If submit_many is a shotgun, then this is a machine gun.
Everything is fired off in order, one by one.
Commands are sent in as lists of lists containing terms.
[['echo', 'this will happen'],
['echo', 'before this']]
"""
commands = request.json
job_ids = Juggler.submit_queue(commands)
return job_ids
@app.route('/result/<int:job_id>')
@apiresult
def get_result(job_id):
"""
Gets the result of a finished job.
If the job has not yet finished, wait for it.
"""
job_id, result = Juggler.get_result(job_id)
return result
@app.route('/status/<int:job_id>')
@app.route('/status')
@apiresult
def get_status(job_id=None):
"""
Gets the job status.
This will tell you what the command was,
and if it's still running.
"""
if job_id is None:
return dict(Juggler.get_all_statuses())
job_id, result = Juggler.get_status(job_id)
return result
if __name__ == '__main__':
app.run(debug=True)
|
the-stack_0_16166 | # -*- coding: utf-8 -*-
import autograd.numpy as np
from lifelines.utils import coalesce, _get_index, CensoringType
from lifelines.fitters import ParametricRegressionFitter
import pandas as pd
from lifelines.utils.safe_exp import safe_exp
class PiecewiseExponentialRegressionFitter(ParametricRegressionFitter):
r"""
This implements a piecewise constant-hazard model at pre-specified break points.
.. math:: h(t) = \begin{cases}
1/\lambda_0(x) & \text{if $t \le \tau_0$} \\
1/\lambda_1(x) & \text{if $\tau_0 < t \le \tau_1$} \\
1/\lambda_2(x) & \text{if $\tau_1 < t \le \tau_2$} \\
...
\end{cases}
where :math:`\lambda_i(x) = \exp{\beta_i x}`.
Parameters
-----------
breakpoints: list
a list of times when a new exponential model is constructed.
penalizer: float
penalize the variance of the :math:`\lambda_i`. See blog post below.
alpha: float, optional (default=0.05)
the level in the confidence intervals.
Examples
----------
See blog post `here <https://dataorigami.net/blogs/napkin-folding/churn>`_ and
paper replication `here <https://github.com/CamDavidsonPilon/lifelines-replications/blob/master/replications/Friedman_1982.ipynb>`_
"""
# about 50% faster than BFGS
_scipy_fit_method = "SLSQP"
_scipy_fit_options = {"ftol": 1e-6, "maxiter": 200}
def __init__(self, breakpoints, alpha=0.05, penalizer=0.0):
super(PiecewiseExponentialRegressionFitter, self).__init__(alpha=alpha)
breakpoints = np.sort(breakpoints)
if len(breakpoints) and not (breakpoints[-1] < np.inf):
raise ValueError("Do not add inf to the breakpoints.")
if len(breakpoints) and breakpoints[0] < 0:
raise ValueError("First breakpoint must be greater than 0.")
self.breakpoints = np.append(breakpoints, [np.inf])
self.n_breakpoints = len(self.breakpoints)
self.penalizer = penalizer
self._fitted_parameter_names = ["lambda_%d_" % i for i in range(self.n_breakpoints)]
def _add_penalty(self, params, neg_ll):
params_stacked = np.stack(params.values())
coef_penalty = 0
if self.penalizer > 0:
for i in range(params_stacked.shape[1]):
if not self._constant_cols[i]:
coef_penalty = coef_penalty + (params_stacked[:, i]).var()
return neg_ll + self.penalizer * coef_penalty
def _cumulative_hazard(self, params, T, Xs):
n = T.shape[0]
T = T.reshape((n, 1))
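        # M[i, j] is the time subject i spends inside breakpoint interval j:
        # clip every breakpoint at T_i, then take successive differences.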
M = np.minimum(np.tile(self.breakpoints, (n, 1)), T)
M = np.hstack([M[:, tuple([0])], np.diff(M, axis=1)])
lambdas_ = np.array([safe_exp(-np.dot(Xs[param], params[param])) for param in self._fitted_parameter_names])
return (M * lambdas_.T).sum(1)
def _log_hazard(self, params, T, X):
hz = self._hazard(params, T, X)
hz = np.clip(hz, 1e-20, np.inf)
return np.log(hz)
def _prep_inputs_for_prediction_and_return_parameters(self, X):
X = X.copy()
if isinstance(X, pd.DataFrame):
X = X[self.params_["lambda_0_"].index]
return np.array([np.exp(np.dot(X, self.params_["lambda_%d_" % i])) for i in range(self.n_breakpoints)])
def predict_cumulative_hazard(self, df, times=None, conditional_after=None):
"""
Return the cumulative hazard rate of subjects in X at time points.
Parameters
----------
X: numpy array or DataFrame
a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
times: iterable, optional
an iterable of increasing times to predict the cumulative hazard at. Default
is the set of all durations (observed and unobserved). Uses a linear interpolation if
points in time are not in the index.
Returns
-------
cumulative_hazard_ : DataFrame
the cumulative hazard of individuals over the timeline
"""
if isinstance(df, pd.Series):
return self.predict_cumulative_hazard(df.to_frame().T)
if conditional_after is not None:
raise NotImplementedError()
times = np.atleast_1d(coalesce(times, self.timeline, np.unique(self.durations))).astype(float)
n = times.shape[0]
times = times.reshape((n, 1))
lambdas_ = self._prep_inputs_for_prediction_and_return_parameters(df)
bp = self.breakpoints
M = np.minimum(np.tile(bp, (n, 1)), times)
M = np.hstack([M[:, tuple([0])], np.diff(M, axis=1)])
return pd.DataFrame(np.dot(M, (1 / lambdas_)), columns=_get_index(df), index=times[:, 0])
@property
def _ll_null(self):
if hasattr(self, "_ll_null_"):
return self._ll_null_
initial_point = np.zeros(len(self._fitted_parameter_names))
model = self.__class__(breakpoints=self.breakpoints[:-1], penalizer=self.penalizer)
regressors = {param_name: ["_intercept"] for param_name in self._fitted_parameter_names}
if CensoringType.is_right_censoring(self):
df = pd.DataFrame({"T": self.durations, "E": self.event_observed, "entry": self.entry, "_intercept": 1.0})
model.fit_right_censoring(
df, "T", "E", initial_point=initial_point, entry_col="entry", regressors=regressors
)
elif CensoringType.is_interval_censoring(self):
df = pd.DataFrame(
{
"lb": self.lower_bound,
"ub": self.upper_bound,
"E": self.event_observed,
"entry": self.entry,
"_intercept": 1.0,
}
)
model.fit_interval_censoring(
df, "lb", "ub", "E", initial_point=initial_point, entry_col="entry", regressors=regressors
)
if CensoringType.is_left_censoring(self):
raise NotImplementedError()
self._ll_null_ = model.log_likelihood_
return self._ll_null_
|
the-stack_0_16167 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A utility for integration test workflow.
This script helps update PR/issue comments and labels during the testing process.
For a PR, this script updates (or creates, if missing) the "Test Result" comment.
stage value: [start, progress, end]
USAGE:
python scripts/gha/it_workflow.py --stage <stage> \
--token ${{github.token}} \
--issue_number ${{needs.check_trigger.outputs.pr_number}}\
--actor ${{github.actor}} \
--commit ${{needs.prepare_matrix.outputs.github_ref}} \
--run_id ${{github.run_id}} \
[--new_token ${{steps.generate-token.outputs.token}}]
For the daily report, this script updates (or creates, if missing) the "Test Result" in the issue
titled "Nightly Integration Testing Report" with the label "nightly-testing".
stage value: [report]
USAGE:
python scripts/gha/it_workflow.py --stage report \
--token ${{github.token}} \
--actor ${{github.actor}} \
--commit ${{needs.prepare_matrix.outputs.github_ref}} \
--run_id ${{github.run_id}}
"""
import datetime
import pytz
import shutil
from absl import app
from absl import flags
from absl import logging
import github
import summarize_test_results as summarize
_REPORT_LABEL = "nightly-testing"
_REPORT_TITLE = "Nightly Integration Testing Report"
_LABEL_TRIGGER_FULL = "tests-requested: full"
_LABEL_TRIGGER_QUICK = "tests-requested: quick"
_LABEL_PROGRESS = "tests: in-progress"
_LABEL_FAILED = "tests: failed"
_LABEL_SUCCEED = "tests: succeeded"
_COMMENT_TITLE_PROGESS = "### ⏳ Integration test in progress...\n"
_COMMENT_TITLE_PROGESS_FLAKY = "### Integration test with FLAKINESS (but still ⏳ in progress)\n"
_COMMENT_TITLE_PROGESS_FAIL = "### ❌ Integration test FAILED (but still ⏳ in progress)\n"
_COMMENT_TITLE_FLAKY = "### Integration test with FLAKINESS (succeeded after retry)\n"
_COMMENT_TITLE_FAIL = "### ❌ Integration test FAILED\n"
_COMMENT_TITLE_SUCCEED = "### ✅ Integration test succeeded!\n"
_COMMENT_IDENTIFIER = "integration-test-status-comment"
_COMMENT_SUFFIX = f'\n<hidden value="{_COMMENT_IDENTIFIER}"></hidden>'
_LOG_ARTIFACT_NAME = "log-artifact"
_LOG_OUTPUT_DIR = "test_results"
_BUILD_STAGES_START = "start"
_BUILD_STAGES_PROGRESS = "progress"
_BUILD_STAGES_END = "end"
_BUILD_STAGES_REPORT = "report"
_BUILD_STAGES = [_BUILD_STAGES_START, _BUILD_STAGES_PROGRESS, _BUILD_STAGES_END, _BUILD_STAGES_REPORT]
_BUILD_AGAINST_SDK = "sdk"
_BUILD_AGAINST_REPO = "repo"
FLAGS = flags.FLAGS
flags.DEFINE_string(
"stage", None,
"Different stage while running the workflow. Valid values in _BUILD_STAGES.")
flags.DEFINE_string(
"token", None,
"github.token: A token to authenticate on your repository.")
flags.DEFINE_string(
"issue_number", None,
"Github's issue # or pull request #.")
flags.DEFINE_string(
"actor", None,
"github.actor: The login of the user that initiated the workflow run.")
flags.DEFINE_string(
"commit", None, "GitHub commit hash")
flags.DEFINE_string(
"run_id", None,
"github.run_id: A unique number for each workflow run within a repository.")
flags.DEFINE_string(
"new_token", None,
"Only used with --stage end"
"Use a different token to remove the \"in-progress\" label,"
"to allow the removal to trigger the \"Check Labels\" workflow.")
flags.DEFINE_string(
"build_against", None,
"Integration testapps could either build against packaged SDK or repo")
def test_start(token, issue_number, actor, commit, run_id):
"""In PR, when start testing, add comment and label \"tests: in-progress\""""
github.add_label(token, issue_number, _LABEL_PROGRESS)
for label in [_LABEL_TRIGGER_FULL, _LABEL_TRIGGER_QUICK, _LABEL_FAILED, _LABEL_SUCCEED]:
github.delete_label(token, issue_number, label)
comment = (_COMMENT_TITLE_PROGESS +
_get_description(actor, commit, run_id) +
_COMMENT_SUFFIX)
_update_comment(token, issue_number, comment)
def test_progress(token, issue_number, actor, commit, run_id):
"""In PR, when some test failed, update failure info and
add label \"tests: failed\""""
success_or_only_flakiness, log_summary = _get_summary_table(token, run_id)
if success_or_only_flakiness and not log_summary:
# succeeded (without flakiness)
return
else:
if success_or_only_flakiness:
# all failures/errors are due to flakiness (succeeded after retry)
title = _COMMENT_TITLE_PROGESS_FLAKY
else:
# failures/errors still exist after retry
title = _COMMENT_TITLE_PROGESS_FAIL
github.add_label(token, issue_number, _LABEL_FAILED)
comment = (title +
_get_description(actor, commit, run_id) +
log_summary +
_COMMENT_SUFFIX)
_update_comment(token, issue_number, comment)
def test_end(token, issue_number, actor, commit, run_id, new_token):
"""In PR, when some test end, update Test Result Report and
update label: add \"tests: failed\" if test failed, add label
\"tests: succeeded\" if test succeed"""
success_or_only_flakiness, log_summary = _get_summary_table(token, run_id)
if success_or_only_flakiness and not log_summary:
# succeeded (without flakiness)
github.add_label(token, issue_number, _LABEL_SUCCEED)
comment = (_COMMENT_TITLE_SUCCEED +
_get_description(actor, commit, run_id) +
_COMMENT_SUFFIX)
_update_comment(token, issue_number, comment)
else:
if success_or_only_flakiness:
# all failures/errors are due to flakiness (succeeded after retry)
title = _COMMENT_TITLE_FLAKY
github.add_label(token, issue_number, _LABEL_SUCCEED)
else:
# failures/errors still exist after retry
title = _COMMENT_TITLE_FAIL
github.add_label(token, issue_number, _LABEL_FAILED)
comment = (title +
_get_description(actor, commit, run_id) +
log_summary +
_COMMENT_SUFFIX)
_update_comment(token, issue_number, comment)
github.delete_label(new_token, issue_number, _LABEL_PROGRESS)
def test_report(token, actor, commit, run_id, build_against):
"""Update (create if not exist) a Daily Report in Issue.
The Issue with title _REPORT_TITLE and label _REPORT_LABEL:
https://github.com/firebase/firebase-unity-sdk/issues?q=is%3Aissue+label%3Anightly-testing
"""
issue_number = _get_issue_number(token, _REPORT_TITLE, _REPORT_LABEL)
previous_comment = github.get_issue_body(token, issue_number)
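  # The issue body holds two reports separated by the hidden marker comment:
  # the build-against-repo report first, then the build-against-packaged-SDK report.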
[previous_comment_repo, previous_comment_sdk] = previous_comment.split(_COMMENT_SUFFIX)
success_or_only_flakiness, log_summary = _get_summary_table(token, run_id)
if success_or_only_flakiness and not log_summary:
# succeeded (without flakiness)
title = _COMMENT_TITLE_SUCCEED
comment = title + _get_description(actor, commit, run_id)
else:
title = _COMMENT_TITLE_FLAKY
comment = title + _get_description(actor, commit, run_id) + log_summary
  if build_against == _BUILD_AGAINST_REPO:
comment = comment + _COMMENT_SUFFIX + previous_comment_sdk
else:
comment = previous_comment_repo + _COMMENT_SUFFIX + comment
  # Close the report issue only when the latest run fully succeeded.
  if title == _COMMENT_TITLE_SUCCEED:
github.close_issue(token, issue_number)
else:
github.open_issue(token, issue_number)
github.update_issue_comment(token, issue_number, comment)
def _get_issue_number(token, title, label):
issues = github.search_issues_by_label(label)
for issue in issues:
if issue["title"] == title:
return issue["number"]
return github.create_issue(token, title, label, _COMMENT_SUFFIX)["number"]
def _update_comment(token, issue_number, comment):
comment_id = _get_comment_id(token, issue_number, _COMMENT_SUFFIX)
if not comment_id:
github.add_comment(token, issue_number, comment)
else:
github.update_comment(token, comment_id, comment)
def _get_comment_id(token, issue_number, comment_identifier):
comments = github.list_comments(token, issue_number)
for comment in comments:
if comment_identifier in comment['body']:
return comment['id']
return None
def _get_description(actor, commit, run_id):
"""Test Result Report Title and description"""
return ("Requested by @%s on commit %s\n" % (actor, commit) +
"Last updated: %s \n" % _get_datetime() +
"**[View integration test log & download artifacts](https://github.com/firebase/firebase-unity-sdk/actions/runs/%s)**\n" % run_id)
def _get_datetime():
"""Date time when Test Result Report updated"""
pst_now = datetime.datetime.utcnow().astimezone(pytz.timezone("America/Los_Angeles"))
return pst_now.strftime("%a %b %e %H:%M %Z %G")
def _get_summary_table(token, run_id):
"""Test Result Report Body, which is failed test table with markdown format"""
return summarize.summarize_logs(dir=_LOG_OUTPUT_DIR, markdown=True)
def _get_artifact_id(token, run_id, name):
artifacts = github.list_artifacts(token, run_id)
for artifact in artifacts:
if artifact["name"] == name:
return artifact["id"]
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
if FLAGS.stage == _BUILD_STAGES_START:
test_start(FLAGS.token, FLAGS.issue_number, FLAGS.actor, FLAGS.commit, FLAGS.run_id)
elif FLAGS.stage == _BUILD_STAGES_PROGRESS:
test_progress(FLAGS.token, FLAGS.issue_number, FLAGS.actor, FLAGS.commit, FLAGS.run_id)
elif FLAGS.stage == _BUILD_STAGES_END:
test_end(FLAGS.token, FLAGS.issue_number, FLAGS.actor, FLAGS.commit, FLAGS.run_id, FLAGS.new_token)
elif FLAGS.stage == _BUILD_STAGES_REPORT:
test_report(FLAGS.token, FLAGS.actor, FLAGS.commit, FLAGS.run_id, FLAGS.build_against)
else:
print("Invalid stage value. Valid value: " + ",".join(_BUILD_STAGES))
if __name__ == "__main__":
flags.mark_flag_as_required("stage")
flags.mark_flag_as_required("token")
flags.mark_flag_as_required("actor")
flags.mark_flag_as_required("commit")
flags.mark_flag_as_required("run_id")
app.run(main)
|
the-stack_0_16169 | from flask_wtf.file import FileField
from wtforms import Field
from wtforms.fields import Label
from wtforms.validators import InputRequired, Optional, ValidationError
from wtforms.widgets import TextInput
from re import search
from werkzeug.utils import secure_filename
def validate_filename(form, field):
'''
Validates the file inside the field if it matches the label defined in __init__
Does not raise error if field is empty - include Optional() or InputRequired() in the form if needed
'''
if field.data:
filename = secure_filename(field.data.filename)
#raise ValidationError('{}\t{}'.format(filename, field.expect))
if filename != field.expect:
message = 'Filename must match {} exactly'.format(field.expect)
raise ValidationError(message)
class NamedFileField(FileField):
'''
A file field that checks the name of its uploaded file against an expected title.
Inspect the class functions for more details.
'''
def __init__(self, label='', validators=None, expect='', required=False, **kwargs):
'''
Initializes the NamedFileField by calling super() on the FileField
Args:
label (str): if a value is provided, it will be formatted by "label.format(expect)".
validators (list of validators): optionally add extra validators.
expect (str): the title of the file to expect. ".tab" extension is not required if "label" is not provided.
required (bool): whether this field is required or not.
If label is not provided then additional text indicating the requirement will be added here in the label.
Note:
Flask WTForm docs suggest putting super().__init__ at the beginning of the function.
But since there are some built-in modifications to label text and validators, super is put at the end instead.
'''
if label:
self.expect = expect
else:
labeltxt = 'Upload ' + expect + '.tab'
self.expect = expect + '.tab'
if required:
labeltxt += ' (required)'
else:
labeltxt += ' (optional)'
if not validators:
validators = []
if required:
validators.insert(0, InputRequired())
else:
validators.insert(0, Optional())
validators.append(validate_filename)
super(FileField, self).__init__(labeltxt, validators, **kwargs)
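# Usage sketch (form and field names are illustrative): inside a FlaskForm subclass,
#   data_file = NamedFileField(expect='expression', required=True)
# accepts only an uploaded file named exactly "expression.tab".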
class FloatListField(Field):
'''
A custom field to represent a list of floating point numbers.
Attributes:
widget: a text input box that is used to enter the list of numbers.
'''
widget = TextInput()
def _value(self):
'''
Reads default value from a literal float list and returns a comma-separated string
'''
if self.data:
return ', '.join(self.data)
else:
return ''
def process_formdata(self, valuelist):
'''
Processes the entered data and saves it to self.
Called at form submission but before validation.
'''
if valuelist:
self.data = [x.strip() for x in valuelist[0].split(',')]
else:
self.data = []
def validate_float_list(form, field):
'''
An inline validator to check that the entered data is a float list.
Raises:
ValidationError: if the entered data is not a float list.
'''
for x in field.data:
try:
float(x)
except ValueError:
raise ValidationError('Must be a comma-separated list of decimal numbers') |
the-stack_0_16172 | from __future__ import print_function, division
from future import standard_library
standard_library.install_aliases()
from builtins import range
from builtins import object
import os
import pickle as pickle
import numpy as np
from DSVC import optim
class Solver(object):
"""
A Solver encapsulates all the logic necessary for training classification
models. The Solver performs stochastic gradient descent using different
update rules defined in optim.py.
    The solver accepts both training and validation data and labels so it can
periodically check classification accuracy on both training and validation
data to watch out for overfitting.
To train a model, you will first construct a Solver instance, passing the
    model, dataset, and various options (learning rate, batch size, etc) to the
constructor. You will then call the train() method to run the optimization
procedure and train the model.
After the train() method returns, model.params will contain the parameters
that performed best on the validation set over the course of training.
In addition, the instance variable solver.loss_history will contain a list
of all losses encountered during training and the instance variables
solver.train_acc_history and solver.val_acc_history will be lists of the
accuracies of the model on the training and validation set at each epoch.
Example usage might look something like this:
data = {
'X_train': # training data
'y_train': # training labels
'X_val': # validation data
'y_val': # validation labels
}
model = MyAwesomeModel(hidden_size=100, reg=10)
solver = Solver(model, data,
update_rule='sgd',
optim_config={
'learning_rate': 1e-3,
},
lr_decay=0.95,
num_epochs=10, batch_size=100,
print_every=100)
solver.train()
A Solver works on a model object that must conform to the following API:
- model.params must be a dictionary mapping string parameter names to numpy
arrays containing parameter values.
- model.loss(X, y) must be a function that computes training-time loss and
gradients, and test-time classification scores, with the following inputs
and outputs:
Inputs:
- X: Array giving a minibatch of input data of shape (N, d_1, ..., d_k)
- y: Array of labels, of shape (N,) giving labels for X where y[i] is the
label for X[i].
Returns:
If y is None, run a test-time forward pass and return:
- scores: Array of shape (N, C) giving classification scores for X where
scores[i, c] gives the score of class c for X[i].
If y is not None, run a training time forward and backward pass and
return a tuple of:
- loss: Scalar giving the loss
- grads: Dictionary with the same keys as self.params mapping parameter
names to gradients of the loss with respect to those parameters.
"""
def __init__(self, model, data, **kwargs):
"""
Construct a new Solver instance.
Required arguments:
- model: A model object conforming to the API described above
- data: A dictionary of training and validation data containing:
'X_train': Array, shape (N_train, d_1, ..., d_k) of training images
'X_val': Array, shape (N_val, d_1, ..., d_k) of validation images
'y_train': Array, shape (N_train,) of labels for training images
'y_val': Array, shape (N_val,) of labels for validation images
Optional arguments:
- update_rule: A string giving the name of an update rule in optim.py.
Default is 'sgd'.
- optim_config: A dictionary containing hyperparameters that will be
passed to the chosen update rule. Each update rule requires different
hyperparameters (see optim.py) but all update rules require a
'learning_rate' parameter so that should always be present.
- lr_decay: A scalar for learning rate decay; after each epoch the
learning rate is multiplied by this value.
- batch_size: Size of minibatches used to compute loss and gradient
during training.
- num_epochs: The number of epochs to run for during training.
- print_every: Integer; training losses will be printed every
print_every iterations.
- verbose: Boolean; if set to false then no output will be printed
during training.
- num_train_samples: Number of training samples used to check training
accuracy; default is 1000; set to None to use entire training set.
- num_val_samples: Number of validation samples to use to check val
accuracy; default is None, which uses the entire validation set.
- checkpoint_name: If not None, then save model checkpoints here every
epoch.
"""
self.model = model
self.X_train = data['X_train']
self.y_train = data['y_train']
self.X_val = data['X_val']
self.y_val = data['y_val']
# Unpack keyword arguments
self.update_rule = kwargs.pop('update_rule', 'sgd')
self.optim_config = kwargs.pop('optim_config', {})
self.lr_decay = kwargs.pop('lr_decay', 1.0)
self.batch_size = kwargs.pop('batch_size', 100)
self.num_epochs = kwargs.pop('num_epochs', 10)
self.num_train_samples = kwargs.pop('num_train_samples', 1000)
self.num_val_samples = kwargs.pop('num_val_samples', None)
self.checkpoint_name = kwargs.pop('checkpoint_name', None)
self.print_every = kwargs.pop('print_every', 10)
self.verbose = kwargs.pop('verbose', True)
# Throw an error if there are extra keyword arguments
if len(kwargs) > 0:
extra = ', '.join('"%s"' % k for k in list(kwargs.keys()))
raise ValueError('Unrecognized arguments %s' % extra)
# Make sure the update rule exists, then replace the string
# name with the actual function
if not hasattr(optim, self.update_rule):
raise ValueError('Invalid update_rule "%s"' % self.update_rule)
self.update_rule = getattr(optim, self.update_rule)
self._reset()
def _reset(self):
"""
Set up some book-keeping variables for optimization. Don't call this
manually.
"""
# Set up some variables for book-keeping
self.epoch = 0
self.best_val_acc = 0
self.best_params = {}
self.loss_history = []
self.train_acc_history = []
self.val_acc_history = []
# Make a deep copy of the optim_config for each parameter
self.optim_configs = {}
for p in self.model.params:
d = {k: v for k, v in self.optim_config.items()}
self.optim_configs[p] = d
def _step(self):
"""
Make a single gradient update. This is called by train() and should not
be called manually.
"""
# Make a minibatch of training data
num_train = self.X_train.shape[0]
batch_mask = np.random.choice(num_train, self.batch_size)
X_batch = self.X_train[batch_mask]
y_batch = self.y_train[batch_mask]
# Compute loss and gradient
loss, grads = self.model.loss(X_batch, y_batch)
self.loss_history.append(loss)
# Perform a parameter update
for p, w in self.model.params.items():
dw = grads[p]
config = self.optim_configs[p]
next_w, next_config = self.update_rule(w, dw, config)
self.model.params[p] = next_w
self.optim_configs[p] = next_config
def _save_checkpoint(self):
if self.checkpoint_name is None: return
checkpoint = {
'model': self.model,
'update_rule': self.update_rule,
'lr_decay': self.lr_decay,
'optim_config': self.optim_config,
'batch_size': self.batch_size,
'num_train_samples': self.num_train_samples,
'num_val_samples': self.num_val_samples,
'epoch': self.epoch,
'loss_history': self.loss_history,
'train_acc_history': self.train_acc_history,
'val_acc_history': self.val_acc_history,
}
filename = '%s_epoch_%d.pkl' % (self.checkpoint_name, self.epoch)
if self.verbose:
print('Saving checkpoint to "%s"' % filename)
with open(filename, 'wb') as f:
pickle.dump(checkpoint, f)
def check_accuracy(self, X, y, num_samples=None, batch_size=100):
"""
Check accuracy of the model on the provided data.
Inputs:
- X: Array of data, of shape (N, d_1, ..., d_k)
- y: Array of labels, of shape (N,)
- num_samples: If not None, subsample the data and only test the model
on num_samples datapoints.
- batch_size: Split X and y into batches of this size to avoid using
too much memory.
Returns:
- acc: Scalar giving the fraction of instances that were correctly
classified by the model.
"""
# Maybe subsample the data
N = X.shape[0]
if num_samples is not None and N > num_samples:
mask = np.random.choice(N, num_samples)
N = num_samples
X = X[mask]
y = y[mask]
# Compute predictions in batches
num_batches = N // batch_size
if N % batch_size != 0:
num_batches += 1
y_pred = []
for i in range(num_batches):
start = i * batch_size
end = (i + 1) * batch_size
scores = self.model.loss(X[start:end])
y_pred.append(np.argmax(scores, axis=1))
y_pred = np.hstack(y_pred)
acc = np.mean(y_pred == y)
return acc
def train(self):
"""
Run optimization to train the model.
"""
num_train = self.X_train.shape[0]
iterations_per_epoch = max(num_train // self.batch_size, 1)
num_iterations = self.num_epochs * iterations_per_epoch
for t in range(num_iterations):
self._step()
# Maybe print training loss
if self.verbose and t % self.print_every == 0:
print('(Iteration %d / %d) loss: %f' % (
t + 1, num_iterations, self.loss_history[-1]))
# At the end of every epoch, increment the epoch counter and decay
# the learning rate.
epoch_end = (t + 1) % iterations_per_epoch == 0
if epoch_end:
self.epoch += 1
for k in self.optim_configs:
self.optim_configs[k]['learning_rate'] *= self.lr_decay
# Check train and val accuracy on the first iteration, the last
# iteration, and at the end of each epoch.
first_it = (t == 0)
last_it = (t == num_iterations - 1)
if first_it or last_it or epoch_end:
train_acc = self.check_accuracy(self.X_train, self.y_train,
num_samples=self.num_train_samples)
val_acc = self.check_accuracy(self.X_val, self.y_val,
num_samples=self.num_val_samples)
self.train_acc_history.append(train_acc)
self.val_acc_history.append(val_acc)
self._save_checkpoint()
if self.verbose:
print('(Epoch %d / %d) train acc: %f; val_acc: %f' % (
self.epoch, self.num_epochs, train_acc, val_acc))
# Keep track of the best model
if val_acc > self.best_val_acc:
self.best_val_acc = val_acc
self.best_params = {}
for k, v in self.model.params.items():
self.best_params[k] = v.copy()
# At the end of training swap the best params into the model
self.model.params = self.best_params
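# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a tiny model stub that
# follows the model API described in the Solver docstring above, wired into a
# short training run on random data. The class name `_ToyLinearModel`, the
# sizes, and the synthetic data are illustrative assumptions; the 'sgd' update
# rule is assumed to be provided by the imported `optim` module.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    class _ToyLinearModel(object):
        """Linear softmax classifier conforming to the API Solver expects."""

        def __init__(self, input_dim=32, num_classes=10, reg=1e-4):
            self.reg = reg
            self.params = {'W': 1e-3 * np.random.randn(input_dim, num_classes)}

        def loss(self, X, y=None):
            X = X.reshape(X.shape[0], -1)
            scores = X.dot(self.params['W'])
            if y is None:
                return scores
            # Softmax loss plus L2 regularization, and its gradient w.r.t. W.
            shifted = scores - scores.max(axis=1, keepdims=True)
            probs = np.exp(shifted)
            probs /= probs.sum(axis=1, keepdims=True)
            N = X.shape[0]
            loss = -np.log(probs[np.arange(N), y]).mean()
            loss += 0.5 * self.reg * np.sum(self.params['W'] ** 2)
            dscores = probs.copy()
            dscores[np.arange(N), y] -= 1
            dscores /= N
            grads = {'W': X.T.dot(dscores) + self.reg * self.params['W']}
            return loss, grads

    data = {
        'X_train': np.random.randn(500, 32),
        'y_train': np.random.randint(10, size=500),
        'X_val': np.random.randn(100, 32),
        'y_val': np.random.randint(10, size=100),
    }
    solver = Solver(_ToyLinearModel(), data,
                    update_rule='sgd',
                    optim_config={'learning_rate': 1e-2},
                    num_epochs=2, batch_size=50, verbose=False)
    solver.train()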
|
the-stack_0_16173 | # ckwg +29
# Copyright 2020 by Kitware, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither name of Kitware, Inc. nor the names of any contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
from kwiver.vital.algo import ComputeRefHomography
from kwiver.vital.tests.py_helpers import CommonConfigurationMixin
class SimpleComputeRefHomography(CommonConfigurationMixin, ComputeRefHomography):
"""
Implementation of ComputeRefHomography to test it
Examples:
"""
def __init__(self):
ComputeRefHomography.__init__(self)
def __vital_algorithm_register__():
from kwiver.vital.algo import algorithm_factory
# Register Algorithm
implementation_name = "SimpleComputeRefHomography"
if algorithm_factory.has_algorithm_impl_name(
SimpleComputeRefHomography.static_type_name(),
implementation_name):
return
algorithm_factory.add_algorithm( implementation_name,
"Test kwiver.vital.algo.ComputeRefHomography",
SimpleComputeRefHomography )
algorithm_factory.mark_algorithm_as_loaded( implementation_name )
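# Hedged usage sketch (comments only, not part of the original file): when this
# module is loaded as a KWIVER plugin, the register hook above is invoked and the
# implementation becomes available through the factory. Mirroring the imports
# used inside the hook, a check could look like:
#   from kwiver.vital.algo import algorithm_factory
#   __vital_algorithm_register__()
#   assert algorithm_factory.has_algorithm_impl_name(
#       SimpleComputeRefHomography.static_type_name(), "SimpleComputeRefHomography")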
|
the-stack_0_16174 | import av
import os
from collections import OrderedDict
import importlib
from .base import EncoderConfig
vencoders = OrderedDict()
aencoders = OrderedDict()
sencoders = OrderedDict()
for codec in sorted(av.codecs_available):
try:
c = av.codec.Codec(codec, "w")
except Exception:
pass
else:
if c.type == "video":
vencoders[codec] = c.long_name
if c.type == "audio":
aencoders[codec] = c.long_name
if c.type == "subtitle":
sencoders[codec] = c.long_name
def createConfigObj(codec):
if codec in encoders:
return encoders[codec]()
return EncoderConfig(codec)
encoders = {}
def scan():
_path = os.path.split(__file__)[0]
encoders.clear()
for _module in os.listdir(_path):
if _module[0] in "_." or _module in ("base.py",):
continue
if (os.path.isfile(os.path.join(_path, _module))
and _module.lower().endswith(".py")):
_module = importlib.import_module(f"{__name__}.{_module[:-3]}")
elif (os.path.isdir(os.path.join(_path, _module))
and os.path.isfile(os.path.join(_path, _module, "__init__.py"))):
_module = importlib.import_module(f"{__name__}.{_module}")
else:
continue
for _key in dir(_module):
_cls = getattr(_module, _key)
if (isinstance(_cls, type)
and issubclass(_cls, EncoderConfig)
and _cls not in (EncoderConfig,)
and hasattr(_cls, "codec")):
encoders[_cls.codec] = _cls
scan()
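# Hedged usage sketch (comments only, not part of the original package module):
# after `scan()` has populated `encoders`, configs are looked up by codec name,
# e.g.
#   from <this package> import vencoders, createConfigObj   # package path assumed
#   print(list(vencoders.items())[:5])                       # (name, long_name) pairs
#   cfg = createConfigObj("libx264")   # falls back to EncoderConfig("libx264") if no
#                                      # specialised config class was registered;
#                                      # "libx264" availability depends on the FFmpeg build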
|
the-stack_0_16175 | """KeysightDAQ enables controlling various Keysight DAQs."""
from __future__ import print_function
from typing import List, Optional, Union
import time
from pyvisainstrument.VisaResource import VisaResource
class KeysightDAQ(VisaResource):
""" KeysightDAQ enables controlling various Keysight DAQs.
Args:
num_slots(int): Number of slots in DAQ
num_channels(int): Number of channels per slot
sc_format(str, optional): Slot+channel route format. Default is SCC
"""
def __init__(self, num_slots: int, num_channels: int, *args, sc_format: Optional[str] = None, **kwargs):
super().__init__(name='DAQ', *args, **kwargs)
self.num_slots = num_slots
self.num_channels = num_channels
self.ch_precision = sc_format.upper().count('C') if sc_format else 2
def is_channel_closed(self, channel: Union[int, str]):
""" Get if channel is closed.
Args:
channel (Union[int, str]): Channel (actuator) with format SCC[C]
Returns:
bool: True if channel is closed
"""
return self.query(f'ROUT:CLOS? (@{channel})') == '1'
def is_channel_open(self, channel: Union[int, str]):
""" Get if channel is open.
Args:
channel (Union[int, str]): Channel (actuator) with format SCC[C]
Returns:
bool: True if channel is open
"""
return not self.is_channel_closed(channel)
def open_all_channels(self, slot: Union[int, str], delay: float = 0):
""" Open all channels of a slot.
Args:
slot (Union[int, str]): Slot (1-based)
delay (float, optional): Delay after the operation. Default is 0 - no delay
"""
slot_start = f'{slot}{1:0{self.ch_precision}d}'
slot_end = f'{slot}{self.num_channels:0{self.ch_precision}d}'
self.write(f'ROUT:OPEN (@{slot_start}:{slot_end})')
time.sleep(delay)
def close_all_channels(self, slot: Union[int, str], delay: float = 0):
""" Close all channels of a slot.
Args:
slot (Union[int, str]): Slot (1-based)
delay (float, optional):
Delay between each channel operation.
Default is 0 - no delay
"""
# NOTE: Will continue closing one at a time due to large current draw that may result
# if done concurrently.
for i in range(self.num_channels):
ch = f'{slot}{i+1:0{self.ch_precision}d}'
self.close_channel(ch, delay)
def open_channels(self, channels: List[Union[int, str]], delay: float = 0):
""" Open specified channels.
Args:
channels ([Union[int, str]]):
Channel indices with format SCC
delay (float, optional):
Delay between each channel operation.
Default is 0 - no delay
"""
for ch in channels:
self.open_channel(ch, delay)
def close_channels(self, channels: List[Union[int, str]], delay: float = 0):
""" Close specified channels.
Args:
channels ([Union[int, str]]):
Channel indices with format SCC
delay (float, optional):
Delay between each channel operation.
Default is 0 - no delay
"""
for ch in channels:
self.close_channel(ch, delay)
def open_channel(self, channel: Union[int, str], delay: float = 0):
""" Open specified channel.
Args:
channel (Union[int, str]):
Channel index with format SCCc
delay (float, optional):
Delay after channel operation.
Default is 0 - no delay
"""
self.write(f'ROUT:OPEN (@{channel})')
time.sleep(delay)
def close_channel(self, channel: Union[int, str], delay: float = 0):
""" Close specified channel.
Args:
channel (Union[int, str]):
Channel index with format SCC
delay (float, optional):
Delay after channel operation.
Default is 0 - no delay
"""
self.write(f'ROUT:CLOS (@{channel})')
time.sleep(delay)
def measure_temperature(self, probe: str, probe_type: str, resolution: Optional[str] = None):
""" Reset, configure, and measure temperature.
Args:
probe: {FRTD | RTD | FTHermistor | THERmistor | TCouple | DEF}
probe_type:
For FRTD and RTD: Type 85
For FTHermistor and THERmistor: Type 2252, 5000, and 10,000
For TCouple: Type B, E, J, K, N, R, S, and T
resolution: Default 1 PLC
Returns:
float: temperature (°C is default unit)
"""
return float(self.query(f'MEAS:TEMP? {probe},{probe_type}'))
def measure_relative_humidity(self, probe: str, probe_type: str, resolution: Optional[str] = None):
""" Reset, configure, and measure relative humidity.
NOTE: This is not a standard SCPI command for DAQs.
Args:
probe: {FRTD | RTD | FTHermistor | THERmistor | TCouple | DEF}
probe_type:
For FRTD and RTD: Type 85
For FTHermistor and THERmistor: Type 2252, 5000, and 10,000
For TCouple: Type B, E, J, K, N, R, S, and T
resolution: Default 1 PLC
Returns:
float: rel humidity (%)
"""
return float(self.query(f'MEAS:RHumidity? {probe},{probe_type}'))
def wait_for_completion(self, timeout: float = 2):
"""Wait for physical operation to complete.
Args:
timeout (float):
Max time to wait for completion in secs.
Raises:
Exception: If the timeout is reached before the route completes.
"""
done = False
wait_time = 0.0
while not done:
time.sleep(15E-3)
done_str = self.resource.query('ROUT:DONE?', delay=15E-3)
if isinstance(done_str, str) and done_str.strip().isnumeric():
done = int(done_str.strip())
wait_time += 15E-3
if wait_time >= timeout:
raise Exception('Timeout occurred waiting for route to finish.')
if __name__ == '__main__':
print('Started')
daq = KeysightDAQ(
bus_address='TCPIP::127.0.0.1::5020::SOCKET',
num_slots=3,
num_channels=20
)
daq.open(baud_rate=None, read_term='\n', write_term='\n')
daq.open_all_channels(1)
daq.open_channels([101, 103, 105])
daq.close_channels([101, 103, 105])
print("Finished")
|
the-stack_0_16178 | import FWCore.ParameterSet.Config as cms
from RecoTracker.IterativeTracking.LowPtQuadStep_cff import *
from HIPixelTripletSeeds_cff import *
from HIPixel3PrimTracks_cfi import *
hiLowPtQuadStepClusters = cms.EDProducer("HITrackClusterRemover",
clusterLessSolution = cms.bool(True),
trajectories = cms.InputTag("hiGlobalPrimTracks"),
overrideTrkQuals = cms.InputTag('hiInitialStepSelector','hiInitialStep'),
TrackQuality = cms.string('highPurity'),
minNumberOfLayersWithMeasBeforeFiltering = cms.int32(0),
pixelClusters = cms.InputTag("siPixelClusters"),
stripClusters = cms.InputTag("siStripClusters"),
Common = cms.PSet(
maxChi2 = cms.double(9.0),
),
Strip = cms.PSet(
#Yen-Jie's mod to preserve merged clusters
maxSize = cms.uint32(2),
maxChi2 = cms.double(9.0)
)
)
# SEEDING LAYERS
# Using 4 layers layerlist
hiLowPtQuadStepSeedLayers = hiPixelLayerQuadruplets.clone()
hiLowPtQuadStepSeedLayers.BPix.skipClusters = cms.InputTag('hiLowPtQuadStepClusters')
hiLowPtQuadStepSeedLayers.FPix.skipClusters = cms.InputTag('hiLowPtQuadStepClusters')
# SEEDS
from RecoTracker.TkTrackingRegions.globalTrackingRegionWithVertices_cfi import globalTrackingRegionWithVertices as _globalTrackingRegionWithVertices
from RecoTracker.TkHitPairs.hitPairEDProducer_cfi import hitPairEDProducer as _hitPairEDProducer
from RecoPixelVertexing.PixelTriplets.pixelTripletHLTEDProducer_cfi import pixelTripletHLTEDProducer as _pixelTripletHLTEDProducer
from RecoPixelVertexing.PixelLowPtUtilities.ClusterShapeHitFilterESProducer_cfi import *
from RecoPixelVertexing.PixelLowPtUtilities.trackCleaner_cfi import *
from RecoPixelVertexing.PixelTrackFitting.pixelFitterByHelixProjections_cfi import *
from RecoHI.HiTracking.HIPixelTrackFilter_cff import *
from RecoHI.HiTracking.HITrackingRegionProducer_cfi import *
hiLowPtQuadStepTrackingRegions = _globalTrackingRegionWithVertices.clone(RegionPSet=dict(
precise = True,
useMultipleScattering = False,
useFakeVertices = False,
beamSpot = "offlineBeamSpot",
useFixedError = True,
nSigmaZ = 4.0,
sigmaZVertex = 4.0,
fixedError = 0.5,
VertexCollection = "hiSelectedPixelVertex",
ptMin = 0.3,#0.2 for pp
useFoundVertices = True,
originRadius = 0.02 #0.02 for pp
))
hiLowPtQuadStepTracksHitDoubletsCA = _hitPairEDProducer.clone(
clusterCheck = "",
seedingLayers = "hiLowPtQuadStepSeedLayers",
trackingRegions = "hiLowPtQuadStepTrackingRegions",
maxElement = 50000000,
produceIntermediateHitDoublets = True,
layerPairs = [0,1,2]
)
import RecoPixelVertexing.PixelLowPtUtilities.LowPtClusterShapeSeedComparitor_cfi
from RecoPixelVertexing.PixelTriplets.caHitQuadrupletEDProducer_cfi import caHitQuadrupletEDProducer as _caHitQuadrupletEDProducer
hiLowPtQuadStepTracksHitQuadrupletsCA = _caHitQuadrupletEDProducer.clone(
doublets = "hiLowPtQuadStepTracksHitDoubletsCA",
extraHitRPhitolerance = 0.0,
SeedComparitorPSet = RecoPixelVertexing.PixelLowPtUtilities.LowPtClusterShapeSeedComparitor_cfi.LowPtClusterShapeSeedComparitor.clone(),
maxChi2 = dict(
pt1 = 0.7, pt2 = 2,
value1 = 1000, value2 = 150,
),
useBendingCorrection = True,
fitFastCircle = True,
fitFastCircleChi2Cut = True,
CAThetaCut = 0.0017,
CAPhiCut = 0.3,
)
hiLowPtQuadStepPixelTracksFilter = hiFilter.clone(
nSigmaTipMaxTolerance = 0,
lipMax = 1.0,
tipMax = 1.0,
ptMin = 0.4, #seeding region is 0.3
)
hiLowPtQuadStepPixelTracks = cms.EDProducer("PixelTrackProducer",
passLabel = cms.string('Pixel detached tracks with vertex constraint'),
# Ordered Hits
SeedingHitSets = cms.InputTag("hiLowPtQuadStepTracksHitQuadrupletsCA"),
# Fitter
Fitter = cms.InputTag("pixelFitterByHelixProjections"),
# Filter
Filter = cms.InputTag("hiLowPtQuadStepPixelTracksFilter"),
# Cleaner
Cleaner = cms.string("trackCleaner")
)
import RecoPixelVertexing.PixelLowPtUtilities.TrackSeeds_cfi
hiLowPtQuadStepSeeds = RecoPixelVertexing.PixelLowPtUtilities.TrackSeeds_cfi.pixelTrackSeeds.clone(
InputCollection = 'hiLowPtQuadStepPixelTracks'
)
# QUALITY CUTS DURING TRACK BUILDING
import TrackingTools.TrajectoryFiltering.TrajectoryFilter_cff
hiLowPtQuadStepTrajectoryFilter = TrackingTools.TrajectoryFiltering.TrajectoryFilter_cff.CkfBaseTrajectoryFilter_block.clone(
#maxLostHits = 1,
minimumNumberOfHits = 3,#3 for pp
minPt = cms.double(0.075),# 0.075 for pp
#constantValueForLostHitsFractionFilter = cms.double(0.701)
)
import TrackingTools.KalmanUpdators.Chi2MeasurementEstimator_cfi
hiLowPtQuadStepChi2Est = TrackingTools.KalmanUpdators.Chi2MeasurementEstimator_cfi.Chi2MeasurementEstimator.clone(
ComponentName = cms.string('hiLowPtQuadStepChi2Est'),
nSigma = cms.double(3.0),
MaxChi2 = cms.double(9.0)
)
# TRACK BUILDING
import RecoTracker.CkfPattern.GroupedCkfTrajectoryBuilder_cfi
hiLowPtQuadStepTrajectoryBuilder = RecoTracker.CkfPattern.GroupedCkfTrajectoryBuilder_cfi.GroupedCkfTrajectoryBuilder.clone(
MeasurementTrackerName = '',
trajectoryFilter = cms.PSet(refToPSet_ = cms.string('hiLowPtQuadStepTrajectoryFilter')),
maxCand = 4,#4 for pp
estimator = cms.string('hiLowPtQuadStepChi2Est'),
maxDPhiForLooperReconstruction = cms.double(2.0),#2.0 for pp
# 0.63 GeV is the maximum pT for a charged particle to loop within the 1.1m radius
# of the outermost Tracker barrel layer (B=3.8T)
maxPtForLooperReconstruction = cms.double(0.7),# 0.7 for pp
alwaysUseInvalidHits = cms.bool(False)
)
# MAKING OF TRACK CANDIDATES
# Trajectory cleaner in default
import RecoTracker.CkfPattern.CkfTrackCandidates_cfi
hiLowPtQuadStepTrackCandidates = RecoTracker.CkfPattern.CkfTrackCandidates_cfi.ckfTrackCandidates.clone(
src = cms.InputTag('hiLowPtQuadStepSeeds'),
### these two parameters are relevant only for the CachingSeedCleanerBySharedInput
numHitsForSeedCleaner = cms.int32(50),
onlyPixelHitsForSeedCleaner = cms.bool(True),
TrajectoryBuilderPSet = cms.PSet(refToPSet_ = cms.string('hiLowPtQuadStepTrajectoryBuilder')),
TrajectoryBuilder = cms.string('hiLowPtQuadStepTrajectoryBuilder'),
clustersToSkip = cms.InputTag('hiLowPtQuadStepClusters'),
doSeedingRegionRebuilding = True,
useHitsSplitting = True
)
# TRACK FITTING
import RecoTracker.TrackProducer.TrackProducer_cfi
hiLowPtQuadStepTracks = RecoTracker.TrackProducer.TrackProducer_cfi.TrackProducer.clone(
src = 'hiLowPtQuadStepTrackCandidates',
AlgorithmName = cms.string('lowPtQuadStep'),
Fitter=cms.string('FlexibleKFFittingSmoother')
)
# Final selection
import RecoHI.HiTracking.hiMultiTrackSelector_cfi
hiLowPtQuadStepSelector = RecoHI.HiTracking.hiMultiTrackSelector_cfi.hiMultiTrackSelector.clone(
src='hiLowPtQuadStepTracks',
useAnyMVA = cms.bool(True),
GBRForestLabel = cms.string('HIMVASelectorIter8'),#FIXME MVA for new iteration
GBRForestVars = cms.vstring(['chi2perdofperlayer', 'nhits', 'nlayers', 'eta']),
trackSelectors= cms.VPSet(
RecoHI.HiTracking.hiMultiTrackSelector_cfi.hiLooseMTS.clone(
name = 'hiLowPtQuadStepLoose',
applyAdaptedPVCuts = cms.bool(False),
useMVA = cms.bool(False),
), #end of pset
RecoHI.HiTracking.hiMultiTrackSelector_cfi.hiTightMTS.clone(
name = 'hiLowPtQuadStepTight',
preFilterName = 'hiLowPtQuadStepLoose',
applyAdaptedPVCuts = cms.bool(False),
useMVA = cms.bool(True),
minMVA = cms.double(-0.2)
),
RecoHI.HiTracking.hiMultiTrackSelector_cfi.hiHighpurityMTS.clone(
name = 'hiLowPtQuadStep',
preFilterName = 'hiLowPtQuadStepTight',
applyAdaptedPVCuts = cms.bool(False),
useMVA = cms.bool(True),
minMVA = cms.double(-0.09)
),
) #end of vpset
) #end of clone
from Configuration.Eras.Modifier_trackingPhase1_cff import trackingPhase1
trackingPhase1.toModify(hiLowPtQuadStepSelector, useAnyMVA = cms.bool(False))
trackingPhase1.toModify(hiLowPtQuadStepSelector, trackSelectors= cms.VPSet(
RecoHI.HiTracking.hiMultiTrackSelector_cfi.hiLooseMTS.clone(
name = 'hiLowPtQuadStepLoose',
applyAdaptedPVCuts = cms.bool(False),
useMVA = cms.bool(False),
), #end of pset
RecoHI.HiTracking.hiMultiTrackSelector_cfi.hiTightMTS.clone(
name = 'hiLowPtQuadStepTight',
preFilterName = 'hiLowPtQuadStepLoose',
applyAdaptedPVCuts = cms.bool(False),
useMVA = cms.bool(False),
minMVA = cms.double(-0.2)
),
RecoHI.HiTracking.hiMultiTrackSelector_cfi.hiHighpurityMTS.clone(
name = 'hiLowPtQuadStep',
preFilterName = 'hiLowPtQuadStepTight',
applyAdaptedPVCuts = cms.bool(False),
useMVA = cms.bool(False),
minMVA = cms.double(-0.09)
),
) #end of vpset
)
import RecoTracker.FinalTrackSelectors.trackListMerger_cfi
hiLowPtQuadStepQual = RecoTracker.FinalTrackSelectors.trackListMerger_cfi.trackListMerger.clone(
TrackProducers=cms.VInputTag(cms.InputTag('hiLowPtQuadStepTracks')),
hasSelector=cms.vint32(1),
selectedTrackQuals = cms.VInputTag(cms.InputTag("hiLowPtQuadStepSelector","hiLowPtQuadStep")),
copyExtras = True,
makeReKeyedSeeds = cms.untracked.bool(False),
)
hiLowPtQuadStep = cms.Sequence(hiLowPtQuadStepClusters*
hiLowPtQuadStepSeedLayers*
hiLowPtQuadStepTrackingRegions*
hiLowPtQuadStepTracksHitDoubletsCA*
hiLowPtQuadStepTracksHitQuadrupletsCA*
pixelFitterByHelixProjections*
hiLowPtQuadStepPixelTracksFilter*
hiLowPtQuadStepPixelTracks*
hiLowPtQuadStepSeeds*
hiLowPtQuadStepTrackCandidates*
hiLowPtQuadStepTracks*
hiLowPtQuadStepSelector*
hiLowPtQuadStepQual)
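# Hedged usage note (comments only, not part of the original fragment): in a full
# CMSSW configuration this sequence is typically scheduled on a path, e.g.
#   process.load('RecoHI.HiTracking.hiLowPtQuadStep_cff')   # module path assumed
#   process.hiLowPtQuadStep_step = cms.Path(process.hiLowPtQuadStep)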
|
the-stack_0_16180 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""A `Network` is way to compose layers: the topological form of a `Model`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import itertools
import json
import os
from six.moves import zip # pylint: disable=redefined-builtin
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import backend
from tensorflow.python.keras import saving
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.mixed_precision.experimental import policy
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.keras.utils.io_utils import ask_to_proceed_with_overwrite
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.training.tracking import layer_utils as trackable_layer_utils
from tensorflow.python.training.tracking import util as trackable_utils
from tensorflow.python.util import nest
from tensorflow.python.util import serialization
from tensorflow.python.util import tf_inspect
# pylint: disable=g-import-not-at-top
try:
import h5py
except ImportError:
h5py = None
try:
import yaml
except ImportError:
yaml = None
# pylint: enable=g-import-not-at-top
class Network(base_layer.Layer):
"""A `Network` is a composition of layers.
`Network` is the topological form of a "model". A `Model`
is simply a `Network` with added training routines.
Two types of `Networks` exist: Graph Networks and Subclass Networks. Graph
networks are used in the Keras Functional and Sequential APIs. Subclassed
networks are used when a user subclasses the `Model` class. In general,
more Keras features are supported with Graph Networks than with Subclassed
Networks, specifically:
- Model cloning (`keras.models.clone`)
- Serialization (`model.get_config()/from_config`, `model.to_json()/to_yaml()`)
- Whole-model saving (`model.save()`)
A Graph Network can be instantiated by passing two arguments to `__init__`.
The first argument is the `keras.Input` Tensors that represent the inputs
to the Network. The second argument specifies the output Tensors that
represent the outputs of this Network. Both arguments can be a nested
structure of Tensors.
Example:
```
inputs = {'x1': keras.Input(shape=(10,)), 'x2': keras.Input(shape=(1,))}
t = keras.layers.Dense(1, activation='relu')(inputs['x1'])
outputs = keras.layers.Add()([t, inputs['x2']])
network = Network(inputs, outputs)
```
A Graph Network constructed using the Functional API can also include raw
TensorFlow functions, with the exception of functions that create Variables
or assign ops.
Example:
```
inputs = keras.Input(shape=(10,))
x = keras.layers.Dense(1)(inputs)
outputs = tf.nn.relu(x)
network = Network(inputs, outputs)
```
Subclassed Networks can be instantiated via `name` and (optional) `dynamic`
keyword arguments. Subclassed Networks keep track of their Layers, and their
`call` method can be overridden. Subclassed Networks are typically created
indirectly, by subclassing the `Model` class.
Example:
```
class MyModel(keras.Model):
def __init__(self):
super(MyModel, self).__init__(name='my_model', dynamic=False)
self.layer1 = keras.layers.Dense(10, activation='relu')
def call(self, inputs):
return self.layer1(inputs)
```
"""
# See tf.Module for the usage of this property.
# The key of _layer_call_argspecs is a layer. tf.Module._flatten will fail to
# flatten the key since it is trying to convert Trackable/Layer to a string.
_TF_MODULE_IGNORED_PROPERTIES = frozenset(itertools.chain(
('_layer_call_argspecs',),
base_layer.Layer._TF_MODULE_IGNORED_PROPERTIES
))
def __init__(self, *args, **kwargs): # pylint: disable=super-init-not-called
# Signature detection
if (len(args) == 2 or
len(args) == 1 and 'outputs' in kwargs or
'inputs' in kwargs and 'outputs' in kwargs):
# Graph network
self._init_graph_network(*args, **kwargs)
else:
# Subclassed network
self._init_subclassed_network(**kwargs)
tf_utils.assert_no_legacy_layers(self.layers)
# Several Network methods have "no_automatic_dependency_tracking"
# annotations. Since Network does automatic dependency tracking on attribute
# assignment, including for common data structures such as lists, by default
# we'd have quite a few empty dependencies which users don't care about (or
# would need some way to ignore dependencies automatically, which is confusing
# when applied to user code). Some attributes, such as _layers, would cause
# structural issues (_layers being the place where Layers assigned to tracked
# attributes are stored).
#
# Aside from these aesthetic and structural issues, useless dependencies on
# empty lists shouldn't cause issues; adding or removing them will not break
# checkpoints, but may cause "all Python objects matched" assertions to fail
# (in which case less strict assertions may be substituted if necessary).
@trackable.no_automatic_dependency_tracking
def _base_init(self, name=None):
# The following are implemented as property functions:
# self.trainable_weights
# self.non_trainable_weights
# self.input_spec
# self.losses
# self.updates
self._init_set_name(name, zero_based=True)
self._activity_regularizer = None
# This acts just like the `trainable` attribute of any layer instance.
# It does not affect users of the underlying layers, only users of the
# Network instance.
self.trainable = True
self._is_compiled = False
self._expects_training_arg = False
# This is True for Sequential networks and Functional networks.
self._compute_output_and_mask_jointly = False
self.supports_masking = False
if not hasattr(self, 'optimizer'):
# Don't reset optimizer if already set.
self.optimizer = None
# Private attributes to implement compatibility with Layer.
self._maybe_create_attribute('_trainable_weights', [])
self._maybe_create_attribute('_non_trainable_weights', [])
self._updates = [] # Used in symbolic mode only.
self._losses = []
self._eager_losses = []
self._callable_losses = []
# A list of metric instances corresponding to the symbolic metric tensors
# added using the `add_metric` API.
self._metrics = []
# A dictionary that maps metric names to metric result tensors.
self._metrics_tensors = {}
self._scope = None # Never used.
self._reuse = None # Never used.
if context.executing_eagerly():
self._graph = None
else:
self._graph = ops.get_default_graph() # Used in symbolic mode only.
# A Network does not create weights of its own, thus has no dtype.
self._dtype = None
# All layers in order of horizontal graph traversal.
# Entries are unique. Includes input and output layers.
self._maybe_create_attribute('_layers', [])
# Used in symbolic mode only, only in conjunction with graph-networks
self._outbound_nodes = []
self._inbound_nodes = []
self._trackable_saver = (
trackable_utils.saver_with_op_caching(self))
# Networks do not need to do any casting of inputs or variables, because
# each of its layers will handle casting through the layer's own
# implementation. Therefore networks use the 'infer' policy, which does no
# casting.
self._mixed_precision_policy = policy.Policy('infer')
@trackable.no_automatic_dependency_tracking
def _init_graph_network(self, inputs, outputs, name=None):
self._call_convention = (base_layer_utils
.CallConvention.EXPLICIT_INPUTS_ARGUMENT)
# Normalize and set self.inputs, self.outputs.
if isinstance(inputs, list) and len(nest.flatten(inputs)) == 1:
inputs = inputs[0]
if isinstance(outputs, list) and len(nest.flatten(outputs)) == 1:
outputs = outputs[0]
self._nested_outputs = outputs
self._nested_inputs = inputs
self.inputs = nest.flatten(inputs)
self.outputs = nest.flatten(outputs)
if any(not hasattr(tensor, '_keras_history') for tensor in self.outputs):
base_layer_utils.create_keras_history(self._nested_outputs)
self._base_init(name=name)
self._validate_graph_inputs_and_outputs()
self._compute_previous_mask = (
'mask' in tf_inspect.getfullargspec(self.call).args or
hasattr(self, 'compute_mask'))
# A Network does not create weights of its own, thus it is already
# built.
self.built = True
self._compute_output_and_mask_jointly = True
self._is_graph_network = True
self._dynamic = False
# `_expects_training_arg` is True since the `training` argument is always
# present in the signature of the `call` method of a graph network.
self._expects_training_arg = True
self._input_layers = []
self._output_layers = []
self._input_coordinates = []
self._output_coordinates = []
# This is for performance optimization when calling the Network on new
# inputs. Every time the Network is called on a set on input tensors,
# we compute the output tensors, output masks and output shapes in one pass,
# then cache them here. When any of these outputs is queried later, we
# retrieve it from there instead of recomputing it.
self._output_mask_cache = {}
self._output_tensor_cache = {}
self._output_shape_cache = {}
# Build self._output_layers:
for x in self.outputs:
layer, node_index, tensor_index = x._keras_history # pylint: disable=protected-access
self._output_layers.append(layer)
self._output_coordinates.append((layer, node_index, tensor_index))
# Build self._input_layers:
for x in self.inputs:
layer, node_index, tensor_index = x._keras_history # pylint: disable=protected-access
# It's supposed to be an input layer, so only one node
# and one tensor output.
assert node_index == 0
assert tensor_index == 0
self._input_layers.append(layer)
self._input_coordinates.append((layer, node_index, tensor_index))
# Keep track of the network's nodes and layers.
nodes, nodes_by_depth, layers, layers_by_depth = _map_graph_network(
self.inputs, self.outputs)
self._network_nodes = nodes
self._nodes_by_depth = nodes_by_depth
self._layers = layers
self._layers_by_depth = layers_by_depth
self._layer_call_argspecs = {}
for layer in self._layers:
self._layer_call_argspecs[layer] = tf_inspect.getfullargspec(layer.call)
self._track_layers(layers)
# Create the node linking internal inputs to internal outputs.
base_layer.Node(
outbound_layer=self,
inbound_layers=[],
node_indices=[],
tensor_indices=[],
input_tensors=self._nested_inputs,
output_tensors=self._nested_outputs)
# Build self.input_names and self.output_names.
self._set_output_names()
self.input_names = []
self._feed_input_names = []
self._feed_inputs = []
self._feed_input_shapes = []
for i, layer in enumerate(self._input_layers):
self.input_names.append(layer.name)
if layer.is_placeholder:
self._feed_input_names.append(layer.name)
self._feed_input_shapes.append(backend.int_shape(self.inputs[i]))
self._feed_inputs.append(layer.input)
def _set_output_names(self):
"""Assigns unique names to the Network's outputs.
Output layers with multiple output tensors would otherwise lead to duplicate
names in self.output_names.
"""
uniquified = []
output_names = set()
prefix_count = {}
for layer in self._output_layers:
proposal = layer.name
while proposal in output_names:
existing_count = prefix_count.get(layer.name, 1)
proposal = '{}_{}'.format(layer.name, existing_count)
prefix_count[layer.name] = existing_count + 1
output_names.add(proposal)
uniquified.append(proposal)
self.output_names = uniquified
@trackable.no_automatic_dependency_tracking
def _init_subclassed_network(self, name=None, dynamic=False):
self._base_init(name=name)
self._is_graph_network = False
self._dynamic = dynamic
call_argspec = tf_inspect.getfullargspec(self.call)
if 'training' in call_argspec.args:
self._expects_training_arg = True
else:
self._expects_training_arg = False
self._call_convention = self._determine_call_convention(call_argspec)
self.outputs = []
self.inputs = []
self.built = False
@property
def dynamic(self):
if self._is_graph_network:
return any(layer.dynamic for layer in self.layers)
return self._dynamic or any(layer.dynamic for layer in self.layers)
def _determine_call_convention(self, call_argspec):
"""Decides how `self.call()` is invoked. See `CallConvention`."""
if call_argspec.varargs:
may_take_single_argument = False
else:
try:
# Note: tf_inspect doesn't raise a TypeError when regular inspect would,
# so we need to keep in mind that "getcallargs" may have returned
# something even though we under-specified positional arguments.
all_args = tf_inspect.getcallargs(self.call, None)
self_args = set()
for arg_name, obj in all_args.items():
if obj is self:
self_args.add(arg_name)
may_take_single_argument = True
except TypeError:
may_take_single_argument = False
if may_take_single_argument:
# A single positional argument (plus "self") is considered equivalent to
# an "inputs" argument.
all_positional_args = len(call_argspec.args)
if call_argspec.defaults is not None:
all_positional_args -= len(call_argspec.defaults)
non_self_positional_args = all_positional_args
for positional_arg_name in call_argspec.args[:all_positional_args]:
if positional_arg_name in self_args:
non_self_positional_args -= 1
if non_self_positional_args == 1:
if 'inputs' in call_argspec.args[all_positional_args:]:
raise TypeError(
"Model.call() takes a single positional argument (to which "
"inputs are passed by convention) and a separate 'inputs' "
"argument. Unable to determine which arguments are inputs.")
return base_layer_utils.CallConvention.SINGLE_POSITIONAL_ARGUMENT
if 'inputs' in call_argspec.args:
return base_layer_utils.CallConvention.EXPLICIT_INPUTS_ARGUMENT
else:
return base_layer_utils.CallConvention.POSITIONAL_ARGUMENTS_ARE_INPUTS
def _track_layers(self, layers):
"""Add Trackable dependencies on a list of Layers."""
weight_layer_index = 0
for layer_index, layer in enumerate(layers):
try:
if layer.weights:
# Keep a separate index for layers which have weights. This allows
# users to insert Layers without weights anywhere in the network
# without breaking checkpoints.
self._track_trackable(
layer, name='layer_with_weights-%d' % weight_layer_index,
overwrite=True)
weight_layer_index += 1
except ValueError:
# The layer might have weights, but may not be built yet. We just treat
# it as layer without weight.
pass
# Even if it doesn't have weights, we should still track everything in
# case it has/will have Trackable dependencies.
self._track_trackable(
layer, name='layer-%d' % layer_index, overwrite=True)
def __setattr__(self, name, value):
if not getattr(self, '_self_setattr_tracking', True):
super(Network, self).__setattr__(name, value)
return
if all(
isinstance(v, (base_layer.Layer,
data_structures.TrackableDataStructure)) or
trackable_layer_utils.has_weights(v) for v in nest.flatten(value)):
try:
self._is_graph_network
except AttributeError:
raise RuntimeError('It looks like you are subclassing `Model` and you '
'forgot to call `super(YourClass, self).__init__()`.'
' Always start with this line.')
super(Network, self).__setattr__(name, value)
# Keep track of metric instance created in subclassed model/layer.
# We do this so that we can maintain the correct order of metrics by adding
# the instance to the `metrics` list as soon as it is created.
from tensorflow.python.keras import metrics as metrics_module # pylint: disable=g-import-not-at-top
if isinstance(value, metrics_module.Metric):
self._metrics.append(value)
@property
def stateful(self):
return any((hasattr(layer, 'stateful') and layer.stateful)
for layer in self.layers)
def reset_states(self):
for layer in self.layers:
if hasattr(layer, 'reset_states') and getattr(layer, 'stateful', False):
layer.reset_states()
@property
def state_updates(self):
"""Returns the `updates` from all layers that are stateful.
This is useful for separating training updates and
state updates, e.g. when we need to update a layer's internal state
during prediction.
Returns:
A list of update ops.
"""
state_updates = []
for layer in self.layers:
if getattr(layer, 'stateful', False):
if hasattr(layer, 'updates'):
state_updates += layer.updates
return state_updates
@property
def weights(self):
"""Returns the list of all layer variables/weights.
Returns:
A list of variables.
"""
self._assert_weights_created()
weights = []
for layer in self._layers:
weights += layer.weights
weights += (self._trainable_weights + self._non_trainable_weights)
return weights
def compute_mask(self, inputs, mask):
if not self._is_graph_network:
return None
# TODO(omalleyt): b/123540974 This function is not really safe to call
# by itself because it will duplicate any updates and losses in graph
# mode by `call`ing the Layers again.
output_tensors = self._run_internal_graph(inputs, mask=mask)
return nest.map_structure(lambda t: t._keras_mask, output_tensors)
@property
def layers(self):
return trackable_layer_utils.filter_empty_layer_containers(
self._layers)
def get_layer(self, name=None, index=None):
"""Retrieves a layer based on either its name (unique) or index.
If `name` and `index` are both provided, `index` will take precedence.
Indices are based on order of horizontal graph traversal (bottom-up).
Arguments:
name: String, name of layer.
index: Integer, index of layer.
Returns:
A layer instance.
Raises:
ValueError: In case of invalid layer name or index.
"""
# TODO(fchollet): We could build a dictionary based on layer names
# since they are constant, but we have not done that yet.
if index is not None:
if len(self.layers) <= index:
raise ValueError('Was asked to retrieve layer at index ' + str(index) +
' but model only has ' + str(len(self.layers)) +
' layers.')
else:
return self.layers[index]
else:
if not name:
raise ValueError('Provide either a layer name or layer index.')
for layer in self.layers:
if layer.name == name:
return layer
raise ValueError('No such layer: ' + name)
@trackable.no_automatic_dependency_tracking
def _clear_losses(self):
"""Used every step in eager to reset losses."""
self._eager_losses = []
for layer in self.layers:
layer._clear_losses()
@property
def trainable_weights(self):
self._assert_weights_created()
return trackable_layer_utils.gather_trainable_weights(
trainable=self.trainable,
sub_layers=self._layers,
extra_variables=self._trainable_weights)
@property
def non_trainable_weights(self):
self._assert_weights_created()
return trackable_layer_utils.gather_non_trainable_weights(
trainable=self.trainable,
sub_layers=self._layers,
extra_variables=self._non_trainable_weights + self._trainable_weights)
@property
def _all_metrics_tensors(self):
"""Returns the network's symbolic metric tensors."""
# TODO(psv): Remove this property.
metrics_tensors = {}
for layer in self.layers:
if isinstance(layer, Network):
metrics_tensors.update(layer._all_metrics_tensors)
else:
metrics_tensors.update(layer._metrics_tensors)
metrics_tensors.update(self._metrics_tensors)
return metrics_tensors
@property
def input_spec(self):
"""Gets the network's input specs.
Returns:
A list of `InputSpec` instances (one per input to the model)
or a single instance if the model has only one input.
"""
# If subclassed model, can't assume anything.
if not self._is_graph_network:
return None
specs = []
for layer in self._input_layers:
if layer.input_spec is None:
specs.append(None)
else:
if not isinstance(layer.input_spec, list):
raise TypeError('Layer ' + layer.name +
' has an input_spec attribute that '
'is not a list. We expect a list. '
'Found input_spec = ' + str(layer.input_spec))
specs += layer.input_spec
if len(specs) == 1:
return specs[0]
return specs
@base_layer.default
def build(self, input_shape):
"""Builds the model based on input shapes received.
This is to be used for subclassed models, which do not know at instantiation
time what their inputs look like.
This method only exists for users who want to call `model.build()` in a
standalone way (as a substitute for calling the model on real data to
build it). It will never be called by the framework (and thus it will
never throw unexpected errors in an unrelated workflow).
Args:
input_shape: Single tuple, TensorShape, or list of shapes, where shapes
are tuples, integers, or TensorShapes.
Raises:
ValueError:
1. In case of invalid user-provided data (not of type tuple,
list, or TensorShape).
2. If the model requires call arguments that are agnostic
to the input shapes (positional or kwarg in call signature).
3. If not all layers were properly built.
4. If float type inputs are not supported within the layers.
In each of these cases, the user should build their model by calling it
on real tensor data.
"""
if self._is_graph_network:
self.built = True
return
# If subclass network
if input_shape is None:
raise ValueError('Input shape must be defined when calling build on a '
'model subclass network.')
valid_types = (tuple, list, tensor_shape.TensorShape)
if not isinstance(input_shape, valid_types):
raise ValueError('Specified input shape is not one of the valid types. '
'Please specify a batch input shape of type tuple or '
'list of input shapes. User provided '
'input type: {}'.format(type(input_shape)))
if input_shape and not self.inputs:
# We create placeholders for the `None`s in the shape and build the model
# in a Graph. Since tf.Variable is compatible with both eager execution
# and graph building, the variables created after building the model in
# a Graph are still valid when executing eagerly.
if context.executing_eagerly():
graph = func_graph.FuncGraph('build_graph')
else:
graph = backend.get_graph()
with graph.as_default():
if isinstance(input_shape, list):
x = [base_layer_utils.generate_placeholders_from_shape(shape)
for shape in input_shape]
else:
x = base_layer_utils.generate_placeholders_from_shape(input_shape)
kwargs = {}
call_signature = tf_inspect.getfullargspec(self.call)
call_args = call_signature.args
# Exclude `self`, `inputs`, and any argument with a default value.
if len(call_args) > 2:
if call_signature.defaults:
call_args = call_args[2:-len(call_signature.defaults)]
else:
call_args = call_args[2:]
for arg in call_args:
if arg == 'training':
# Case where `training` is a positional arg with no default.
kwargs['training'] = False
else:
# Has invalid call signature with unknown positional arguments.
raise ValueError(
'Currently, you cannot build your model if it has '
'positional or keyword arguments that are not '
'inputs to the model, but are required for its '
'`call` method. Instead, in order to instantiate '
'and build your model, `call` your model on real '
'tensor data with all expected call arguments.')
elif len(call_args) < 2:
# Signature without `inputs`.
raise ValueError('You can only call `build` on a model if its `call` '
'method accepts an `inputs` argument.')
try:
self.call(x, **kwargs)
except (errors.InvalidArgumentError, TypeError):
raise ValueError('You cannot build your model by calling `build` '
'if your layers do not support float type inputs. '
'Instead, in order to instantiate and build your '
'model, `call` your model on real tensor data (of '
'the correct dtype).')
if self._layers:
self._track_layers(self._layers)
self.built = True
def call(self, inputs, training=None, mask=None):
"""Calls the model on new inputs.
In this case `call` just reapplies
all ops in the graph to the new inputs
(e.g. build a new computational graph from the provided inputs).
Arguments:
inputs: A tensor or list of tensors.
training: Boolean or boolean scalar tensor, indicating whether to run
the `Network` in training mode or inference mode.
mask: A mask or list of masks. A mask can be
either a tensor or None (no mask).
Returns:
A tensor if there is a single output, or
a list of tensors if there are more than one outputs.
"""
if not self._is_graph_network:
raise NotImplementedError('When subclassing the `Model` class, you should'
' implement a `call` method.')
return self._run_internal_graph(inputs, training=training, mask=mask)
def compute_output_shape(self, input_shape):
if not self._is_graph_network:
return super(Network, self).compute_output_shape(input_shape)
# Convert any shapes in tuple format to TensorShapes.
input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)
if len(nest.flatten(input_shape)) != len(nest.flatten(self._input_layers)):
raise ValueError('Invalid input_shape argument ' + str(input_shape) +
': model has ' + str(len(self._input_layers)) +
' tensor inputs.')
cache_key = generic_utils.object_list_uid(input_shape)
if cache_key in self._output_shape_cache:
# Cache hit. Return shapes as TensorShapes.
return self._output_shape_cache[cache_key]
layers_to_output_shapes = {}
for layer, shape in zip(self._input_layers, nest.flatten(input_shape)):
# It's an input layer: then `compute_output_shape` is identity,
# and there is only one node and one tensor output.
shape_key = layer.name + '_0_0'
layers_to_output_shapes[shape_key] = shape
depth_keys = list(self._nodes_by_depth.keys())
depth_keys.sort(reverse=True)
# Iterate over nodes, by depth level.
if len(depth_keys) > 1:
for depth in depth_keys:
nodes = self._nodes_by_depth[depth]
for node in nodes:
# This is always a single layer, never a list.
layer = node.outbound_layer
if layer in self._input_layers:
# We've already covered the input layers
# a few lines above.
continue
# Potentially redundant list,
# same size as node.input_tensors.
layer_input_shapes = []
for inbound_layer, node_id, tensor_id, _ in node.iterate_inbound():
input_layer_key = inbound_layer.name + '_%s_%s' % (node_id,
tensor_id)
layer_input_shapes.append(layers_to_output_shapes[input_layer_key])
layer_input_shapes = nest.pack_sequence_as(node.inbound_layers,
layer_input_shapes)
# Layers expect shapes to be tuples for `compute_output_shape`.
layer_input_shapes = tf_utils.convert_shapes(
layer_input_shapes, to_tuples=True)
layer_output_shapes = layer.compute_output_shape(layer_input_shapes)
# Convert back to TensorShapes.
layer_output_shapes = tf_utils.convert_shapes(
layer_output_shapes, to_tuples=False)
node_index = layer._inbound_nodes.index(node) # pylint: disable=protected-access
for j, shape in enumerate(nest.flatten(layer_output_shapes)):
shape_key = layer.name + '_%s_%s' % (node_index, j)
layers_to_output_shapes[shape_key] = shape
# Read final output shapes from layers_to_output_shapes.
output_shapes = []
for i in range(len(self._output_layers)):
layer, node_index, tensor_index = self._output_coordinates[i]
shape_key = layer.name + '_%s_%s' % (node_index, tensor_index)
output_shapes.append(layers_to_output_shapes[shape_key])
output_shapes = nest.pack_sequence_as(self._nested_outputs, output_shapes)
# Store in cache.
self._output_shape_cache[cache_key] = output_shapes
# Return shapes as TensorShapes.
return output_shapes
def _run_internal_graph(self, inputs, training=None, mask=None):
"""Computes output tensors for new inputs.
# Note:
- Expects `inputs` to be a list (potentially with 1 element).
- Can be run on non-Keras tensors.
Arguments:
inputs: Tensor or nested structure of Tensors.
training: Boolean learning phase.
mask: (Optional) Tensor or nested structure of Tensors.
Returns:
Two lists: output_tensors, output_masks
"""
# Note: masking support is relevant mainly for Keras.
# It cannot be factored out without having to fully reimplement the network
# calling logic on the Keras side. We choose to incorporate it in
# Network because 1) it may be useful to fully support in tf.layers in
# the future and 2) Keras is a major user of Network. If you don't
# use masking, it does not interfere with regular behavior at all and you
# can ignore it.
inputs = nest.flatten(inputs)
if mask is None:
masks = [None for _ in range(len(inputs))]
else:
masks = nest.flatten(mask)
for input_t, mask in zip(inputs, masks):
input_t._keras_mask = mask
# Dictionary mapping reference tensors to computed tensors.
tensor_dict = {}
for x, y, mask in zip(self.inputs, inputs, masks):
tensor_dict[str(id(x))] = y
depth_keys = list(self._nodes_by_depth.keys())
depth_keys.sort(reverse=True)
# Ignore the InputLayers when computing the graph.
depth_keys = depth_keys[1:]
for depth in depth_keys:
nodes = self._nodes_by_depth[depth]
for node in nodes:
# This is always a single layer, never a list.
layer = node.outbound_layer
if all(
str(id(tensor)) in tensor_dict
for tensor in nest.flatten(node.input_tensors)):
# Call layer (reapplying ops to new inputs).
computed_tensors = nest.map_structure(
lambda t: tensor_dict[str(id(t))], node.input_tensors)
# Ensure `training` and `mask` arg propagation if applicable.
kwargs = node.arguments or {}
argspec = self._layer_call_argspecs[layer].args
if 'training' in argspec:
kwargs.setdefault('training', training)
if 'mask' in argspec:
computed_masks = nest.map_structure(
lambda t: getattr(t, '_keras_mask', None),
computed_tensors)
kwargs.setdefault('mask', computed_masks)
# Compute outputs.
output_tensors = layer(computed_tensors, **kwargs)
# Update tensor_dict.
for x, y in zip(
nest.flatten(node.output_tensors), nest.flatten(output_tensors)):
tensor_dict[str(id(x))] = y
output_tensors = []
output_shapes = []
for x in self.outputs:
assert str(id(x)) in tensor_dict, 'Could not compute output ' + str(x)
tensor = tensor_dict[str(id(x))]
output_shapes.append(x.shape)
output_tensors.append(tensor)
if output_shapes is not None:
input_shapes = [x.shape for x in inputs]
cache_key = generic_utils.object_list_uid(input_shapes)
self._output_shape_cache[cache_key] = nest.pack_sequence_as(
self._nested_outputs, output_shapes)
output_tensors = nest.pack_sequence_as(self._nested_outputs, output_tensors)
return output_tensors
def get_config(self):
if not self._is_graph_network:
raise NotImplementedError
config = {
'name': self.name,
}
node_conversion_map = {}
for layer in self.layers:
if issubclass(layer.__class__, Network):
# Networks start with a pre-existing node
# linking their input to output.
kept_nodes = 1
else:
kept_nodes = 0
for original_node_index, node in enumerate(layer._inbound_nodes):
node_key = _make_node_key(layer.name, original_node_index)
if node_key in self._network_nodes:
node_conversion_map[node_key] = kept_nodes
kept_nodes += 1
layer_configs = []
for layer in self.layers: # From the earliest layers on.
layer_class_name = layer.__class__.__name__
layer_config = layer.get_config()
filtered_inbound_nodes = []
for original_node_index, node in enumerate(layer._inbound_nodes):
node_key = _make_node_key(layer.name, original_node_index)
if node_key in self._network_nodes:
# The node is relevant to the model:
# add to filtered_inbound_nodes.
if node.arguments:
try:
json.dumps(node.arguments)
kwargs = node.arguments
except TypeError:
logging.warning(
'Layer ' + layer.name +
' was passed non-serializable keyword arguments: ' +
str(node.arguments) + '. They will not be included '
'in the serialized model (and thus will be missing '
'at deserialization time).')
kwargs = {}
else:
kwargs = {}
if node.inbound_layers:
node_data = []
for inbound_layer, node_id, tensor_id, _ in node.iterate_inbound():
node_key = _make_node_key(inbound_layer.name, node_id)
new_node_index = node_conversion_map.get(node_key, 0)
node_data.append(
tf_utils.ListWrapper(
[inbound_layer.name, new_node_index, tensor_id, kwargs]))
node_data = nest.pack_sequence_as(node.input_tensors, node_data)
if not nest.is_sequence(node_data):
node_data = [node_data]
# Convert ListWrapper to list for backwards compatible configs.
node_data = tf_utils.convert_inner_node_data(node_data)
filtered_inbound_nodes.append(node_data)
layer_configs.append({
'name': layer.name,
'class_name': layer_class_name,
'config': layer_config,
'inbound_nodes': filtered_inbound_nodes,
})
config['layers'] = layer_configs
# Gather info about inputs and outputs.
model_inputs = []
for i in range(len(self._input_layers)):
layer, node_index, tensor_index = self._input_coordinates[i]
node_key = _make_node_key(layer.name, node_index)
if node_key not in self._network_nodes:
continue
new_node_index = node_conversion_map[node_key]
model_inputs.append(
tf_utils.ListWrapper([layer.name, new_node_index, tensor_index]))
model_inputs = nest.pack_sequence_as(self._nested_inputs, model_inputs)
# Preserve external Keras compat for Models with single input.
if not nest.is_sequence(model_inputs):
model_inputs = [model_inputs]
model_inputs = tf_utils.convert_inner_node_data(model_inputs)
config['input_layers'] = model_inputs
model_outputs = []
for i in range(len(self._output_layers)):
layer, node_index, tensor_index = self._output_coordinates[i]
node_key = _make_node_key(layer.name, node_index)
if node_key not in self._network_nodes:
continue
new_node_index = node_conversion_map[node_key]
model_outputs.append(
tf_utils.ListWrapper([layer.name, new_node_index, tensor_index]))
model_outputs = nest.pack_sequence_as(self._nested_outputs, model_outputs)
# Preserve external Keras compat for Models with single output.
if not nest.is_sequence(model_outputs):
model_outputs = [model_outputs]
model_outputs = tf_utils.convert_inner_node_data(model_outputs)
config['output_layers'] = model_outputs
return copy.deepcopy(config)
@classmethod
def from_config(cls, config, custom_objects=None):
"""Instantiates a Model from its config (output of `get_config()`).
Arguments:
config: Model config dictionary.
custom_objects: Optional dictionary mapping names
(strings) to custom classes or functions to be
considered during deserialization.
Returns:
A model instance.
Raises:
ValueError: In case of improperly formatted config dict.
"""
# Layer instances created during
# the graph reconstruction process
created_layers = {}
# Dictionary mapping layer instances to
# node data that specifies a layer call.
# It acts as a queue that maintains any unprocessed
# layer call until it becomes possible to process it
# (i.e. until the input tensors to the call all exist).
unprocessed_nodes = {}
def add_unprocessed_node(layer, node_data):
if layer not in unprocessed_nodes:
unprocessed_nodes[layer] = [node_data]
else:
unprocessed_nodes[layer].append(node_data)
def process_node(layer, node_data):
"""Deserialize a node.
Arguments:
layer: layer instance.
node_data: Nested structure of `ListWrapper`.
Raises:
ValueError: In case of improperly formatted `node_data`.
"""
input_tensors = []
for input_data in nest.flatten(node_data):
input_data = input_data.as_list()
inbound_layer_name = input_data[0]
inbound_node_index = input_data[1]
inbound_tensor_index = input_data[2]
if len(input_data) == 3:
kwargs = {}
elif len(input_data) == 4:
kwargs = input_data[3]
else:
raise ValueError('Improperly formatted model config.')
inbound_layer = created_layers[inbound_layer_name]
if len(inbound_layer._inbound_nodes) <= inbound_node_index:
add_unprocessed_node(layer, node_data)
return
inbound_node = inbound_layer._inbound_nodes[inbound_node_index]
input_tensors.append(
nest.flatten(inbound_node.output_tensors)[inbound_tensor_index])
input_tensors = nest.pack_sequence_as(node_data, input_tensors)
# Call layer on its inputs, thus creating the node
# and building the layer if needed.
if input_tensors is not None:
# Preserve compatibility with older configs.
flat_input_tensors = nest.flatten(input_tensors)
if len(flat_input_tensors) == 1:
layer(flat_input_tensors[0], **kwargs)
else:
layer(input_tensors, **kwargs)
def process_layer(layer_data):
"""Deserializes a layer, then call it on appropriate inputs.
Arguments:
layer_data: layer config dict.
Raises:
ValueError: In case of improperly formatted `layer_data` dict.
"""
layer_name = layer_data['name']
# Instantiate layer.
from tensorflow.python.keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top
layer = deserialize_layer(layer_data, custom_objects=custom_objects)
created_layers[layer_name] = layer
# Gather layer inputs and convert to `ListWrapper` objects.
inbound_nodes_data = layer_data['inbound_nodes']
inbound_nodes_data = tf_utils.convert_inner_node_data(
inbound_nodes_data, wrap=True)
for node_data in inbound_nodes_data:
# We don't process nodes (i.e. make layer calls)
# on the fly because the inbound node may not yet exist,
# in case of layer shared at different topological depths
# (e.g. a model such as A(B(A(B(x)))))
add_unprocessed_node(layer, node_data)
# First, we create all layers and enqueue nodes to be processed
for layer_data in config['layers']:
process_layer(layer_data)
# Then we process nodes in order of layer depth.
# Nodes that cannot yet be processed (if the inbound node
# does not yet exist) are re-enqueued, and the process
# is repeated until all nodes are processed.
while unprocessed_nodes:
for layer_data in config['layers']:
layer = created_layers[layer_data['name']]
if layer in unprocessed_nodes:
for node_data in unprocessed_nodes.pop(layer):
process_node(layer, node_data)
name = config.get('name')
input_tensors = []
output_tensors = []
input_layers = tf_utils.convert_inner_node_data(
config['input_layers'], wrap=True)
for layer_data in nest.flatten(input_layers):
layer_name, node_index, tensor_index = layer_data.as_list()
assert layer_name in created_layers
layer = created_layers[layer_name]
layer_output_tensors = layer._inbound_nodes[node_index].output_tensors
input_tensors.append(nest.flatten(layer_output_tensors)[tensor_index])
output_layers = tf_utils.convert_inner_node_data(
config['output_layers'], wrap=True)
for layer_data in nest.flatten(output_layers):
layer_name, node_index, tensor_index = layer_data.as_list()
assert layer_name in created_layers
layer = created_layers[layer_name]
layer_output_tensors = layer._inbound_nodes[node_index].output_tensors
output_tensors.append(nest.flatten(layer_output_tensors)[tensor_index])
input_tensors = nest.pack_sequence_as(input_layers, input_tensors)
output_tensors = nest.pack_sequence_as(output_layers, output_tensors)
model = cls(inputs=input_tensors, outputs=output_tensors, name=name)
# Layers not connected to outputs, such as those added in `add_loss`.
ancillary_layers = [
layer for layer in created_layers.values() if layer not in model.layers
]
if ancillary_layers:
model._insert_layers(ancillary_layers)
return model
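# Illustrative sketch (added for clarity, not part of the original source):
# `get_config`/`from_config` round-trip the architecture of a functional
# Model; weights are not included. Layer sizes below are arbitrary.
#
#   inputs = tf.keras.Input(shape=(4,))
#   outputs = tf.keras.layers.Dense(2)(tf.keras.layers.Dense(8)(inputs))
#   model = tf.keras.Model(inputs, outputs)
#   config = model.get_config()                   # plain, JSON-serializable dict
#   clone = tf.keras.Model.from_config(config)    # same architecture, fresh weights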
def save(self,
filepath,
overwrite=True,
include_optimizer=True,
save_format=None):
"""Saves the model to Tensorflow SavedModel or a single HDF5 file.
The savefile includes:
- The model architecture, allowing to re-instantiate the model.
- The model weights.
- The state of the optimizer, allowing to resume training
exactly where you left off.
This allows you to save the entirety of the state of a model
in a single file.
Saved models can be reinstantiated via `keras.models.load_model`.
The model returned by `load_model`
is a compiled model ready to be used (unless the saved model
was never compiled in the first place).
Arguments:
filepath: String, path to SavedModel or H5 file to save the model.
overwrite: Whether to silently overwrite any existing file at the
target location, or provide the user with a manual prompt.
include_optimizer: If True, save optimizer's state together.
save_format: Either 'tf' or 'h5', indicating whether to save the model
to TensorFlow SavedModel or HDF5. The default is currently 'h5', but
will switch to 'tf' in TensorFlow 2.0. The 'tf' option is currently
disabled (use `tf.keras.experimental.export_saved_model` instead).
Example:
```python
from keras.models import load_model
model.save('my_model.h5') # creates a HDF5 file 'my_model.h5'
del model # deletes the existing model
# returns a compiled model
# identical to the previous one
model = load_model('my_model.h5')
```
"""
saving.save_model(self, filepath, overwrite, include_optimizer, save_format)
def save_weights(self, filepath, overwrite=True, save_format=None):
"""Saves all layer weights.
Either saves in HDF5 or in TensorFlow format based on the `save_format`
argument.
When saving in HDF5 format, the weight file has:
- `layer_names` (attribute), a list of strings
(ordered names of model layers).
- For every layer, a `group` named `layer.name`
- For every such layer group, a group attribute `weight_names`,
a list of strings
(ordered names of the layer's weight tensors).
- For every weight in the layer, a dataset
storing the weight value, named after the weight tensor.
When saving in TensorFlow format, all objects referenced by the network are
saved in the same format as `tf.train.Checkpoint`, including any `Layer`
instances or `Optimizer` instances assigned to object attributes. For
networks constructed from inputs and outputs using `tf.keras.Model(inputs,
outputs)`, `Layer` instances used by the network are tracked/saved
automatically. For user-defined classes which inherit from `tf.keras.Model`,
`Layer` instances must be assigned to object attributes, typically in the
constructor. See the documentation of `tf.train.Checkpoint` and
`tf.keras.Model` for details.
Arguments:
filepath: String, path to the file to save the weights to. When saving
in TensorFlow format, this is the prefix used for checkpoint files
(multiple files are generated). Note that the '.h5' suffix causes
weights to be saved in HDF5 format.
overwrite: Whether to silently overwrite any existing file at the
target location, or provide the user with a manual prompt.
save_format: Either 'tf' or 'h5'. A `filepath` ending in '.h5' or
'.keras' will default to HDF5 if `save_format` is `None`. Otherwise
`None` defaults to 'tf'.
Raises:
ImportError: If h5py is not available when attempting to save in HDF5
format.
ValueError: For invalid/unknown format arguments.
"""
self._assert_weights_created()
filepath_is_h5 = _is_hdf5_filepath(filepath)
if save_format is None:
if filepath_is_h5:
save_format = 'h5'
else:
save_format = 'tf'
else:
user_format = save_format.lower().strip()
if user_format in ('tensorflow', 'tf'):
save_format = 'tf'
elif user_format in ('hdf5', 'h5', 'keras'):
save_format = 'h5'
else:
raise ValueError(
'Unknown format "%s". Was expecting one of {"tf", "h5"}.' % (
save_format,))
if save_format == 'tf' and filepath_is_h5:
raise ValueError(
('save_weights got save_format="tf"/"tensorflow", but the '
'filepath ("%s") looks like an HDF5 file. Omit the ".h5"/".keras" '
'when saving in TensorFlow format.')
% filepath)
if save_format == 'h5' and h5py is None:
raise ImportError(
'`save_weights` requires h5py when saving in hdf5.')
if save_format == 'tf':
check_filepath = filepath + '.index'
else:
check_filepath = filepath
# If file exists and should not be overwritten:
if not overwrite and os.path.isfile(check_filepath):
proceed = ask_to_proceed_with_overwrite(check_filepath)
if not proceed:
return
if save_format == 'h5':
with h5py.File(filepath, 'w') as f:
saving.save_weights_to_hdf5_group(f, self.layers)
else:
if context.executing_eagerly():
session = None
else:
session = backend.get_session()
optimizer = getattr(self, 'optimizer', None)
if (optimizer
and not isinstance(optimizer, trackable.Trackable)):
logging.warning(
('This model was compiled with a Keras optimizer (%s) but is being '
'saved in TensorFlow format with `save_weights`. The model\'s '
'weights will be saved, but unlike with TensorFlow optimizers in '
'the TensorFlow format the optimizer\'s state will not be '
'saved.\n\nConsider using a TensorFlow optimizer from `tf.train`.')
% (optimizer,))
self._trackable_saver.save(filepath, session=session)
# Record this checkpoint so it's visible from tf.train.latest_checkpoint.
checkpoint_management.update_checkpoint_state_internal(
save_dir=os.path.dirname(filepath),
model_checkpoint_path=filepath,
save_relative_paths=True,
all_model_checkpoint_paths=[filepath])
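# Commented usage sketch (file paths are hypothetical): the target format
# follows `save_format`, or is inferred from the filename suffix as above.
#
#   model.save_weights('./ckpt/weights')               # TensorFlow checkpoint files
#   model.save_weights('weights.h5')                   # HDF5, inferred from '.h5'
#   model.save_weights('weights', save_format='h5')    # HDF5, chosen explicitly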
def load_weights(self, filepath, by_name=False):
"""Loads all layer weights, either from a TensorFlow or an HDF5 weight file.
If `by_name` is False, weights are loaded based on the network's
topology. This means the architecture should be the same as when the weights
were saved. Note that layers that don't have weights are not taken into
account in the topological ordering, so adding or removing layers is fine as
long as they don't have weights.
If `by_name` is True, weights are loaded into layers only if they share the
same name. This is useful for fine-tuning or transfer-learning models where
some of the layers have changed.
Only topological loading (`by_name=False`) is supported when loading weights
from the TensorFlow format. Note that topological loading differs slightly
between TensorFlow and HDF5 formats for user-defined classes inheriting from
`tf.keras.Model`: HDF5 loads based on a flattened list of weights, while the
TensorFlow format loads based on the object-local names of attributes to
which layers are assigned in the `Model`'s constructor.
Arguments:
filepath: String, path to the weights file to load. For weight files in
TensorFlow format, this is the file prefix (the same as was passed
to `save_weights`).
by_name: Boolean, whether to load weights by name or by topological
order. Only topological loading is supported for weight files in
TensorFlow format.
Returns:
When loading a weight file in TensorFlow format, returns the same status
object as `tf.train.Checkpoint.restore`. When graph building, restore
ops are run automatically as soon as the network is built (on first call
for user-defined classes inheriting from `Model`, immediately if it is
already built).
When loading weights in HDF5 format, returns `None`.
Raises:
ImportError: If h5py is not available and the weight file is in HDF5
format.
"""
if _is_hdf5_filepath(filepath):
save_format = 'h5'
else:
try:
pywrap_tensorflow.NewCheckpointReader(filepath)
save_format = 'tf'
except errors_impl.DataLossError:
# The checkpoint is not readable in TensorFlow format. Try HDF5.
save_format = 'h5'
if save_format == 'tf':
status = self._trackable_saver.restore(filepath)
if by_name:
raise NotImplementedError(
'Weights may only be loaded based on topology into Models when '
'loading TensorFlow-formatted weights (got by_name=True to '
'load_weights).')
if not context.executing_eagerly():
session = backend.get_session()
# Restore existing variables (if any) immediately, and set up a
# streaming restore for any variables created in the future.
trackable_utils.streaming_restore(status=status, session=session)
status.assert_nontrivial_match()
return status
if h5py is None:
raise ImportError(
'`load_weights` requires h5py when loading weights from HDF5.')
if self._is_graph_network and not self.built:
raise NotImplementedError(
'Unable to load weights saved in HDF5 format into a subclassed '
'Model which has not created its variables yet. Call the Model '
'first, then load the weights.')
self._assert_weights_created()
with h5py.File(filepath, 'r') as f:
if 'layer_names' not in f.attrs and 'model_weights' in f:
f = f['model_weights']
if by_name:
saving.load_weights_from_hdf5_group_by_name(f, self.layers)
else:
saving.load_weights_from_hdf5_group(f, self.layers)
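# Commented usage sketch (paths are hypothetical): loading TensorFlow-format
# weights returns a restore-status object, while HDF5 loading returns None.
#
#   status = model.load_weights('./ckpt/weights')      # TF checkpoint prefix
#   status.assert_existing_objects_matched()           # optional verification
#   model.load_weights('weights.h5', by_name=True)     # HDF5, match layers by name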
def _updated_config(self):
"""Util shared between different serialization methods.
Returns:
Model config with Keras version information added.
"""
from tensorflow.python.keras import __version__ as keras_version # pylint: disable=g-import-not-at-top
config = self.get_config()
model_config = {
'class_name': self.__class__.__name__,
'config': config,
'keras_version': keras_version,
'backend': backend.backend()
}
return model_config
def to_json(self, **kwargs):
"""Returns a JSON string containing the network configuration.
To load a network from a JSON save file, use
`keras.models.model_from_json(json_string, custom_objects={})`.
Arguments:
**kwargs: Additional keyword arguments
to be passed to `json.dumps()`.
Returns:
A JSON string.
"""
model_config = self._updated_config()
return json.dumps(
model_config, default=serialization.get_json_type, **kwargs)
def to_yaml(self, **kwargs):
"""Returns a yaml string containing the network configuration.
To load a network from a yaml save file, use
`keras.models.model_from_yaml(yaml_string, custom_objects={})`.
`custom_objects` should be a dictionary mapping
the names of custom losses / layers / etc to the corresponding
functions / classes.
Arguments:
**kwargs: Additional keyword arguments
to be passed to `yaml.dump()`.
Returns:
A YAML string.
Raises:
ImportError: if yaml module is not found.
"""
if yaml is None:
raise ImportError(
'Requires yaml module installed (`pip install pyyaml`).')
return yaml.dump(self._updated_config(), **kwargs)
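# Commented example (a sketch; the weights file name is hypothetical): the
# JSON/YAML strings describe architecture only and can be reloaded with
# `tf.keras.models.model_from_json` / `model_from_yaml`.
#
#   json_string = model.to_json()
#   fresh_model = tf.keras.models.model_from_json(json_string)
#   fresh_model.load_weights('weights.h5')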
def summary(self, line_length=None, positions=None, print_fn=None):
"""Prints a string summary of the network.
Arguments:
line_length: Total length of printed lines
(e.g. set this to adapt the display to different
terminal window sizes).
positions: Relative or absolute positions of log elements
in each line. If not provided,
defaults to `[.33, .55, .67, 1.]`.
print_fn: Print function to use. Defaults to `print`.
It will be called on each line of the summary.
You can set it to a custom function
in order to capture the string summary.
Raises:
ValueError: if `summary()` is called before the model is built.
"""
if not self.built:
raise ValueError('This model has not yet been built. '
'Build the model first by calling `build()` or calling '
'`fit()` with some data, or specify '
'an `input_shape` argument in the first layer(s) for '
'automatic build.')
layer_utils.print_summary(self,
line_length=line_length,
positions=positions,
print_fn=print_fn)
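# Commented example (illustrative): `print_fn` makes it easy to capture the
# summary as a string instead of printing it.
#
#   lines = []
#   model.summary(print_fn=lines.append)
#   summary_string = '\n'.join(lines)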
def _validate_graph_inputs_and_outputs(self):
"""Validates the inputs and outputs of a Graph Network."""
# Check for redundancy in inputs.
if len(set(self.inputs)) != len(self.inputs):
raise ValueError('The list of inputs passed to the model '
'is redundant. '
'All inputs should only appear once.'
' Found: ' + str(self.inputs))
for x in self.inputs:
# Check that x has appropriate `_keras_history` metadata.
if not hasattr(x, '_keras_history'):
cls_name = self.__class__.__name__
raise ValueError('Input tensors to a ' + cls_name + ' ' +
'must come from `tf.keras.Input`. '
'Received: ' + str(x) +
' (missing previous layer metadata).')
# Check that x is an input tensor.
# pylint: disable=protected-access
layer, _, _ = x._keras_history
if len(layer._inbound_nodes) > 1 or (
layer._inbound_nodes and layer._inbound_nodes[0].inbound_layers):
cls_name = self.__class__.__name__
logging.warning(cls_name + ' inputs must come from '
'`tf.keras.Input` (thus holding past layer metadata), '
'they cannot be the output of '
'a previous non-Input layer. '
'Here, a tensor specified as '
'input to "' + self.name + '" was not an Input tensor, '
'it was generated by layer ' + layer.name + '.\n'
'Note that input tensors are '
'instantiated via `tensor = tf.keras.Input(shape)`.\n'
'The tensor that caused the issue was: ' + str(x.name))
# Check compatibility of batch sizes of Input Layers.
input_batch_sizes = [
training_utils.get_static_batch_size(x._keras_history[0])
for x in self.inputs
]
consistent_batch_size = None
for batch_size in input_batch_sizes:
if batch_size is not None:
if (consistent_batch_size is not None and
batch_size != consistent_batch_size):
raise ValueError('The specified batch sizes of the Input Layers'
' are incompatible. Found batch sizes: {}'.format(
input_batch_sizes))
consistent_batch_size = batch_size
for x in self.outputs:
if not hasattr(x, '_keras_history'):
cls_name = self.__class__.__name__
raise ValueError('Output tensors to a ' + cls_name + ' must be '
'the output of a TensorFlow `Layer` '
'(thus holding past layer metadata). Found: ' + str(x))
def _insert_layers(self, layers, relevant_nodes=None):
"""Inserts Layers into the Network after Network creation.
This is only valid for Keras Graph Networks. Layers added via this function
will be included in the `call` computation and `get_config` of this Network.
They will not be added to the Network's outputs.
Arguments:
layers: Arbitrary nested structure of Layers. Layers must be reachable
from one or more of the `keras.Input` Tensors that correspond to this
Network's inputs.
relevant_nodes: Nodes from the Layers that should be considered part of
this Network. If `None`, all Nodes will be considered part of this
Network.
Raises:
ValueError: If the layers depend on `Input`s not found in this Model.
"""
layers = nest.flatten(layers)
tf_utils.assert_no_legacy_layers(layers)
node_to_depth = {}
for depth, nodes in self._nodes_by_depth.items():
node_to_depth.update({node: depth for node in nodes})
# The nodes of these Layers that are relevant to this Network. If not
# provided, assume all Nodes are relevant
if not relevant_nodes:
relevant_nodes = nest.flatten([layer._inbound_nodes for layer in layers])
network_nodes = set(relevant_nodes + list(node_to_depth.keys()))
def _get_min_depth(node):
"""Gets the minimum depth at which node can be computed."""
min_depth = 0
for layer, node_id, _, _ in node.iterate_inbound():
inbound_node = layer._inbound_nodes[node_id]
if inbound_node in node_to_depth:
min_depth = min(min_depth, node_to_depth[inbound_node])
elif inbound_node not in network_nodes:
continue
else:
# Previous relevant nodes haven't been processed yet.
return None
# New node is one shallower than its shallowest input.
return min_depth - 1
# Insert nodes into `_nodes_by_depth` and other node attrs.
unprocessed_nodes = copy.copy(relevant_nodes)
i = 0
while unprocessed_nodes:
i += 1
# Do a sanity check. This can occur if `Input`s from outside this Model
# are being relied on.
if i > 10000:
raise ValueError('Layers could not be added due to missing '
'dependencies.')
node = unprocessed_nodes.pop(0)
depth = _get_min_depth(node)
if depth is None:
unprocessed_nodes.append(node)
else:
node_key = _make_node_key(
node.outbound_layer.name,
node.outbound_layer._inbound_nodes.index(node))
node_to_depth[node] = depth
self._network_nodes.add(node_key)
self._nodes_by_depth[depth].append(node)
# Insert layers into `_layer_by_depth` and other layer attrs.
for layer in layers:
depth = min([
node_to_depth[node]
for node in layer.inbound_nodes
if node in network_nodes
])
self._layers_by_depth[depth].append(layer)
self._layers.append(layer)
self._layer_call_argspecs[layer] = tf_inspect.getfullargspec(layer.call)
def _assert_weights_created(self):
"""Asserts that all the weights for the network have been created.
For a non-dynamic network, the weights must already be created after the
layer has been called. For a dynamic network, the exact list of weights can
never be known for certain since it may change at any time during execution.
We run this check right before accessing weights or getting the Numpy value
for the current weights. Otherwise, if the layer has never been called,
the user would just get an empty list, which is misleading.
Raises:
ValueError: if the weights of the network have not yet been created.
"""
if self.dynamic:
return
if (not self._is_graph_network and
'build' in self.__class__.__dict__ and
not self.built):
# For any model that has customized build() method but hasn't
# been invoked yet, this will cover both sequential and subclass model.
raise ValueError('Weights for model %s have not yet been created. '
'Weights are created when the Model is first called on '
'inputs or `build()` is called with an `input_shape`.' %
self.name)
def _is_hdf5_filepath(filepath):
return (filepath.endswith('.h5') or filepath.endswith('.keras') or
filepath.endswith('.hdf5'))
def _make_node_key(layer_name, node_index):
return layer_name + '_ib-' + str(node_index)
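# For example, _make_node_key('dense_1', 0) returns 'dense_1_ib-0'; these keys
# are how `_network_nodes` and the serialization code above refer to a specific
# inbound node of a layer.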
def _map_graph_network(inputs, outputs):
"""Validates a network's topology and gather its layers and nodes.
Arguments:
inputs: List of input tensors.
outputs: List of output tensors.
Returns:
A tuple `(nodes, nodes_by_depth, layers, layers_by_depth)`.
- nodes: list of Node instances.
- nodes_by_depth: dict mapping ints (depth) to lists of node instances.
- layers: list of Layer instances.
- layers_by_depth: dict mapping ints (depth) to lists of layer instances.
Raises:
ValueError: In case the network is not valid (e.g. disconnected graph).
"""
# Network_nodes: set of nodes included in the graph of layers
# (not all nodes included in the layers are relevant to the current graph).
network_nodes = set() # ids of all nodes relevant to the Network
nodes_depths = {} # dict {node: depth value}
layers_depths = {} # dict {layer: depth value}
layer_indices = {} # dict {layer: index in traversal}
nodes_in_decreasing_depth = []
def build_map(tensor,
finished_nodes,
nodes_in_progress,
layer,
node_index,
tensor_index):
"""Builds a map of the graph of layers.
This recursively updates the map `layer_indices`,
the list `nodes_in_decreasing_depth` and the set `network_nodes`.
Arguments:
tensor: Some tensor in a graph.
finished_nodes: Set of nodes whose subgraphs have been traversed
completely. Useful to prevent duplicated work.
nodes_in_progress: Set of nodes that are currently active on the
recursion stack. Useful to detect cycles.
layer: Layer from which `tensor` comes. If not provided,
will be obtained from `tensor._keras_history`.
node_index: Node index from which `tensor` comes.
tensor_index: Tensor index from which `tensor` comes.
Raises:
ValueError: if a cycle is detected.
"""
node = layer._inbound_nodes[node_index] # pylint: disable=protected-access
# Prevent cycles.
if node in nodes_in_progress:
raise ValueError('The tensor ' + str(tensor) + ' at layer "' +
layer.name + '" is part of a cycle.')
# Don't repeat work for shared subgraphs
if node in finished_nodes:
return
node_key = _make_node_key(layer.name, node_index)
# Update network_nodes.
network_nodes.add(node_key)
# Store the traversal order for layer sorting.
if layer not in layer_indices:
layer_indices[layer] = len(layer_indices)
nodes_in_progress.add(node)
# Propagate to all previous tensors connected to this node.
for layer, node_index, tensor_index, tensor in node.iterate_inbound():
build_map(tensor, finished_nodes, nodes_in_progress, layer, node_index,
tensor_index)
finished_nodes.add(node)
nodes_in_progress.remove(node)
nodes_in_decreasing_depth.append(node)
finished_nodes = set()
nodes_in_progress = set()
for x in outputs:
layer, node_index, tensor_index = x._keras_history # pylint: disable=protected-access
build_map(x, finished_nodes, nodes_in_progress,
layer=layer,
node_index=node_index,
tensor_index=tensor_index)
for node in reversed(nodes_in_decreasing_depth):
# If the depth is not set, the node has no outbound nodes (depth 0).
depth = nodes_depths.setdefault(node, 0)
# Update the depth of the corresponding layer
previous_depth = layers_depths.get(node.outbound_layer, 0)
# If we've seen this layer before at a higher depth,
# we should use that depth instead of the node depth.
# This is necessary for shared layers that have inputs at different
# depth levels in the graph.
depth = max(depth, previous_depth)
layers_depths[node.outbound_layer] = depth
nodes_depths[node] = depth
# Update the depth of inbound nodes.
# The "depth" of a node is the max of the depths
# of all layers it is connected to.
for inbound_layer, node_index, _, _ in node.iterate_inbound():
inbound_node = inbound_layer._inbound_nodes[node_index] # pylint: disable=protected-access
previous_depth = nodes_depths.get(inbound_node, 0)
nodes_depths[inbound_node] = max(depth + 1, previous_depth)
# Handle inputs that are not connected to outputs.
for input_t in inputs:
input_layer = input_t._keras_history[0]
if input_layer not in layers_depths:
layers_depths[input_layer] = 0
layer_indices[input_layer] = -1
nodes_depths[input_layer._inbound_nodes[0]] = 0
# Build a dict {depth: list of nodes with this depth}
nodes_by_depth = collections.defaultdict(list)
for node, depth in nodes_depths.items():
nodes_by_depth[depth].append(node)
# Build a dict {depth: list of layers with this depth}
layers_by_depth = collections.defaultdict(list)
for layer, depth in layers_depths.items():
layers_by_depth[depth].append(layer)
# Get sorted list of layer depths.
depth_keys = list(layers_by_depth.keys())
depth_keys.sort(reverse=True)
# Set self.layers and self._layers_by_depth.
layers = []
for depth in depth_keys:
layers_for_depth = layers_by_depth[depth]
# Network.layers needs to have a deterministic order:
# here we order them by traversal order.
layers_for_depth.sort(key=lambda x: layer_indices[x])
layers.extend(layers_for_depth)
# Get sorted list of node depths.
depth_keys = list(nodes_by_depth.keys())
depth_keys.sort(reverse=True)
# Check that all tensors required are computable.
# computable_tensors: all tensors in the graph
# that can be computed from the inputs provided.
computable_tensors = []
for x in inputs:
computable_tensors.append(x)
layers_with_complete_input = [] # To provide a better error msg.
for depth in depth_keys:
for node in nodes_by_depth[depth]:
layer = node.outbound_layer
if layer:
for x in nest.flatten(node.input_tensors):
if x not in computable_tensors:
raise ValueError('Graph disconnected: '
'cannot obtain value for tensor ' + str(x) +
' at layer "' + layer.name + '". '
'The following previous layers '
'were accessed without issue: ' +
str(layers_with_complete_input))
for x in nest.flatten(node.output_tensors):
computable_tensors.append(x)
layers_with_complete_input.append(layer.name)
# Ensure name uniqueness, which is crucial for serialization
# (since serialized nodes refer to layers by their name).
all_names = [layer.name for layer in layers]
for name in all_names:
if all_names.count(name) != 1:
raise ValueError('The name "' + name + '" is used ' +
str(all_names.count(name)) + ' times in the model. '
'All layer names should be unique.')
return network_nodes, nodes_by_depth, layers, layers_by_depth
|
the-stack_0_16181 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import inspect
import logging
import requests
import xmltodict
from xml.parsers.expat import ExpatError
from optionaldict import optionaldict
from wechatpy.utils import random_string
from wechatpy.exceptions import WeChatPayException, InvalidSignatureException
from wechatpy.pay.utils import (
calculate_signature, calculate_signature_hmac, _check_signature, dict_to_xml
)
from wechatpy.pay.base import BaseWeChatPayAPI
from wechatpy.pay import api
logger = logging.getLogger(__name__)
def _is_api_endpoint(obj):
return isinstance(obj, BaseWeChatPayAPI)
class WeChatPay(object):
"""
微信支付接口
:param appid: 微信公众号 appid
:param sub_appid: 当前调起支付的小程序APPID
:param api_key: 商户 key,不要在这里使用小程序的密钥
:param mch_id: 商户号
:param sub_mch_id: 可选,子商户号,受理模式下必填
:param mch_cert: 必填,商户证书路径
:param mch_key: 必填,商户证书私钥路径
:param timeout: 可选,请求超时时间,单位秒,默认无超时设置
:param sandbox: 可选,是否使用测试环境,默认为 False
"""
redpack = api.WeChatRedpack()
"""Red packet (lucky money) API"""
transfer = api.WeChatTransfer()
"""Enterprise payment (transfer) API"""
coupon = api.WeChatCoupon()
"""Coupon API"""
order = api.WeChatOrder()
"""Order API"""
refund = api.WeChatRefund()
"""Refund API"""
micropay = api.WeChatMicroPay()
"""Micropay (swipe-card payment) API"""
tools = api.WeChatTools()
"""Utility API"""
jsapi = api.WeChatJSAPI()
"""Official account web JSAPI payment API"""
withhold = api.WeChatWithhold()
"""Automatic debit (withholding) API"""
API_BASE_URL = 'https://api.mch.weixin.qq.com/'
def __new__(cls, *args, **kwargs):
self = super(WeChatPay, cls).__new__(cls)
api_endpoints = inspect.getmembers(self, _is_api_endpoint)
for name, _api in api_endpoints:
api_cls = type(_api)
_api = api_cls(self)
setattr(self, name, _api)
return self
def __init__(self, appid, api_key, mch_id, sub_mch_id=None,
mch_cert=None, mch_key=None, timeout=None, sandbox=False, sub_appid=None):
self.appid = appid
self.sub_appid = sub_appid
self.api_key = api_key
self.mch_id = mch_id
self.sub_mch_id = sub_mch_id
self.mch_cert = mch_cert
self.mch_key = mch_key
self.timeout = timeout
self.sandbox = sandbox
self._sandbox_api_key = None
self._http = requests.Session()
def _fetch_sandbox_api_key(self):
nonce_str = random_string(32)
sign = calculate_signature({'mch_id': self.mch_id, 'nonce_str': nonce_str}, self.api_key)
payload = dict_to_xml({
'mch_id': self.mch_id,
'nonce_str': nonce_str,
}, sign=sign)
headers = {'Content-Type': 'text/xml'}
api_url = '{base}sandboxnew/pay/getsignkey'.format(base=self.API_BASE_URL)
response = self._http.post(api_url, data=payload, headers=headers)
return xmltodict.parse(response.text)['xml'].get('sandbox_signkey')
def _request(self, method, url_or_endpoint, **kwargs):
if not url_or_endpoint.startswith(('http://', 'https://')):
api_base_url = kwargs.pop('api_base_url', self.API_BASE_URL)
if self.sandbox:
api_base_url = '{url}sandboxnew/'.format(url=api_base_url)
url = '{base}{endpoint}'.format(
base=api_base_url,
endpoint=url_or_endpoint
)
else:
url = url_or_endpoint
if isinstance(kwargs.get('data', ''), dict):
data = kwargs['data']
if 'mchid' not in data:
# Some endpoints expect 'mchid' rather than 'mch_id'; only fill in the legacy key when 'mchid' is absent.
data.setdefault('mch_id', self.mch_id)
data.setdefault('sub_mch_id', self.sub_mch_id)
data.setdefault('nonce_str', random_string(32))
data = optionaldict(data)
if data.get('sign_type', 'MD5') == 'HMAC-SHA256':
sign = calculate_signature_hmac(data, self.sandbox_api_key if self.sandbox else self.api_key)
else:
sign = calculate_signature(data, self.sandbox_api_key if self.sandbox else self.api_key)
body = dict_to_xml(data, sign)
body = body.encode('utf-8')
kwargs['data'] = body
# Merchant API client certificate
if self.mch_cert and self.mch_key:
kwargs['cert'] = (self.mch_cert, self.mch_key)
kwargs['timeout'] = kwargs.get('timeout', self.timeout)
logger.debug('Request to WeChat API: %s %s\n%s', method, url, kwargs)
res = self._http.request(
method=method,
url=url,
**kwargs
)
try:
res.raise_for_status()
except requests.RequestException as reqe:
raise WeChatPayException(
return_code=None,
client=self,
request=reqe.request,
response=reqe.response
)
return self._handle_result(res)
def _handle_result(self, res):
res.encoding = 'utf-8'
xml = res.text
logger.debug('Response from WeChat API \n %s', xml)
try:
data = xmltodict.parse(xml)['xml']
except (xmltodict.ParsingInterrupted, ExpatError):
# Failed to parse the XML response
logger.debug('WeChat payment result xml parsing error', exc_info=True)
return xml
return_code = data['return_code']
return_msg = data.get('return_msg')
result_code = data.get('result_code')
errcode = data.get('err_code')
errmsg = data.get('err_code_des')
if return_code != 'SUCCESS' or result_code != 'SUCCESS':
# The return code or result code indicates failure
raise WeChatPayException(
return_code,
result_code,
return_msg,
errcode,
errmsg,
client=self,
request=res.request,
response=res
)
return data
def get(self, url, **kwargs):
return self._request(
method='get',
url_or_endpoint=url,
**kwargs
)
def post(self, url, **kwargs):
return self._request(
method='post',
url_or_endpoint=url,
**kwargs
)
def check_signature(self, params):
return _check_signature(params, self.api_key if not self.sandbox else self.sandbox_api_key)
def parse_payment_result(self, xml):
"""解析微信支付结果通知"""
try:
data = xmltodict.parse(xml)
except (xmltodict.ParsingInterrupted, ExpatError):
raise InvalidSignatureException()
if not data or 'xml' not in data:
raise InvalidSignatureException()
data = data['xml']
sign = data.pop('sign', None)
real_sign = calculate_signature(data, self.api_key if not self.sandbox else self.sandbox_api_key)
if sign != real_sign:
raise InvalidSignatureException()
for key in ('total_fee', 'settlement_total_fee', 'cash_fee', 'coupon_fee', 'coupon_count'):
if key in data:
data[key] = int(data[key])
data['sign'] = sign
return data
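# Commented usage sketch (credentials and variable names are placeholders, not
# from the original source): a typical payment-notification handler parses and
# verifies the callback XML posted by WeChat.
#
#   pay = WeChatPay(appid='wx1234567890', api_key='<merchant key>', mch_id='<mch id>')
#   try:
#       result = pay.parse_payment_result(request_body_xml)
#   except InvalidSignatureException:
#       ...  # signature check failed, reject the notification
#   else:
#       total_fee = result['total_fee']  # already converted to int above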
@property
def sandbox_api_key(self):
if self.sandbox and self._sandbox_api_key is None:
self._sandbox_api_key = self._fetch_sandbox_api_key()
return self._sandbox_api_key
|
the-stack_0_16183 | # Copyright 2020, 2021 by B. Knueven, D. Mildebrath, C. Muir, J-P Watson, and D.L. Woodruff
# This software is distributed under the 3-clause BSD License.
# Illustrate the use of sequential sampling for programmers using aircond.
#
import sys
import numpy as np
import argparse
import mpisppy.tests.examples.aircond as aircond
import pyomo.environ as pyo
import mpisppy.utils.sputils as sputils
import mpisppy.utils.amalgomator as amalgomator
import mpisppy.confidence_intervals.multi_seqsampling as multi_seqsampling
import mpisppy.confidence_intervals.confidence_parsers as confidence_parsers
from mpisppy.utils import baseparsers
#============================
def xhat_generator_aircond(scenario_names, solvername="gurobi", solver_options=None,
branching_factors=None, mudev = 0, sigmadev = 40,
start_ups=None, start_seed = 0):
'''
For sequential sampling.
Takes scenario names as input and provides the best solution for the
approximate problem associated with the scenarios.
Parameters
----------
scenario_names: list of str
Names of the scenario we use
solvername: str, optional
Name of the solver used. The default is "gurobi"
solver_options: dict, optional
Solving options. The default is None.
branching_factors: list, optional
Branching factors of the scenario tree. The default is [3,2,3]
(a 4 stage model with 18 different scenarios)
mudev: float, optional
The average deviation of demand between two stages. The default is 0.
sigmadev: float, optional
The standard deviation from mudev for the demand difference between
two stages. The default is 40.
start_seed: int, optional
The starting seed, used to create different sample scenario trees.
The default is 0.
Returns
-------
xhat: str
A generated xhat, solution to the approximate problem induced by scenario_names.
NOTE: This tool only works when the file is in mpisppy. In SPInstances,
you must change the from_module line.
'''
num_scens = len(scenario_names)
ama_options = { "EF-mstage": True,
"EF_solver_name": solvername,
"EF_solver_options": solver_options,
"num_scens": num_scens,
"_mpisppy_probability": 1/num_scens,
"branching_factors":branching_factors,
"mudev":mudev,
"start_ups":start_ups,
"start_seed":start_seed,
"sigmadev":sigmadev
}
#We use from_module to build easily an Amalgomator object
ama = amalgomator.from_module("mpisppy.tests.examples.aircond",
ama_options,use_command_line=False)
#Correcting the building by putting the right scenarios.
ama.scenario_names = scenario_names
ama.verbose = False
ama.run()
# get the xhat
xhat = sputils.nonant_cache_from_ef(ama.ef)
return {'ROOT': xhat['ROOT']}
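# Commented example (a sketch; the solver choice is arbitrary): 18 scenario
# names match branching factors [3, 2, 3].
#
#   names = ['Scenario' + str(i) for i in range(18)]
#   xhat = xhat_generator_aircond(names, solvername='cplex',
#                                 branching_factors=[3, 2, 3],
#                                 mudev=0, sigmadev=40,
#                                 start_ups=False, start_seed=0)
#   # xhat['ROOT'] holds the first-stage (root-node) solution values.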
def main(args):
""" Code for aircond sequential sampling (in a function for easier testing)
Args:
args (argparse.Namespace): the command line arguments object from _parse_args
Returns:
results (dict): the solution, gap confidence interval and T
"""
refmodelname = "mpisppy.tests.examples.aircond"
scenario_creator = aircond.scenario_creator
BFs = args.branching_factors
num_scens = np.prod(BFs)
solver_name = args.solver_name
mudev = args.mudev
sigmadev = args.sigmadev
scenario_creator_kwargs = {"num_scens" : num_scens,
"branching_factors": BFs,
"mudev": mudev,
"sigmadev": sigmadev,
"start_ups": False,
"start_seed": args.seed,
}
scenario_names = ['Scenario' + str(i) for i in range(num_scens)]
xhat_gen_options = {"scenario_names": scenario_names,
"solvername": solver_name,
"solver_options": None,
"branching_factors" : BFs,
"mudev": mudev,
"sigmadev": sigmadev,
"start_ups": False,
"start_seed": args.seed,
}
# simply called "options" by the SeqSampling constructor
inneroptions = {"solvername": solver_name,
"branching_factors": BFs,
"solver_options": None,
"sample_size_ratio": args.sample_size_ratio,
"xhat_gen_options": xhat_gen_options,
"ArRP": args.ArRP,
"kf_xhat": args.kf_GS,
"kf_xhat": args.kf_xhat,
"confidence_level": args.confidence_level,
"start_ups": False,
}
if args.BM_vs_BPL == "BM":
# Bayraksan and Morton
optionsBM = {'h': args.BM_h,
'hprime': args.BM_hprime,
'eps': args.BM_eps,
'epsprime': args.BM_eps_prime,
"p": args.BM_p,
"q": args.BM_q,
"xhat_gen_options": xhat_gen_options,
}
optionsBM.update(inneroptions)
sampler = multi_seqsampling.IndepScens_SeqSampling(refmodelname,
xhat_generator_aircond,
optionsBM,
stochastic_sampling=False,
stopping_criterion="BM",
solving_type="EF-mstage",
)
else: # must be BPL
optionsBPL = {'eps': args.BPL_eps,
"c0": args.BPL_c0,
"n0min": args.BPL_n0min,
"xhat_gen_options": xhat_gen_options,
}
optionsBPL.update(inneroptions)
ss = int(args.BPL_n0min) != 0
sampler = multi_seqsampling.IndepScens_SeqSampling(refmodelname,
xhat_generator_aircond,
optionsBPL,
stochastic_sampling=ss,
stopping_criterion="BPL",
solving_type="EF-mstage",
)
xhat = sampler.run()
return xhat
def _parse_args():
parser = baseparsers._basic_multistage("aircond_seqsampling")
parser = confidence_parsers.confidence_parser(parser)
parser = confidence_parsers.sequential_parser(parser)
parser = confidence_parsers.BM_parser(parser)
parser = confidence_parsers.BPL_parser(parser) # --help will show both BM and BPL
parser = aircond.inparser_adder(parser)
parser.add_argument("--solver-name",
help = "solver name (default gurobi)",
dest="solver_name",
type = str,
default="gurobi")
parser.add_argument("--seed",
help="Seed for random numbers (default is 1134)",
dest="seed",
type=int,
default=1134)
parser.add_argument("--BM-vs-BPL",
help="BM or BPL for Bayraksan and Morton or B and Pierre Louis",
dest="BM_vs_BPL",
type=str,
default=None)
parser.add_argument("--xhat1-file",
help="File to which xhat1 should be (e.g. to process with zhat4hat.py)",
dest="xhat1_file",
type=str,
default=None)
args = parser.parse_args()
if args.BM_vs_BPL is None:
raise argparse.ArgumentTypeError("--BM-vs_BPL must be given.")
if args.BM_vs_BPL != "BM" and args.BM_vs_BPL != "BPL":
raise argparse.ArgumentTypeError(f"--BM-vs_BPL must be BM or BPL (you gave {args.BM_vs_BMPL})")
return args
if __name__ == '__main__':
args = _parse_args()
results = main(args)
print(f"Final gap confidence interval results:", results)
if args.xhat1_file is not None:
print(f"Writing xhat1 to {args.xhat1_file}.npy")
root_nonants =np.fromiter((v for v in results["Candidate_solution"]["ROOT"]), float)
np.save(args.xhat1_file, root_nonants)
|
the-stack_0_16184 | # -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import email.mime.multipart
import email.utils
import logging
import time
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from six.moves import urllib
import bleach
import jinja2
from twisted.internet import defer
from twisted.mail.smtp import sendmail
from synapse.api.constants import EventTypes
from synapse.api.errors import StoreError
from synapse.push.presentable_names import (
calculate_room_name,
descriptor_from_member_events,
name_from_member_event,
)
from synapse.types import UserID
from synapse.util.async_helpers import concurrently_execute
from synapse.visibility import filter_events_for_client
logger = logging.getLogger(__name__)
MESSAGE_FROM_PERSON_IN_ROOM = "You have a message on %(app)s from %(person)s " \
"in the %(room)s room..."
MESSAGE_FROM_PERSON = "You have a message on %(app)s from %(person)s..."
MESSAGES_FROM_PERSON = "You have messages on %(app)s from %(person)s..."
MESSAGES_IN_ROOM = "You have messages on %(app)s in the %(room)s room..."
MESSAGES_IN_ROOM_AND_OTHERS = \
"You have messages on %(app)s in the %(room)s room and others..."
MESSAGES_FROM_PERSON_AND_OTHERS = \
"You have messages on %(app)s from %(person)s and others..."
INVITE_FROM_PERSON_TO_ROOM = "%(person)s has invited you to join the " \
"%(room)s room on %(app)s..."
INVITE_FROM_PERSON = "%(person)s has invited you to chat on %(app)s..."
CONTEXT_BEFORE = 1
CONTEXT_AFTER = 1
# From https://github.com/matrix-org/matrix-react-sdk/blob/master/src/HtmlUtils.js
ALLOWED_TAGS = [
'font', # custom to matrix for IRC-style font coloring
'del', # for markdown
# deliberately no h1/h2 to stop people shouting.
'h3', 'h4', 'h5', 'h6', 'blockquote', 'p', 'a', 'ul', 'ol',
'nl', 'li', 'b', 'i', 'u', 'strong', 'em', 'strike', 'code', 'hr', 'br', 'div',
'table', 'thead', 'caption', 'tbody', 'tr', 'th', 'td', 'pre'
]
ALLOWED_ATTRS = {
# custom ones first:
"font": ["color"], # custom to matrix
"a": ["href", "name", "target"], # remote target: custom to matrix
# We don't currently allow img itself by default, but this
# would make sense if we did
"img": ["src"],
}
# When bleach release a version with this option, we can specify schemes
# ALLOWED_SCHEMES = ["http", "https", "ftp", "mailto"]
class Mailer(object):
def __init__(self, hs, app_name, notif_template_html, notif_template_text):
self.hs = hs
self.notif_template_html = notif_template_html
self.notif_template_text = notif_template_text
self.store = self.hs.get_datastore()
self.macaroon_gen = self.hs.get_macaroon_generator()
self.state_handler = self.hs.get_state_handler()
self.app_name = app_name
logger.info("Created Mailer for app_name %s" % app_name)
@defer.inlineCallbacks
def send_notification_mail(self, app_id, user_id, email_address,
push_actions, reason):
try:
from_string = self.hs.config.email_notif_from % {
"app": self.app_name
}
except TypeError:
from_string = self.hs.config.email_notif_from
raw_from = email.utils.parseaddr(from_string)[1]
raw_to = email.utils.parseaddr(email_address)[1]
if raw_to == '':
raise RuntimeError("Invalid 'to' address")
rooms_in_order = deduped_ordered_list(
[pa['room_id'] for pa in push_actions]
)
notif_events = yield self.store.get_events(
[pa['event_id'] for pa in push_actions]
)
notifs_by_room = {}
for pa in push_actions:
notifs_by_room.setdefault(pa["room_id"], []).append(pa)
# collect the current state for all the rooms in which we have
# notifications
state_by_room = {}
try:
user_display_name = yield self.store.get_profile_displayname(
UserID.from_string(user_id).localpart
)
if user_display_name is None:
user_display_name = user_id
except StoreError:
user_display_name = user_id
@defer.inlineCallbacks
def _fetch_room_state(room_id):
room_state = yield self.store.get_current_state_ids(room_id)
state_by_room[room_id] = room_state
# Run at most 3 of these at once: sync does 10 at a time but email
# notifs are much less realtime than sync so we can afford to wait a bit.
yield concurrently_execute(_fetch_room_state, rooms_in_order, 3)
# actually sort our so-called rooms_in_order list, most recent room first
rooms_in_order.sort(
key=lambda r: -(notifs_by_room[r][-1]['received_ts'] or 0)
)
rooms = []
for r in rooms_in_order:
roomvars = yield self.get_room_vars(
r, user_id, notifs_by_room[r], notif_events, state_by_room[r]
)
rooms.append(roomvars)
reason['room_name'] = yield calculate_room_name(
self.store, state_by_room[reason['room_id']], user_id,
fallback_to_members=True
)
summary_text = yield self.make_summary_text(
notifs_by_room, state_by_room, notif_events, user_id, reason
)
template_vars = {
"user_display_name": user_display_name,
"unsubscribe_link": self.make_unsubscribe_link(
user_id, app_id, email_address
),
"summary_text": summary_text,
"app_name": self.app_name,
"rooms": rooms,
"reason": reason,
}
html_text = self.notif_template_html.render(**template_vars)
html_part = MIMEText(html_text, "html", "utf8")
plain_text = self.notif_template_text.render(**template_vars)
text_part = MIMEText(plain_text, "plain", "utf8")
multipart_msg = MIMEMultipart('alternative')
multipart_msg['Subject'] = "[%s] %s" % (self.app_name, summary_text)
multipart_msg['From'] = from_string
multipart_msg['To'] = email_address
multipart_msg['Date'] = email.utils.formatdate()
multipart_msg['Message-ID'] = email.utils.make_msgid()
multipart_msg.attach(text_part)
multipart_msg.attach(html_part)
logger.info("Sending email push notification to %s" % email_address)
# logger.debug(html_text)
yield sendmail(
self.hs.config.email_smtp_host,
raw_from, raw_to, multipart_msg.as_string(),
port=self.hs.config.email_smtp_port,
requireAuthentication=self.hs.config.email_smtp_user is not None,
username=self.hs.config.email_smtp_user,
password=self.hs.config.email_smtp_pass,
requireTransportSecurity=self.hs.config.require_transport_security
)
@defer.inlineCallbacks
def get_room_vars(self, room_id, user_id, notifs, notif_events, room_state_ids):
my_member_event_id = room_state_ids[("m.room.member", user_id)]
my_member_event = yield self.store.get_event(my_member_event_id)
is_invite = my_member_event.content["membership"] == "invite"
room_name = yield calculate_room_name(self.store, room_state_ids, user_id)
room_vars = {
"title": room_name,
"hash": string_ordinal_total(room_id), # See sender avatar hash
"notifs": [],
"invite": is_invite,
"link": self.make_room_link(room_id),
}
if not is_invite:
for n in notifs:
notifvars = yield self.get_notif_vars(
n, user_id, notif_events[n['event_id']], room_state_ids
)
# merge overlapping notifs together.
# relies on the notifs being in chronological order.
merge = False
if room_vars['notifs'] and 'messages' in room_vars['notifs'][-1]:
prev_messages = room_vars['notifs'][-1]['messages']
for message in notifvars['messages']:
pm = list(filter(lambda pm: pm['id'] == message['id'],
prev_messages))
if pm:
if not message["is_historical"]:
pm[0]["is_historical"] = False
merge = True
elif merge:
# we're merging, so append any remaining messages
# in this notif to the previous one
prev_messages.append(message)
if not merge:
room_vars['notifs'].append(notifvars)
defer.returnValue(room_vars)
@defer.inlineCallbacks
def get_notif_vars(self, notif, user_id, notif_event, room_state_ids):
results = yield self.store.get_events_around(
notif['room_id'], notif['event_id'],
before_limit=CONTEXT_BEFORE, after_limit=CONTEXT_AFTER
)
ret = {
"link": self.make_notif_link(notif),
"ts": notif['received_ts'],
"messages": [],
}
the_events = yield filter_events_for_client(
self.store, user_id, results["events_before"]
)
the_events.append(notif_event)
for event in the_events:
messagevars = yield self.get_message_vars(notif, event, room_state_ids)
if messagevars is not None:
ret['messages'].append(messagevars)
defer.returnValue(ret)
@defer.inlineCallbacks
def get_message_vars(self, notif, event, room_state_ids):
if event.type != EventTypes.Message:
return
sender_state_event_id = room_state_ids[("m.room.member", event.sender)]
sender_state_event = yield self.store.get_event(sender_state_event_id)
sender_name = name_from_member_event(sender_state_event)
sender_avatar_url = sender_state_event.content.get("avatar_url")
# 'hash' for deterministically picking default images: use
# sender_hash % the number of default images to choose from
sender_hash = string_ordinal_total(event.sender)
msgtype = event.content.get("msgtype")
ret = {
"msgtype": msgtype,
"is_historical": event.event_id != notif['event_id'],
"id": event.event_id,
"ts": event.origin_server_ts,
"sender_name": sender_name,
"sender_avatar_url": sender_avatar_url,
"sender_hash": sender_hash,
}
if msgtype == "m.text":
self.add_text_message_vars(ret, event)
elif msgtype == "m.image":
self.add_image_message_vars(ret, event)
if "body" in event.content:
ret["body_text_plain"] = event.content["body"]
defer.returnValue(ret)
def add_text_message_vars(self, messagevars, event):
msgformat = event.content.get("format")
messagevars["format"] = msgformat
formatted_body = event.content.get("formatted_body")
body = event.content.get("body")
if msgformat == "org.matrix.custom.html" and formatted_body:
messagevars["body_text_html"] = safe_markup(formatted_body)
elif body:
messagevars["body_text_html"] = safe_text(body)
return messagevars
def add_image_message_vars(self, messagevars, event):
messagevars["image_url"] = event.content["url"]
return messagevars
@defer.inlineCallbacks
def make_summary_text(self, notifs_by_room, room_state_ids,
notif_events, user_id, reason):
if len(notifs_by_room) == 1:
# Only one room has new stuff
# dict.keys() is not indexable on Python 3, so materialize it first.
room_id = list(notifs_by_room.keys())[0]
# If the room has some kind of name, use it, but we don't
# want the generated-from-names one here otherwise we'll
# end up with, "new message from Bob in the Bob room"
room_name = yield calculate_room_name(
self.store, room_state_ids[room_id], user_id, fallback_to_members=False
)
my_member_event_id = room_state_ids[room_id][("m.room.member", user_id)]
my_member_event = yield self.store.get_event(my_member_event_id)
if my_member_event.content["membership"] == "invite":
inviter_member_event_id = room_state_ids[room_id][
("m.room.member", my_member_event.sender)
]
inviter_member_event = yield self.store.get_event(
inviter_member_event_id
)
inviter_name = name_from_member_event(inviter_member_event)
if room_name is None:
defer.returnValue(INVITE_FROM_PERSON % {
"person": inviter_name,
"app": self.app_name
})
else:
defer.returnValue(INVITE_FROM_PERSON_TO_ROOM % {
"person": inviter_name,
"room": room_name,
"app": self.app_name,
})
sender_name = None
if len(notifs_by_room[room_id]) == 1:
# There is just the one notification, so give some detail
event = notif_events[notifs_by_room[room_id][0]["event_id"]]
if ("m.room.member", event.sender) in room_state_ids[room_id]:
state_event_id = room_state_ids[room_id][
("m.room.member", event.sender)
]
state_event = yield self.store.get_event(state_event_id)
sender_name = name_from_member_event(state_event)
if sender_name is not None and room_name is not None:
defer.returnValue(MESSAGE_FROM_PERSON_IN_ROOM % {
"person": sender_name,
"room": room_name,
"app": self.app_name,
})
elif sender_name is not None:
defer.returnValue(MESSAGE_FROM_PERSON % {
"person": sender_name,
"app": self.app_name,
})
else:
# There's more than one notification for this room, so just
# say there are several
if room_name is not None:
defer.returnValue(MESSAGES_IN_ROOM % {
"room": room_name,
"app": self.app_name,
})
else:
# If the room doesn't have a name, say who the messages
# are from explicitly to avoid, "messages in the Bob room"
sender_ids = list(set([
notif_events[n['event_id']].sender
for n in notifs_by_room[room_id]
]))
member_events = yield self.store.get_events([
room_state_ids[room_id][("m.room.member", s)]
for s in sender_ids
])
defer.returnValue(MESSAGES_FROM_PERSON % {
"person": descriptor_from_member_events(member_events.values()),
"app": self.app_name,
})
else:
# Stuff's happened in multiple different rooms
# ...but we still refer to the 'reason' room which triggered the mail
if reason['room_name'] is not None:
defer.returnValue(MESSAGES_IN_ROOM_AND_OTHERS % {
"room": reason['room_name'],
"app": self.app_name,
})
else:
# If the reason room doesn't have a name, say who the messages
# are from explicitly to avoid, "messages in the Bob room"
sender_ids = list(set([
notif_events[n['event_id']].sender
for n in notifs_by_room[reason['room_id']]
]))
member_events = yield self.store.get_events([
room_state_ids[room_id][("m.room.member", s)]
for s in sender_ids
])
defer.returnValue(MESSAGES_FROM_PERSON_AND_OTHERS % {
"person": descriptor_from_member_events(member_events.values()),
"app": self.app_name,
})
def make_room_link(self, room_id):
if self.hs.config.email_riot_base_url:
base_url = "%s/#/room" % (self.hs.config.email_riot_base_url)
elif self.app_name == "Vector":
# need /beta for Universal Links to work on iOS
base_url = "https://vector.im/beta/#/room"
else:
base_url = "https://matrix.to/#"
return "%s/%s" % (base_url, room_id)
def make_notif_link(self, notif):
if self.hs.config.email_riot_base_url:
return "%s/#/room/%s/%s" % (
self.hs.config.email_riot_base_url,
notif['room_id'], notif['event_id']
)
elif self.app_name == "Vector":
# need /beta for Universal Links to work on iOS
return "https://vector.im/beta/#/room/%s/%s" % (
notif['room_id'], notif['event_id']
)
else:
return "https://matrix.to/#/%s/%s" % (
notif['room_id'], notif['event_id']
)
def make_unsubscribe_link(self, user_id, app_id, email_address):
params = {
"access_token": self.macaroon_gen.generate_delete_pusher_token(user_id),
"app_id": app_id,
"pushkey": email_address,
}
# XXX: make r0 once API is stable
return "%s_matrix/client/unstable/pushers/remove?%s" % (
self.hs.config.public_baseurl,
urllib.parse.urlencode(params),
)
def safe_markup(raw_html):
return jinja2.Markup(bleach.linkify(bleach.clean(
raw_html, tags=ALLOWED_TAGS, attributes=ALLOWED_ATTRS,
# bleach master has this, but it isn't released yet
# protocols=ALLOWED_SCHEMES,
strip=True
)))
def safe_text(raw_text):
"""
Process text: treat it as HTML but escape any tags (i.e. just escape the
HTML), then linkify it.
"""
return jinja2.Markup(bleach.linkify(bleach.clean(
raw_text, tags=[], attributes={},
strip=False
)))
def deduped_ordered_list(l):
seen = set()
ret = []
for item in l:
if item not in seen:
seen.add(item)
ret.append(item)
return ret
def string_ordinal_total(s):
tot = 0
for c in s:
tot += ord(c)
return tot
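# Small illustrative examples of the two helpers above (not in the original):
#   deduped_ordered_list(['b', 'a', 'b']) == ['b', 'a']
#   string_ordinal_total('ab') == 195   # ord('a') + ord('b') == 97 + 98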
def format_ts_filter(value, format):
return time.strftime(format, time.localtime(value / 1000))
def load_jinja2_templates(config):
"""Load the jinja2 email templates from disk
Returns:
(notif_template_html, notif_template_text)
"""
logger.info("loading jinja2")
loader = jinja2.FileSystemLoader(config.email_template_dir)
env = jinja2.Environment(loader=loader)
env.filters["format_ts"] = format_ts_filter
env.filters["mxc_to_http"] = _create_mxc_to_http_filter(config)
notif_template_html = env.get_template(
config.email_notif_template_html
)
notif_template_text = env.get_template(
config.email_notif_template_text
)
return notif_template_html, notif_template_text
def _create_mxc_to_http_filter(config):
def mxc_to_http_filter(value, width, height, resize_method="crop"):
if value[0:6] != "mxc://":
return ""
serverAndMediaId = value[6:]
fragment = None
if '#' in serverAndMediaId:
(serverAndMediaId, fragment) = serverAndMediaId.split('#', 1)
fragment = "#" + fragment
params = {
"width": width,
"height": height,
"method": resize_method,
}
return "%s_matrix/media/v1/thumbnail/%s?%s%s" % (
config.public_baseurl,
serverAndMediaId,
urllib.parse.urlencode(params),
fragment or "",
)
return mxc_to_http_filter
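# Illustrative example with assumed values (not from the original module): if
# config.public_baseurl == "https://example.com/", then
#   mxc_to_http_filter("mxc://matrix.org/abc123#auto", 32, 32)
# returns
#   "https://example.com/_matrix/media/v1/thumbnail/matrix.org/abc123?width=32&height=32&method=crop#auto"
# (query-string ordering follows urllib.parse.urlencode over the params dict).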
|
the-stack_0_16185 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from scipy.io import wavfile
import tensorflow.compat.v2 as tf
from tensorflow_examples.lite.model_maker.core.data_util import audio_dataloader
from tensorflow_examples.lite.model_maker.core.task.model_spec import audio_spec
def write_file(root, filepath):
full_path = os.path.join(root, filepath)
os.makedirs(os.path.dirname(full_path), exist_ok=True)
with open(full_path, 'w') as f:
f.write('<content>')
def write_sample(root,
category,
file_name,
sample_rate,
duration_sec,
value,
dtype=np.int16):
os.makedirs(os.path.join(root, category), exist_ok=True)
xs = value * np.ones(shape=(int(sample_rate * duration_sec),), dtype=dtype)
wavfile.write(os.path.join(root, category, file_name), sample_rate, xs)
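# For example, write_sample(root, 'command1', '1s.wav', 44100, 1, value=1) writes
# <root>/command1/1s.wav containing 44100 int16 samples, all equal to 1.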
class MockSpec(audio_spec.BaseSpec):
def create_model(self):
return None
def run_classifier(self, *args, **kwargs):
return None
class Base(tf.test.TestCase):
def _get_folder_path(self, sub_folder_name):
folder_path = os.path.join(self.get_temp_dir(), sub_folder_name)
if os.path.exists(folder_path):
      return folder_path
tf.compat.v1.logging.info('Test path: %s', folder_path)
os.mkdir(folder_path)
return folder_path
class LoadFromESC50Test(Base):
def test_spec(self):
folder_path = self._get_folder_path('test_examples_helper')
spec = audio_spec.YAMNetSpec()
audio_dataloader.DataLoader.from_esc50(spec, folder_path)
spec = audio_spec.BrowserFFTSpec()
with self.assertRaises(AssertionError):
audio_dataloader.DataLoader.from_esc50(spec, folder_path)
class LoadFromFolderTest(Base):
def test_spec(self):
folder_path = self._get_folder_path('test_examples_helper')
write_sample(folder_path, 'unknown', '2s.wav', 44100, 2, value=1)
spec = audio_spec.YAMNetSpec()
with self.assertRaises(AssertionError):
audio_dataloader.DataLoader.from_folder(spec, folder_path)
spec = audio_spec.BrowserFFTSpec()
audio_dataloader.DataLoader.from_folder(spec, folder_path)
def test_examples_helper(self):
root = self._get_folder_path('test_examples_helper')
write_file(root, 'a/1.wav')
write_file(root, 'a/2.wav')
write_file(root, 'b/1.wav')
write_file(root, 'b/README') # Ignored
write_file(root, 'a/b/c/d.wav') # Ignored
write_file(root, 'AUTHORS.md') # Ignored
write_file(root, 'temp.wav') # Ignored
def is_wav_files(name):
return name.endswith('.wav')
def fullpath(name):
return os.path.join(root, name)
helper = audio_dataloader.ExamplesHelper(root, is_wav_files)
self.assertEqual(helper.sorted_cateogries, ['a', 'b'])
self.assertEqual(
helper.examples_and_labels(),
([fullpath('a/1.wav'),
fullpath('a/2.wav'),
fullpath('b/1.wav')], ['a', 'a', 'b']))
self.assertEqual(
helper.examples_and_label_indices(),
([fullpath('a/1.wav'),
fullpath('a/2.wav'),
fullpath('b/1.wav')], [0, 0, 1]))
def test_no_audio_files_found(self):
folder_path = self._get_folder_path('test_no_audio_files_found')
write_sample(folder_path, 'unknown', '2s.bak', 44100, 2, value=1)
with self.assertRaisesRegexp(ValueError, 'No audio files found'):
spec = MockSpec(model_dir=folder_path)
audio_dataloader.DataLoader.from_folder(spec, folder_path)
def test_check_encoding(self):
folder_path = self._get_folder_path('test_check_encoding')
write_sample(
folder_path, 'unknown', '2s.wav', 44100, 2, value=0, dtype=np.uint8)
with self.assertRaisesRegexp(ValueError, '16 bit PCM'):
spec = MockSpec(model_dir=folder_path)
audio_dataloader.DataLoader.from_folder(spec, folder_path)
def test_from_folder(self):
folder_path = self._get_folder_path('test_from_folder')
write_sample(folder_path, 'background', '2s.wav', 44100, 2, value=0)
write_sample(folder_path, 'command1', '1s.wav', 44100, 1, value=1)
# Too short, skipped.
write_sample(folder_path, 'command1', '0.1s.wav', 44100, .1, value=2)
    # Not long enough for 2 files; the remaining .5s will be skipped.
write_sample(folder_path, 'command2', '1.5s.wav', 44100, 1.5, value=3)
# Skipped, too short.
write_sample(folder_path, 'command0', '0.1s.wav', 4410, .1, value=4)
    # Resampled; after resampling, the content becomes [4 5 5 ... 4 5 4]
write_sample(folder_path, 'command0', '1.8s.wav', 4410, 1.8, value=5)
# Ignored due to wrong file extension
write_sample(folder_path, 'command0', '1.8s.bak', 4410, 1.8, value=6)
spec = MockSpec(model_dir=folder_path)
loader = audio_dataloader.DataLoader.from_folder(spec, folder_path)
self.assertEqual(len(loader), 5)
self.assertEqual(loader.index_to_label,
['background', 'command0', 'command1', 'command2'])
def is_cached(filename):
path = os.path.join(folder_path, 'cache', filename)
self.assertTrue(tf.io.gfile.exists(path))
sampling_rate, _ = wavfile.read(path)
self.assertEqual(sampling_rate, 44100)
is_cached('background/2s_0.wav')
is_cached('background/2s_1.wav')
is_cached('command1/1s_0.wav')
is_cached('command2/1.5s_0.wav')
is_cached('command0/1.8s_0.wav')
# Consistent dataset.
consistent_loader = audio_dataloader.DataLoader.from_folder(
spec, folder_path, shuffle=False)
expected_labels = iter(
['background', 'background', 'command0', 'command1', 'command2'])
expected_values = iter([0., 0., 4., 1., 3.])
for feature, label_idx in consistent_loader.gen_dataset().unbatch():
self.assertEqual(consistent_loader.index_to_label[label_idx],
next(expected_labels))
self.assertEqual(feature.shape, (1, spec.expected_waveform_len))
self.assertEqual(feature.dtype, tf.float32)
      # The tf.audio.decode_wav op scales int16 PCM samples to float values
      # between -1 and 1, so the multiplier is 1 << 15.
      # See tensorflow/core/lib/wav/wav_io.cc for the implementation.
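      # Illustrative arithmetic: a stored int16 sample of 4 decodes to
      # 4 / 32768 ~= 1.22e-4, and multiplying by (1 << 15) recovers 4.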
self.assertNear(feature[0][0] * (1 << 15), next(expected_values), 1e-4)
if __name__ == '__main__':
tf.test.main()
|
the-stack_0_16188 | # coding=utf-8
# Copyright 2021-present, the Recognai S.L. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import pytest
import rubrix as rb
from rubrix.labeling.text_classification import find_label_errors
from rubrix.labeling.text_classification.label_errors import (
MissingPredictionError,
NoRecordsError,
SortBy,
_construct_s_and_psx,
)
from tests.server.test_helpers import client, mocking_client
@pytest.fixture(
params=[False, True], ids=["single_label", "multi_label"], scope="module"
)
def records(request):
if request.param:
return [
rb.TextClassificationRecord(
inputs="test", annotation=anot, prediction=pred, multi_label=True, id=i
)
for i, anot, pred in zip(
range(2 * 6),
[["bad"], ["bad", "good"]] * 6,
[[("bad", 0.1), ("good", 0.9)], [("good", 0.9), ("bad", 0.01)]] * 6,
)
]
return [
rb.TextClassificationRecord(
inputs="test", annotation=anot, prediction=pred, id=i
)
for i, anot, pred in zip(
range(2 * 6),
["good", "bad"] * 6,
[[("bad", 0.9), ("good", 0.1)], [("good", 0.8), ("bad", 0.2)]] * 6,
)
]
def test_sort_by_enum():
with pytest.raises(ValueError, match="mock is not a valid SortBy"):
SortBy("mock")
def test_not_installed(monkeypatch):
monkeypatch.setitem(sys.modules, "cleanlab", None)
with pytest.raises(ModuleNotFoundError, match="pip install cleanlab"):
find_label_errors(None)
def test_no_records():
records = [
rb.TextClassificationRecord(inputs="test", prediction=[("mock", 0.0)]),
rb.TextClassificationRecord(inputs="test", annotation="test"),
]
with pytest.raises(
NoRecordsError, match="none of your records have a prediction AND annotation"
):
find_label_errors(records)
def test_multi_label_warning(caplog):
record = rb.TextClassificationRecord(
inputs="test", prediction=[("mock", 0.0)], annotation="mock"
)
find_label_errors([record], multi_label="True")
assert (
"You provided the kwarg 'multi_label', but it is determined automatically"
in caplog.text
)
@pytest.mark.parametrize(
"sort_by,expected",
[
("likelihood", "normalized_margin"),
("prediction", "prob_given_label"),
("none", None),
],
)
def test_sort_by(monkeypatch, sort_by, expected):
def mock_get_noise_indices(*args, **kwargs):
assert kwargs["sorted_index_method"] == expected
return []
monkeypatch.setattr(
"cleanlab.pruning.get_noise_indices",
mock_get_noise_indices,
)
record = rb.TextClassificationRecord(
inputs="mock", prediction=[("mock", 0.1)], annotation="mock"
)
find_label_errors(records=[record], sort_by=sort_by)
def test_kwargs(monkeypatch, records):
is_multi_label = records[0].multi_label
def mock_get_noise_indices(s, psx, **kwargs):
assert kwargs == {
"multi_label": is_multi_label,
"sorted_index_method": "normalized_margin",
"mock": "mock",
}
return []
monkeypatch.setattr(
"cleanlab.pruning.get_noise_indices",
mock_get_noise_indices,
)
with pytest.raises(
ValueError, match="'sorted_index_method' kwarg is not supported"
):
find_label_errors(records=records, sorted_index_method="mock")
find_label_errors(records=records, mock="mock")
def test_construct_s_and_psx(records):
import numpy as np
s, psx = _construct_s_and_psx(records[:2])
if records[0].multi_label:
s_expected = np.array(
[
list([0]),
list([0, 1]),
]
)
psx_expected = np.array(
[
[0.1, 0.9],
[0.01, 0.9],
]
)
else:
s_expected = np.array([1, 0])
psx_expected = np.array(
[
[0.9, 0.1],
[0.2, 0.8],
]
)
assert (s == s_expected).all()
assert (psx == psx_expected).all()
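# Editorial note (based on cleanlab's conventions, not defined in this repo):
# `s` holds the given/noisy label indices and `psx` the predicted
# class-probability matrix, which is why the expected arrays above mirror the
# record annotations and prediction scores respectively.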
def test_missing_predictions():
records = [
rb.TextClassificationRecord(
inputs="test", annotation="mock", prediction=[("mock2", 0.1)]
)
]
with pytest.raises(
MissingPredictionError,
match="It seems predictions are missing for the label 'mock'",
):
_construct_s_and_psx(records)
records.append(
rb.TextClassificationRecord(
inputs="test", annotation="mock", prediction=[("mock", 0.1)]
)
)
with pytest.raises(
MissingPredictionError,
match="It seems a prediction for 'mock' is missing in the following record",
):
_construct_s_and_psx(records)
@pytest.fixture
def dataset(monkeypatch, records):
mocking_client(monkeypatch, client)
dataset = "dataset_for_label_errors"
rb.log(records, name=dataset)
yield dataset
rb.delete(dataset)
def test_find_label_errors_integration(dataset):
records = rb.load(dataset, as_pandas=False)
recs = find_label_errors(records)
assert [rec.id for rec in recs] == list(range(0, 11, 2)) + list(range(1, 12, 2))
|
the-stack_0_16190 | import asyncio
import importlib
import logging
import os
import sys
import threading
import traceback
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
from concurrent.futures.process import BrokenProcessPool
from numbers import Number
from operator import add
from time import sleep
from unittest import mock
import psutil
import pytest
from tlz import first, pluck, sliding_window
import dask
from dask import delayed
from dask.system import CPU_COUNT
from dask.utils import tmpfile
import distributed
from distributed import (
Client,
Nanny,
Reschedule,
default_client,
get_client,
get_worker,
wait,
)
from distributed.comm.registry import backends
from distributed.comm.tcp import TCPBackend
from distributed.compatibility import LINUX, WINDOWS
from distributed.core import CommClosedError, Status, rpc
from distributed.diagnostics import nvml
from distributed.diagnostics.plugin import PipInstall
from distributed.metrics import time
from distributed.scheduler import Scheduler
from distributed.utils import TimeoutError
from distributed.utils_test import (
TaskStateMetadataPlugin,
_LockedCommPool,
captured_logger,
dec,
div,
gen_cluster,
gen_test,
inc,
mul,
nodebug,
slowinc,
slowsum,
)
from distributed.worker import Worker, error_message, logger, parse_memory_limit
pytestmark = pytest.mark.ci1
@gen_cluster(nthreads=[])
async def test_worker_nthreads(s):
async with Worker(s.address) as w:
assert w.executor._max_workers == CPU_COUNT
@gen_cluster()
async def test_str(s, a, b):
assert a.address in str(a)
assert a.address in repr(a)
assert str(a.nthreads) in str(a)
assert str(a.nthreads) in repr(a)
assert str(a.executing_count) in repr(a)
@gen_cluster(nthreads=[])
async def test_identity(s):
async with Worker(s.address) as w:
ident = w.identity(None)
assert "Worker" in ident["type"]
assert ident["scheduler"] == s.address
assert isinstance(ident["nthreads"], int)
assert isinstance(ident["memory_limit"], Number)
@gen_cluster(client=True)
async def test_worker_bad_args(c, s, a, b):
class NoReprObj:
"""This object cannot be properly represented as a string."""
def __str__(self):
raise ValueError("I have no str representation.")
def __repr__(self):
raise ValueError("I have no repr representation.")
x = c.submit(NoReprObj, workers=a.address)
await wait(x)
assert not a.executing_count
assert a.data
def bad_func(*args, **kwargs):
1 / 0
class MockLoggingHandler(logging.Handler):
"""Mock logging handler to check for expected logs."""
def __init__(self, *args, **kwargs):
self.reset()
super().__init__(*args, **kwargs)
def emit(self, record):
self.messages[record.levelname.lower()].append(record.getMessage())
def reset(self):
self.messages = {
"debug": [],
"info": [],
"warning": [],
"error": [],
"critical": [],
}
hdlr = MockLoggingHandler()
old_level = logger.level
logger.setLevel(logging.DEBUG)
logger.addHandler(hdlr)
y = c.submit(bad_func, x, k=x, workers=b.address)
await wait(y)
assert not b.executing_count
assert y.status == "error"
# Make sure job died because of bad func and not because of bad
# argument.
with pytest.raises(ZeroDivisionError):
await y
tb = await y._traceback()
assert any("1 / 0" in line for line in pluck(3, traceback.extract_tb(tb)) if line)
assert "Compute Failed" in hdlr.messages["warning"][0]
logger.setLevel(old_level)
# Now we check that both workers are still alive.
xx = c.submit(add, 1, 2, workers=a.address)
yy = c.submit(add, 3, 4, workers=b.address)
results = await c._gather([xx, yy])
assert tuple(results) == (3, 7)
@gen_cluster(client=True)
async def test_upload_file(c, s, a, b):
assert not os.path.exists(os.path.join(a.local_directory, "foobar.py"))
assert not os.path.exists(os.path.join(b.local_directory, "foobar.py"))
assert a.local_directory != b.local_directory
with rpc(a.address) as aa, rpc(b.address) as bb:
await asyncio.gather(
aa.upload_file(filename="foobar.py", data=b"x = 123"),
bb.upload_file(filename="foobar.py", data="x = 123"),
)
assert os.path.exists(os.path.join(a.local_directory, "foobar.py"))
assert os.path.exists(os.path.join(b.local_directory, "foobar.py"))
def g():
import foobar
return foobar.x
future = c.submit(g, workers=a.address)
result = await future
assert result == 123
await c.close()
await s.close(close_workers=True)
assert not os.path.exists(os.path.join(a.local_directory, "foobar.py"))
@pytest.mark.skip(reason="don't yet support uploading pyc files")
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)])
async def test_upload_file_pyc(c, s, w):
with tmpfile() as dirname:
os.mkdir(dirname)
with open(os.path.join(dirname, "foo.py"), mode="w") as f:
f.write("def f():\n return 123")
sys.path.append(dirname)
try:
import foo
assert foo.f() == 123
pyc = importlib.util.cache_from_source(os.path.join(dirname, "foo.py"))
assert os.path.exists(pyc)
await c.upload_file(pyc)
def g():
import foo
return foo.x
future = c.submit(g)
result = await future
assert result == 123
finally:
sys.path.remove(dirname)
@gen_cluster(client=True)
async def test_upload_egg(c, s, a, b):
eggname = "testegg-1.0.0-py3.4.egg"
local_file = __file__.replace("test_worker.py", eggname)
assert not os.path.exists(os.path.join(a.local_directory, eggname))
assert not os.path.exists(os.path.join(b.local_directory, eggname))
assert a.local_directory != b.local_directory
await c.upload_file(filename=local_file)
assert os.path.exists(os.path.join(a.local_directory, eggname))
assert os.path.exists(os.path.join(b.local_directory, eggname))
def g(x):
import testegg
return testegg.inc(x)
future = c.submit(g, 10, workers=a.address)
result = await future
assert result == 10 + 1
await c.close()
await s.close()
await a.close()
await b.close()
assert not os.path.exists(os.path.join(a.local_directory, eggname))
@gen_cluster(client=True)
async def test_upload_pyz(c, s, a, b):
pyzname = "mytest.pyz"
local_file = __file__.replace("test_worker.py", pyzname)
assert not os.path.exists(os.path.join(a.local_directory, pyzname))
assert not os.path.exists(os.path.join(b.local_directory, pyzname))
assert a.local_directory != b.local_directory
await c.upload_file(filename=local_file)
assert os.path.exists(os.path.join(a.local_directory, pyzname))
assert os.path.exists(os.path.join(b.local_directory, pyzname))
def g(x):
from mytest import mytest
return mytest.inc(x)
future = c.submit(g, 10, workers=a.address)
result = await future
assert result == 10 + 1
await c.close()
await s.close()
await a.close()
await b.close()
assert not os.path.exists(os.path.join(a.local_directory, pyzname))
@pytest.mark.xfail(reason="Still lose time to network I/O")
@gen_cluster(client=True)
async def test_upload_large_file(c, s, a, b):
pytest.importorskip("crick")
await asyncio.sleep(0.05)
with rpc(a.address) as aa:
await aa.upload_file(filename="myfile.dat", data=b"0" * 100000000)
await asyncio.sleep(0.05)
assert a.digests["tick-duration"].components[0].max() < 0.050
@gen_cluster()
async def test_broadcast(s, a, b):
with rpc(s.address) as cc:
results = await cc.broadcast(msg={"op": "ping"})
assert results == {a.address: b"pong", b.address: b"pong"}
@gen_cluster(nthreads=[])
async def test_worker_with_port_zero(s):
async with Worker(s.address) as w:
assert isinstance(w.port, int)
assert w.port > 1024
@gen_cluster(nthreads=[])
async def test_worker_port_range(s):
port = "9867:9868"
async with Worker(s.address, port=port) as w1:
assert w1.port == 9867 # Selects first port in range
async with Worker(s.address, port=port) as w2:
assert w2.port == 9868 # Selects next port in range
with pytest.raises(
ValueError, match="Could not start Worker"
): # No more ports left
async with Worker(s.address, port=port):
pass
@pytest.mark.slow
@gen_test(timeout=60)
async def test_worker_waits_for_scheduler():
w = Worker("127.0.0.1:8724")
try:
await asyncio.wait_for(w, 3)
except TimeoutError:
pass
else:
assert False
assert w.status not in (Status.closed, Status.running, Status.paused)
await w.close(timeout=0.1)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)])
async def test_worker_task_data(c, s, w):
x = delayed(2)
xx = c.persist(x)
await wait(xx)
assert w.data[x.key] == 2
def test_error_message():
class MyException(Exception):
def __init__(self, a, b):
self.args = (a + b,)
def __str__(self):
return "MyException(%s)" % self.args
msg = error_message(MyException("Hello", "World!"))
assert "Hello" in str(msg["exception"])
max_error_len = 100
with dask.config.set({"distributed.admin.max-error-length": max_error_len}):
msg = error_message(RuntimeError("-" * max_error_len))
assert len(msg["exception_text"]) <= max_error_len + 30
assert len(msg["exception_text"]) < max_error_len * 2
msg = error_message(RuntimeError("-" * max_error_len * 20))
max_error_len = 1000000
with dask.config.set({"distributed.admin.max-error-length": max_error_len}):
msg = error_message(RuntimeError("-" * max_error_len * 2))
assert len(msg["exception_text"]) > 10100 # default + 100
@gen_cluster(client=True)
async def test_chained_error_message(c, s, a, b):
def chained_exception_fn():
class MyException(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return "MyException(%s)" % self.msg
exception = MyException("Foo")
inner_exception = MyException("Bar")
try:
raise inner_exception
except Exception as e:
raise exception from e
f = c.submit(chained_exception_fn)
try:
await f
except Exception as e:
assert e.__cause__ is not None
assert "Bar" in str(e.__cause__)
@gen_cluster(client=True)
async def test_gather(c, s, a, b):
x, y = await c.scatter(["x", "y"], workers=[b.address])
with rpc(a.address) as aa:
resp = await aa.gather(who_has={x.key: [b.address], y.key: [b.address]})
assert resp == {"status": "OK"}
assert a.data[x.key] == b.data[x.key] == "x"
assert a.data[y.key] == b.data[y.key] == "y"
@gen_cluster(client=True)
async def test_gather_missing_keys(c, s, a, b):
"""A key is missing. Other keys are gathered successfully."""
x = await c.scatter("x", workers=[b.address])
with rpc(a.address) as aa:
resp = await aa.gather(who_has={x.key: [b.address], "y": [b.address]})
assert resp == {"status": "partial-fail", "keys": {"y": (b.address,)}}
assert a.data[x.key] == b.data[x.key] == "x"
@gen_cluster(client=True, worker_kwargs={"timeout": "100ms"})
async def test_gather_missing_workers(c, s, a, b):
"""A worker owning the only copy of a key is missing.
Keys from other workers are gathered successfully.
"""
assert b.address.startswith("tcp://127.0.0.1:")
bad_addr = "tcp://127.0.0.1:12345"
x = await c.scatter("x", workers=[b.address])
with rpc(a.address) as aa:
resp = await aa.gather(who_has={x.key: [b.address], "y": [bad_addr]})
assert resp == {"status": "partial-fail", "keys": {"y": (bad_addr,)}}
assert a.data[x.key] == b.data[x.key] == "x"
@pytest.mark.parametrize("missing_first", [False, True])
@gen_cluster(client=True, worker_kwargs={"timeout": "100ms"})
async def test_gather_missing_workers_replicated(c, s, a, b, missing_first):
"""A worker owning a redundant copy of a key is missing.
The key is successfully gathered from other workers.
"""
assert b.address.startswith("tcp://127.0.0.1:")
x = await c.scatter("x", workers=[b.address])
bad_addr = "tcp://127.0.0.1:12345"
# Order matters! Test both
addrs = [bad_addr, b.address] if missing_first else [b.address, bad_addr]
with rpc(a.address) as aa:
resp = await aa.gather(who_has={x.key: addrs})
assert resp == {"status": "OK"}
assert a.data[x.key] == b.data[x.key] == "x"
@gen_cluster(nthreads=[])
async def test_io_loop(s):
async with Worker(s.address, loop=s.loop) as w:
assert w.io_loop is s.loop
@gen_cluster(client=True, nthreads=[])
async def test_spill_to_disk(c, s):
np = pytest.importorskip("numpy")
w = await Worker(
s.address,
loop=s.loop,
memory_limit=1200 / 0.6,
memory_pause_fraction=None,
memory_spill_fraction=None,
)
x = c.submit(np.random.randint, 0, 255, size=500, dtype="u1", key="x")
await wait(x)
y = c.submit(np.random.randint, 0, 255, size=500, dtype="u1", key="y")
await wait(y)
assert set(w.data) == {x.key, y.key}
assert set(w.data.memory) == {x.key, y.key}
z = c.submit(np.random.randint, 0, 255, size=500, dtype="u1", key="z")
await wait(z)
assert set(w.data) == {x.key, y.key, z.key}
assert set(w.data.memory) == {y.key, z.key}
assert set(w.data.disk) == {x.key}
await x
assert set(w.data.memory) == {x.key, z.key}
assert set(w.data.disk) == {y.key}
await w.close()
@gen_cluster(client=True)
async def test_access_key(c, s, a, b):
def f(i):
from distributed.worker import thread_state
return thread_state.key
futures = [c.submit(f, i, key="x-%d" % i) for i in range(20)]
results = await c._gather(futures)
assert list(results) == ["x-%d" % i for i in range(20)]
@gen_cluster(client=True)
async def test_run_dask_worker(c, s, a, b):
def f(dask_worker=None):
return dask_worker.id
response = await c._run(f)
assert response == {a.address: a.id, b.address: b.id}
@gen_cluster(client=True)
async def test_run_coroutine_dask_worker(c, s, a, b):
async def f(dask_worker=None):
await asyncio.sleep(0.001)
return dask_worker.id
response = await c.run(f)
assert response == {a.address: a.id, b.address: b.id}
@gen_cluster(client=True, nthreads=[])
async def test_Executor(c, s):
with ThreadPoolExecutor(2) as e:
async with Worker(s.address, executor=e) as w:
assert w.executor is e
future = c.submit(inc, 1)
result = await future
assert result == 2
assert e._threads # had to do some work
@pytest.mark.skip(
reason="Other tests leak memory, so process-level checks trigger immediately"
)
@gen_cluster(
client=True,
nthreads=[("127.0.0.1", 1)],
timeout=30,
worker_kwargs={"memory_limit": 10e6},
)
async def test_spill_by_default(c, s, w):
da = pytest.importorskip("dask.array")
x = da.ones(int(10e6 * 0.7), chunks=1e6, dtype="u1")
y = c.persist(x)
await wait(y)
assert len(w.data.disk) # something is on disk
@gen_cluster(nthreads=[("127.0.0.1", 1)], worker_kwargs={"reconnect": False})
async def test_close_on_disconnect(s, w):
await s.close()
start = time()
while w.status != Status.closed:
await asyncio.sleep(0.01)
assert time() < start + 5
@gen_cluster(nthreads=[])
async def test_memory_limit_auto(s):
async with Worker(s.address, nthreads=1) as a, Worker(
s.address, nthreads=2
) as b, Worker(s.address, nthreads=100) as c, Worker(s.address, nthreads=200) as d:
assert isinstance(a.memory_limit, Number)
assert isinstance(b.memory_limit, Number)
if CPU_COUNT > 1:
assert a.memory_limit < b.memory_limit
assert c.memory_limit == d.memory_limit
@gen_cluster(client=True)
async def test_inter_worker_communication(c, s, a, b):
[x, y] = await c._scatter([1, 2], workers=a.address)
future = c.submit(add, x, y, workers=b.address)
result = await future
assert result == 3
@gen_cluster(client=True)
async def test_clean(c, s, a, b):
x = c.submit(inc, 1, workers=a.address)
y = c.submit(inc, x, workers=b.address)
await y
collections = [
a.tasks,
a.data,
a.threads,
]
for c in collections:
assert c
x.release()
y.release()
while x.key in a.tasks:
await asyncio.sleep(0.01)
for c in collections:
assert not c
@gen_cluster(client=True)
async def test_message_breakup(c, s, a, b):
n = 100000
a.target_message_size = 10 * n
b.target_message_size = 10 * n
xs = [c.submit(mul, b"%d" % i, n, workers=a.address) for i in range(30)]
y = c.submit(lambda *args: None, xs, workers=b.address)
await y
assert 2 <= len(b.incoming_transfer_log) <= 20
assert 2 <= len(a.outgoing_transfer_log) <= 20
assert all(msg["who"] == b.address for msg in a.outgoing_transfer_log)
assert all(msg["who"] == a.address for msg in a.incoming_transfer_log)
@gen_cluster(client=True)
async def test_types(c, s, a, b):
assert all(ts.type is None for ts in a.tasks.values())
assert all(ts.type is None for ts in b.tasks.values())
x = c.submit(inc, 1, workers=a.address)
await wait(x)
assert a.tasks[x.key].type == int
y = c.submit(inc, x, workers=b.address)
await wait(y)
assert b.tasks[x.key].type == int
assert b.tasks[y.key].type == int
await c._cancel(y)
start = time()
while y.key in b.data:
await asyncio.sleep(0.01)
assert time() < start + 5
assert y.key not in b.tasks
@gen_cluster()
async def test_system_monitor(s, a, b):
assert b.monitor
b.monitor.update()
@gen_cluster(
client=True, nthreads=[("127.0.0.1", 2, {"resources": {"A": 1}}), ("127.0.0.1", 1)]
)
async def test_restrictions(c, s, a, b):
# Worker has resource available
assert a.available_resources == {"A": 1}
# Resource restrictions
x = c.submit(inc, 1, resources={"A": 1})
await x
ts = a.tasks[x.key]
assert ts.resource_restrictions == {"A": 1}
await c._cancel(x)
while ts.state != "memory":
# Resource should be unavailable while task isn't finished
assert a.available_resources == {"A": 0}
await asyncio.sleep(0.01)
# Resource restored after task is in memory
assert a.available_resources["A"] == 1
@gen_cluster(client=True)
async def test_clean_nbytes(c, s, a, b):
L = [delayed(inc)(i) for i in range(10)]
for i in range(5):
L = [delayed(add)(x, y) for x, y in sliding_window(2, L)]
total = delayed(sum)(L)
future = c.compute(total)
await wait(future)
await asyncio.sleep(1)
assert (
len(list(filter(None, [ts.nbytes for ts in a.tasks.values()])))
+ len(list(filter(None, [ts.nbytes for ts in b.tasks.values()])))
== 1
)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 20)
async def test_gather_many_small(c, s, a, *workers):
"""If the dependencies of a given task are very small, do not limit the
number of concurrent outgoing connections
"""
a.total_out_connections = 2
futures = await c._scatter(list(range(100)))
assert all(w.data for w in workers)
def f(*args):
return 10
future = c.submit(f, *futures, workers=a.address)
await wait(future)
types = list(pluck(0, a.log))
req = [i for i, t in enumerate(types) if t == "request-dep"]
recv = [i for i, t in enumerate(types) if t == "receive-dep"]
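    # Every "request-dep" entry precedes every "receive-dep" entry, i.e. the
    # worker fired off all fetches before any response arrived -- evidence that
    # the small transfers were not throttled by total_out_connections.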
assert min(recv) > max(req)
assert a.comm_nbytes == 0
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 3)
async def test_multiple_transfers(c, s, w1, w2, w3):
x = c.submit(inc, 1, workers=w1.address)
y = c.submit(inc, 2, workers=w2.address)
z = c.submit(add, x, y, workers=w3.address)
await wait(z)
r = w3.tasks[z.key].startstops
transfers = [t for t in r if t["action"] == "transfer"]
assert len(transfers) == 2
@pytest.mark.xfail(reason="very high flakiness")
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 3)
async def test_share_communication(c, s, w1, w2, w3):
x = c.submit(mul, b"1", int(w3.target_message_size + 1), workers=w1.address)
y = c.submit(mul, b"2", int(w3.target_message_size + 1), workers=w2.address)
await wait([x, y])
await c._replicate([x, y], workers=[w1.address, w2.address])
z = c.submit(add, x, y, workers=w3.address)
await wait(z)
assert len(w3.incoming_transfer_log) == 2
assert w1.outgoing_transfer_log
assert w2.outgoing_transfer_log
@pytest.mark.xfail(reason="very high flakiness")
@gen_cluster(client=True)
async def test_dont_overlap_communications_to_same_worker(c, s, a, b):
x = c.submit(mul, b"1", int(b.target_message_size + 1), workers=a.address)
y = c.submit(mul, b"2", int(b.target_message_size + 1), workers=a.address)
await wait([x, y])
z = c.submit(add, x, y, workers=b.address)
await wait(z)
assert len(b.incoming_transfer_log) == 2
l1, l2 = b.incoming_transfer_log
assert l1["stop"] < l2["start"]
@gen_cluster(client=True)
async def test_log_exception_on_failed_task(c, s, a, b):
with captured_logger("distributed.worker") as logger:
future = c.submit(div, 1, 0)
await wait(future)
await asyncio.sleep(0.1)
text = logger.getvalue()
assert "ZeroDivisionError" in text
assert "Exception" in text
@gen_cluster(client=True)
async def test_clean_up_dependencies(c, s, a, b):
x = delayed(inc)(1)
y = delayed(inc)(2)
xx = delayed(inc)(x)
yy = delayed(inc)(y)
z = delayed(add)(xx, yy)
zz = c.persist(z)
await wait(zz)
while len(a.data) + len(b.data) > 1:
await asyncio.sleep(0.01)
assert set(a.data) | set(b.data) == {zz.key}
@gen_cluster(client=True)
async def test_hold_onto_dependents(c, s, a, b):
x = c.submit(inc, 1, workers=a.address)
y = c.submit(inc, x, workers=b.address)
await wait(y)
assert x.key in b.data
await c._cancel(y)
while x.key not in b.data:
await asyncio.sleep(0.1)
# Normally takes >2s but it has been observed to take >30s occasionally
@pytest.mark.slow
@gen_test(timeout=120)
async def test_worker_death_timeout():
w = Worker("tcp://127.0.0.1:12345", death_timeout=0.1)
with pytest.raises(TimeoutError) as info:
await w
assert "Worker" in str(info.value)
assert "timed out" in str(info.value) or "failed to start" in str(info.value)
assert w.status == Status.closed
@gen_cluster(client=True)
async def test_stop_doing_unnecessary_work(c, s, a, b):
futures = c.map(slowinc, range(1000), delay=0.01)
await asyncio.sleep(0.1)
del futures
start = time()
while a.executing_count:
await asyncio.sleep(0.01)
assert time() - start < 0.5
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)])
async def test_priorities(c, s, w):
values = []
for i in range(10):
a = delayed(slowinc)(i, dask_key_name="a-%d" % i, delay=0.01)
a1 = delayed(inc)(a, dask_key_name="a1-%d" % i)
a2 = delayed(inc)(a1, dask_key_name="a2-%d" % i)
b1 = delayed(dec)(a, dask_key_name="b1-%d" % i) # <<-- least favored
values.append(a2)
values.append(b1)
futures = c.compute(values)
await wait(futures)
log = [
t[0]
for t in w.log
if t[1] == "executing" and t[2] == "memory" and not t[0].startswith("finalize")
]
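    # The low-priority b1-* tasks should not be starved until the very end:
    # at least one of them completes within the first half of the execution log.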
assert any(key.startswith("b1") for key in log[: len(log) // 2])
@gen_cluster(client=True)
async def test_heartbeats(c, s, a, b):
x = s.workers[a.address].last_seen
start = time()
await asyncio.sleep(a.periodic_callbacks["heartbeat"].callback_time / 1000 + 0.1)
while s.workers[a.address].last_seen == x:
await asyncio.sleep(0.01)
assert time() < start + 2
assert a.periodic_callbacks["heartbeat"].callback_time < 1000
@pytest.mark.parametrize("worker", [Worker, Nanny])
def test_worker_dir(worker):
with tmpfile() as fn:
@gen_cluster(client=True, worker_kwargs={"local_directory": fn})
async def test_worker_dir(c, s, a, b):
directories = [w.local_directory for w in s.workers.values()]
assert all(d.startswith(fn) for d in directories)
assert len(set(directories)) == 2 # distinct
test_worker_dir()
@gen_cluster(nthreads=[])
async def test_false_worker_dir(s):
async with Worker(s.address, local_directory="") as w:
local_directory = w.local_directory
cwd = os.getcwd()
assert os.path.dirname(local_directory) == os.path.join(cwd, "dask-worker-space")
@gen_cluster(client=True)
async def test_dataframe_attribute_error(c, s, a, b):
class BadSize:
def __init__(self, data):
self.data = data
def __sizeof__(self):
raise TypeError("Hello")
future = c.submit(BadSize, 123)
result = await future
assert result.data == 123
@gen_cluster(client=True)
async def test_fail_write_to_disk(c, s, a, b):
class Bad:
def __getstate__(self):
raise TypeError()
def __sizeof__(self):
return int(100e9)
future = c.submit(Bad)
await wait(future)
assert future.status == "error"
with pytest.raises(TypeError):
await future
futures = c.map(inc, range(10))
results = await c._gather(futures)
assert results == list(map(inc, range(10)))
@pytest.mark.skip(reason="Our logic here is faulty")
@gen_cluster(
nthreads=[("127.0.0.1", 2)], client=True, worker_kwargs={"memory_limit": 10e9}
)
async def test_fail_write_many_to_disk(c, s, a):
a.validate = False
await asyncio.sleep(0.1)
assert a.status == Status.running
class Bad:
def __init__(self, x):
pass
def __getstate__(self):
raise TypeError()
def __sizeof__(self):
return int(2e9)
futures = c.map(Bad, range(11))
future = c.submit(lambda *args: 123, *futures)
await wait(future)
with pytest.raises(Exception) as info:
await future
# workers still operational
result = await c.submit(inc, 1, workers=a.address)
assert result == 2
@gen_cluster()
async def test_pid(s, a, b):
assert s.workers[a.address].pid == os.getpid()
@gen_cluster(client=True)
async def test_get_client(c, s, a, b):
def f(x):
cc = get_client()
future = cc.submit(inc, x)
return future.result()
assert default_client() is c
future = c.submit(f, 10, workers=a.address)
result = await future
assert result == 11
assert a._client
assert not b._client
assert a._client is c
assert default_client() is c
a_client = a._client
for i in range(10):
await wait(c.submit(f, i))
assert a._client is a_client
def test_get_client_sync(client):
def f(x):
cc = get_client()
future = cc.submit(inc, x)
return future.result()
future = client.submit(f, 10)
assert future.result() == 11
@gen_cluster(client=True)
async def test_get_client_coroutine(c, s, a, b):
async def f():
client = await get_client()
future = client.submit(inc, 10)
result = await future
return result
results = await c.run(f)
assert results == {a.address: 11, b.address: 11}
def test_get_client_coroutine_sync(client, s, a, b):
async def f():
client = await get_client()
future = client.submit(inc, 10)
result = await future
return result
results = client.run(f)
assert results == {a["address"]: 11, b["address"]: 11}
@gen_cluster()
async def test_global_workers(s, a, b):
n = len(Worker._instances)
w = first(Worker._instances)
assert w is a or w is b
@pytest.mark.skipif(WINDOWS, reason="num_fds not supported on windows")
@gen_cluster(nthreads=[])
async def test_worker_fds(s):
proc = psutil.Process()
before = psutil.Process().num_fds()
async with Worker(s.address, loop=s.loop):
assert proc.num_fds() > before
while proc.num_fds() > before:
await asyncio.sleep(0.01)
@gen_cluster(nthreads=[])
async def test_service_hosts_match_worker(s):
async with Worker(s.address, host="tcp://0.0.0.0") as w:
sock = first(w.http_server._sockets.values())
assert sock.getsockname()[0] in ("::", "0.0.0.0")
async with Worker(
s.address, host="tcp://127.0.0.1", dashboard_address="0.0.0.0:0"
) as w:
sock = first(w.http_server._sockets.values())
assert sock.getsockname()[0] in ("::", "0.0.0.0")
async with Worker(s.address, host="tcp://127.0.0.1") as w:
sock = first(w.http_server._sockets.values())
assert sock.getsockname()[0] in ("::", "0.0.0.0")
# See what happens with e.g. `dask-worker --listen-address tcp://:8811`
async with Worker(s.address, host="") as w:
sock = first(w.http_server._sockets.values())
assert sock.getsockname()[0] in ("::", "0.0.0.0")
# Address must be a connectable address. 0.0.0.0 is not!
address_all = w.address.rsplit(":", 1)[0]
assert address_all in ("tcp://[::1]", "tcp://127.0.0.1")
# Check various malformed IPv6 addresses
# Since these hostnames get passed to distributed.comm.address_from_user_args,
# bracketing is mandatory for IPv6.
with pytest.raises(ValueError) as exc:
async with Worker(s.address, host="::") as w:
pass
assert "bracketed" in str(exc)
with pytest.raises(ValueError) as exc:
async with Worker(s.address, host="tcp://::1") as w:
pass
assert "bracketed" in str(exc)
@gen_cluster(nthreads=[])
async def test_start_services(s):
async with Worker(s.address, dashboard_address=1234) as w:
assert w.http_server.port == 1234
@gen_test()
async def test_scheduler_file():
with tmpfile() as fn:
async with Scheduler(scheduler_file=fn, dashboard_address=":0") as s:
async with Worker(scheduler_file=fn) as w:
assert set(s.workers) == {w.address}
@gen_cluster(client=True)
async def test_scheduler_delay(c, s, a, b):
old = a.scheduler_delay
assert abs(a.scheduler_delay) < 0.6
assert abs(b.scheduler_delay) < 0.6
await asyncio.sleep(a.periodic_callbacks["heartbeat"].callback_time / 1000 + 0.6)
assert a.scheduler_delay != old
@pytest.mark.flaky(reruns=10, reruns_delay=5)
@gen_cluster(client=True)
async def test_statistical_profiling(c, s, a, b):
futures = c.map(slowinc, range(10), delay=0.1)
await wait(futures)
profile = a.profile_keys["slowinc"]
assert profile["count"]
@pytest.mark.slow
@nodebug
@gen_cluster(
client=True,
timeout=30,
config={
"distributed.worker.profile.interval": "1ms",
"distributed.worker.profile.cycle": "100ms",
},
)
async def test_statistical_profiling_2(c, s, a, b):
da = pytest.importorskip("dask.array")
while True:
x = da.random.random(1000000, chunks=(10000,))
y = (x + x * 2) - x.sum().persist()
await wait(y)
profile = await a.get_profile()
text = str(profile)
if profile["count"] and "sum" in text and "random" in text:
break
@gen_cluster(
nthreads=[("127.0.0.1", 1)],
client=True,
worker_kwargs={"memory_monitor_interval": 10},
)
async def test_robust_to_bad_sizeof_estimates(c, s, a):
np = pytest.importorskip("numpy")
memory = psutil.Process().memory_info().rss
a.memory_limit = memory / 0.7 + 400e6
class BadAccounting:
def __init__(self, data):
self.data = data
def __sizeof__(self):
return 10
def f(n):
x = np.ones(int(n), dtype="u1")
result = BadAccounting(x)
return result
futures = c.map(f, [100e6] * 8, pure=False)
start = time()
while not a.data.disk:
await asyncio.sleep(0.1)
assert time() < start + 5
@pytest.mark.slow
@gen_cluster(
nthreads=[("127.0.0.1", 2)],
client=True,
worker_kwargs={
"memory_monitor_interval": 10,
"memory_spill_fraction": False, # don't spill
"memory_target_fraction": False,
"memory_pause_fraction": 0.5,
},
)
async def test_pause_executor(c, s, a):
memory = psutil.Process().memory_info().rss
a.memory_limit = memory / 0.5 + 200e6
np = pytest.importorskip("numpy")
def f():
x = np.ones(int(400e6), dtype="u1")
sleep(1)
with captured_logger(logging.getLogger("distributed.worker")) as logger:
future = c.submit(f)
futures = c.map(slowinc, range(30), delay=0.1)
while a.status != Status.paused:
await asyncio.sleep(0.01)
out = logger.getvalue()
assert "memory" in out.lower()
assert "pausing" in out.lower()
assert sum(f.status == "finished" for f in futures) < 4
await wait(futures)
@gen_cluster(client=True, worker_kwargs={"profile_cycle_interval": "50 ms"})
async def test_statistical_profiling_cycle(c, s, a, b):
futures = c.map(slowinc, range(20), delay=0.05)
await wait(futures)
await asyncio.sleep(0.01)
end = time()
assert len(a.profile_history) > 3
x = await a.get_profile(start=time() + 10, stop=time() + 20)
assert not x["count"]
x = await a.get_profile(start=0, stop=time() + 10)
recent = a.profile_recent["count"]
actual = sum(p["count"] for _, p in a.profile_history) + a.profile_recent["count"]
x2 = await a.get_profile(start=0, stop=time() + 10)
assert x["count"] <= actual <= x2["count"]
y = await a.get_profile(start=end - 0.300, stop=time())
assert 0 < y["count"] <= x["count"]
@gen_cluster(client=True)
async def test_get_current_task(c, s, a, b):
def some_name():
return get_worker().get_current_task()
result = await c.submit(some_name)
assert result.startswith("some_name")
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 2)
async def test_reschedule(c, s, a, b):
s.extensions["stealing"]._pc.stop()
a_address = a.address
def f(x):
sleep(0.1)
if get_worker().address == a_address:
raise Reschedule()
futures = c.map(f, range(4))
futures2 = c.map(slowinc, range(10), delay=0.1, workers=a.address)
await wait(futures)
assert all(f.key in b.data for f in futures)
@gen_cluster(nthreads=[])
async def test_deque_handler(s):
from distributed.worker import logger
async with Worker(s.address) as w:
deque_handler = w._deque_handler
logger.info("foo456")
assert deque_handler.deque
msg = deque_handler.deque[-1]
assert "distributed.worker" in deque_handler.format(msg)
assert any(msg.msg == "foo456" for msg in deque_handler.deque)
@gen_cluster(nthreads=[], client=True)
async def test_avoid_memory_monitor_if_zero_limit(c, s):
worker = await Worker(
s.address, loop=s.loop, memory_limit=0, memory_monitor_interval=10
)
assert type(worker.data) is dict
assert "memory" not in worker.periodic_callbacks
future = c.submit(inc, 1)
assert (await future) == 2
await asyncio.sleep(worker.memory_monitor_interval / 1000)
await c.submit(inc, 2) # worker doesn't pause
await worker.close()
@gen_cluster(
nthreads=[("127.0.0.1", 1)],
config={
"distributed.worker.memory.spill": False,
"distributed.worker.memory.target": False,
},
)
async def test_dict_data_if_no_spill_to_disk(s, w):
assert type(w.data) is dict
def test_get_worker_name(client):
def f():
get_client().submit(inc, 1).result()
client.run(f)
def func(dask_scheduler):
return list(dask_scheduler.clients)
start = time()
while not any("worker" in n for n in client.run_on_scheduler(func)):
sleep(0.1)
assert time() < start + 10
@gen_cluster(nthreads=[("127.0.0.1", 1)], worker_kwargs={"memory_limit": "2e3 MB"})
async def test_parse_memory_limit(s, w):
assert w.memory_limit == 2e9
@gen_cluster(nthreads=[], client=True)
async def test_scheduler_address_config(c, s):
with dask.config.set({"scheduler-address": s.address}):
worker = await Worker(loop=s.loop)
assert worker.scheduler.address == s.address
await worker.close()
@pytest.mark.xfail(reason="very high flakiness")
@pytest.mark.slow
@gen_cluster(client=True)
async def test_wait_for_outgoing(c, s, a, b):
np = pytest.importorskip("numpy")
x = np.random.random(10000000)
future = await c.scatter(x, workers=a.address)
y = c.submit(inc, future, workers=b.address)
await wait(y)
assert len(b.incoming_transfer_log) == len(a.outgoing_transfer_log) == 1
bb = b.incoming_transfer_log[0]["duration"]
aa = a.outgoing_transfer_log[0]["duration"]
ratio = aa / bb
assert 1 / 3 < ratio < 3
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster(
nthreads=[("127.0.0.1", 1), ("127.0.0.1", 1), ("127.0.0.2", 1)], client=True
)
async def test_prefer_gather_from_local_address(c, s, w1, w2, w3):
x = await c.scatter(123, workers=[w1.address, w3.address], broadcast=True)
y = c.submit(inc, x, workers=[w2.address])
await wait(y)
assert any(d["who"] == w2.address for d in w1.outgoing_transfer_log)
assert not any(d["who"] == w2.address for d in w3.outgoing_transfer_log)
@gen_cluster(
client=True,
nthreads=[("127.0.0.1", 1)] * 20,
timeout=30,
config={"distributed.worker.connections.incoming": 1},
)
async def test_avoid_oversubscription(c, s, *workers):
np = pytest.importorskip("numpy")
x = c.submit(np.random.random, 1000000, workers=[workers[0].address])
await wait(x)
futures = [c.submit(len, x, pure=False, workers=[w.address]) for w in workers[1:]]
await wait(futures)
# Original worker not responsible for all transfers
assert len(workers[0].outgoing_transfer_log) < len(workers) - 2
# Some other workers did some work
assert len([w for w in workers if len(w.outgoing_transfer_log) > 0]) >= 3
@gen_cluster(client=True, worker_kwargs={"metrics": {"my_port": lambda w: w.port}})
async def test_custom_metrics(c, s, a, b):
assert s.workers[a.address].metrics["my_port"] == a.port
assert s.workers[b.address].metrics["my_port"] == b.port
@gen_cluster(client=True)
async def test_register_worker_callbacks(c, s, a, b):
# preload function to run
def mystartup(dask_worker):
dask_worker.init_variable = 1
def mystartup2():
import os
os.environ["MY_ENV_VALUE"] = "WORKER_ENV_VALUE"
return "Env set."
# Check that preload function has been run
def test_import(dask_worker):
return hasattr(dask_worker, "init_variable")
# and dask_worker.init_variable == 1
def test_startup2():
import os
return os.getenv("MY_ENV_VALUE", None) == "WORKER_ENV_VALUE"
# Nothing has been run yet
result = await c.run(test_import)
assert list(result.values()) == [False] * 2
result = await c.run(test_startup2)
assert list(result.values()) == [False] * 2
# Start a worker and check that startup is not run
worker = await Worker(s.address, loop=s.loop)
result = await c.run(test_import, workers=[worker.address])
assert list(result.values()) == [False]
await worker.close()
# Add a preload function
response = await c.register_worker_callbacks(setup=mystartup)
assert len(response) == 2
    # Check it has been run on the existing workers
result = await c.run(test_import)
assert list(result.values()) == [True] * 2
    # Start a worker and check it is run on it
worker = await Worker(s.address, loop=s.loop)
result = await c.run(test_import, workers=[worker.address])
assert list(result.values()) == [True]
await worker.close()
# Register another preload function
response = await c.register_worker_callbacks(setup=mystartup2)
assert len(response) == 2
# Check it has been run
result = await c.run(test_startup2)
assert list(result.values()) == [True] * 2
    # Start a worker and check it is run on it
worker = await Worker(s.address, loop=s.loop)
result = await c.run(test_import, workers=[worker.address])
assert list(result.values()) == [True]
result = await c.run(test_startup2, workers=[worker.address])
assert list(result.values()) == [True]
await worker.close()
@gen_cluster(client=True)
async def test_register_worker_callbacks_err(c, s, a, b):
with pytest.raises(ZeroDivisionError):
await c.register_worker_callbacks(setup=lambda: 1 / 0)
@gen_cluster(nthreads=[])
async def test_data_types(s):
w = await Worker(s.address, data=dict)
assert isinstance(w.data, dict)
await w.close()
data = dict()
w = await Worker(s.address, data=data)
assert w.data is data
await w.close()
class Data(dict):
def __init__(self, x, y):
self.x = x
self.y = y
w = await Worker(s.address, data=(Data, {"x": 123, "y": 456}))
assert w.data.x == 123
assert w.data.y == 456
await w.close()
@gen_cluster(nthreads=[])
async def test_local_directory(s):
with tmpfile() as fn:
with dask.config.set(temporary_directory=fn):
w = await Worker(s.address)
assert w.local_directory.startswith(fn)
assert "dask-worker-space" in w.local_directory
@gen_cluster(nthreads=[])
async def test_local_directory_make_new_directory(s):
with tmpfile() as fn:
w = await Worker(s.address, local_directory=os.path.join(fn, "foo", "bar"))
assert w.local_directory.startswith(fn)
assert "foo" in w.local_directory
assert "dask-worker-space" in w.local_directory
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster(nthreads=[], client=True)
async def test_host_address(c, s):
w = await Worker(s.address, host="127.0.0.2")
assert "127.0.0.2" in w.address
await w.close()
n = await Nanny(s.address, host="127.0.0.3")
assert "127.0.0.3" in n.address
assert "127.0.0.3" in n.worker_address
await n.close()
def test_resource_limit(monkeypatch):
assert parse_memory_limit("250MiB", 1, total_cores=1) == 1024 * 1024 * 250
new_limit = 1024 * 1024 * 200
import distributed.worker
monkeypatch.setattr(distributed.system, "MEMORY_LIMIT", new_limit)
assert parse_memory_limit("250MiB", 1, total_cores=1) == new_limit
@pytest.mark.asyncio
@pytest.mark.parametrize("Worker", [Worker, Nanny])
async def test_interface_async(cleanup, loop, Worker):
from distributed.utils import get_ip_interface
psutil = pytest.importorskip("psutil")
if_names = sorted(psutil.net_if_addrs())
for if_name in if_names:
try:
ipv4_addr = get_ip_interface(if_name)
except ValueError:
pass
else:
if ipv4_addr == "127.0.0.1":
break
else:
pytest.skip(
"Could not find loopback interface. "
"Available interfaces are: %s." % (if_names,)
)
async with Scheduler(dashboard_address=":0", interface=if_name) as s:
assert s.address.startswith("tcp://127.0.0.1")
async with Worker(s.address, interface=if_name) as w:
assert w.address.startswith("tcp://127.0.0.1")
assert w.ip == "127.0.0.1"
async with Client(s.address, asynchronous=True) as c:
info = c.scheduler_info()
assert "tcp://127.0.0.1" in info["address"]
assert all("127.0.0.1" == d["host"] for d in info["workers"].values())
@pytest.mark.asyncio
@pytest.mark.parametrize("Worker", [Worker, Nanny])
async def test_protocol_from_scheduler_address(cleanup, Worker):
pytest.importorskip("ucp")
async with Scheduler(protocol="ucx", dashboard_address=":0") as s:
assert s.address.startswith("ucx://")
async with Worker(s.address) as w:
assert w.address.startswith("ucx://")
async with Client(s.address, asynchronous=True) as c:
info = c.scheduler_info()
assert info["address"].startswith("ucx://")
@pytest.mark.asyncio
async def test_host_uses_scheduler_protocol(cleanup, monkeypatch):
# Ensure worker uses scheduler's protocol to determine host address, not the default scheme
# See https://github.com/dask/distributed/pull/4883
class BadBackend(TCPBackend):
def get_address_host(self, loc):
raise ValueError("asdf")
monkeypatch.setitem(backends, "foo", BadBackend())
with dask.config.set({"distributed.comm.default-scheme": "foo"}):
async with Scheduler(protocol="tcp", dashboard_address=":0") as s:
async with Worker(s.address):
# Ensure that worker is able to properly start up
# without BadBackend.get_address_host raising a ValueError
pass
@pytest.mark.asyncio
@pytest.mark.parametrize("Worker", [Worker, Nanny])
async def test_worker_listens_on_same_interface_by_default(cleanup, Worker):
async with Scheduler(host="localhost", dashboard_address=":0") as s:
assert s.ip in {"127.0.0.1", "localhost"}
async with Worker(s.address) as w:
assert s.ip == w.ip
@gen_cluster(client=True)
async def test_close_gracefully(c, s, a, b):
futures = c.map(slowinc, range(200), delay=0.1)
while not b.data:
await asyncio.sleep(0.1)
mem = set(b.data)
proc = [ts for ts in b.tasks.values() if ts.state == "executing"]
await b.close_gracefully()
assert b.status == Status.closed
assert b.address not in s.workers
assert mem.issubset(set(a.data))
for ts in proc:
assert ts.state in ("executing", "memory")
@pytest.mark.slow
@gen_cluster(client=True, nthreads=[])
async def test_lifetime(c, s):
async with Worker(s.address) as a, Worker(s.address, lifetime="1 seconds") as b:
        futures = c.map(slowinc, range(200), delay=0.1, workers=[b.address])
await asyncio.sleep(1.5)
assert b.status not in (Status.running, Status.paused)
await b.finished()
assert set(b.data) == set(a.data) # successfully moved data over
@gen_cluster(worker_kwargs={"lifetime": "10s", "lifetime_stagger": "2s"})
async def test_lifetime_stagger(s, a, b):
assert a.lifetime != b.lifetime
assert 8 <= a.lifetime <= 12
assert 8 <= b.lifetime <= 12
@gen_cluster(nthreads=[])
async def test_bad_metrics(s):
def bad_metric(w):
raise Exception("Hello")
async with Worker(s.address, metrics={"bad": bad_metric}) as w:
assert "bad" not in s.workers[w.address].metrics
@gen_cluster(nthreads=[])
async def test_bad_startup(s):
def bad_startup(w):
raise Exception("Hello")
try:
await Worker(s.address, startup_information={"bad": bad_startup})
except Exception:
pytest.fail("Startup exception was raised")
@gen_cluster(client=True)
async def test_pip_install(c, s, a, b):
with mock.patch(
"distributed.diagnostics.plugin.subprocess.Popen.communicate",
return_value=(b"", b""),
) as p1:
with mock.patch(
"distributed.diagnostics.plugin.subprocess.Popen", return_value=p1
) as p2:
p1.communicate.return_value = b"", b""
p1.wait.return_value = 0
await c.register_worker_plugin(
PipInstall(packages=["requests"], pip_options=["--upgrade"])
)
args = p2.call_args[0][0]
assert "python" in args[0]
assert args[1:] == ["-m", "pip", "install", "--upgrade", "requests"]
@gen_cluster(client=True)
async def test_pip_install_fails(c, s, a, b):
with captured_logger(
"distributed.diagnostics.plugin", level=logging.ERROR
) as logger:
with mock.patch(
"distributed.diagnostics.plugin.subprocess.Popen.communicate",
return_value=(b"", b"error"),
) as p1:
with mock.patch(
"distributed.diagnostics.plugin.subprocess.Popen", return_value=p1
) as p2:
p1.communicate.return_value = (
b"",
b"Could not find a version that satisfies the requirement not-a-package",
)
p1.wait.return_value = 1
await c.register_worker_plugin(PipInstall(packages=["not-a-package"]))
assert "not-a-package" in logger.getvalue()
# args = p2.call_args[0][0]
# assert "python" in args[0]
# assert args[1:] == ["-m", "pip", "--upgrade", "install", "requests"]
@gen_cluster(nthreads=[])
async def test_update_latency(s):
async with await Worker(s.address) as w:
original = w.latency
await w.heartbeat()
assert original != w.latency
if w.digests is not None:
assert w.digests["latency"].size() > 0
@pytest.mark.slow
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)])
async def test_workerstate_executing(c, s, a):
ws = s.workers[a.address]
# Initially there are no active tasks
assert not ws.executing
# Submit a task and ensure the WorkerState is updated with the task
# it's executing
f = c.submit(slowinc, 1, delay=3)
while not ws.executing:
assert f.status == "pending"
await asyncio.sleep(0.01)
assert s.tasks[f.key] in ws.executing
await f
@pytest.mark.parametrize("reconnect", [True, False])
@gen_cluster(nthreads=[])
async def test_heartbeat_comm_closed(s, monkeypatch, reconnect):
with captured_logger("distributed.worker", level=logging.WARNING) as logger:
def bad_heartbeat_worker(*args, **kwargs):
raise CommClosedError()
async with await Worker(s.address, reconnect=reconnect) as w:
# Trigger CommClosedError during worker heartbeat
monkeypatch.setattr(w.scheduler, "heartbeat_worker", bad_heartbeat_worker)
await w.heartbeat()
if reconnect:
assert w.status == Status.running
else:
assert w.status == Status.closed
assert "Heartbeat to scheduler failed" in logger.getvalue()
@gen_cluster(nthreads=[])
async def test_bad_local_directory(s):
try:
async with Worker(s.address, local_directory="/not/a/valid-directory"):
pass
except OSError:
# On Linux: [Errno 13] Permission denied: '/not'
# On MacOSX: [Errno 30] Read-only file system: '/not'
pass
else:
assert WINDOWS
assert not any("error" in log for log in s.get_logs())
@gen_cluster(client=True, nthreads=[])
async def test_taskstate_metadata(c, s):
async with await Worker(s.address) as w:
await c.register_worker_plugin(TaskStateMetadataPlugin())
f = c.submit(inc, 1)
await f
ts = w.tasks[f.key]
assert "start_time" in ts.metadata
assert "stop_time" in ts.metadata
assert ts.metadata["stop_time"] > ts.metadata["start_time"]
# Check that Scheduler TaskState.metadata was also updated
assert s.tasks[f.key].metadata == ts.metadata
@gen_cluster(client=True, nthreads=[])
async def test_executor_offload(c, s, monkeypatch):
class SameThreadClass:
def __getstate__(self):
return ()
def __setstate__(self, state):
self._thread_ident = threading.get_ident()
return self
monkeypatch.setattr("distributed.worker.OFFLOAD_THRESHOLD", 1)
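    # Setting the offload threshold to a single byte forces argument
    # deserialization onto the offload executor thread; because the worker also
    # executes tasks there (executor="offload"), the task should observe the
    # same thread ident that __setstate__ recorded.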
async with Worker(s.address, executor="offload") as w:
from distributed.utils import _offload_executor
assert w.executor is _offload_executor
x = SameThreadClass()
def f(x):
return threading.get_ident() == x._thread_ident
assert await c.submit(f, x)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)])
async def test_story(c, s, w):
future = c.submit(inc, 1)
await future
ts = w.tasks[future.key]
assert ts.state in str(w.story(ts))
assert w.story(ts) == w.story(ts.key)
@gen_cluster(client=True)
async def test_story_with_deps(c, s, a, b):
"""
Assert that the structure of the story does not change unintentionally and
expected subfields are actually filled
"""
dep = c.submit(inc, 1, workers=[a.address])
res = c.submit(inc, dep, workers=[b.address])
await res
key = res.key
story = a.story(key)
assert story == []
story = b.story(key)
pruned_story = []
stimulus_ids = set()
# Story now includes randomized stimulus_ids and timestamps.
for msg in story:
assert isinstance(msg, tuple), msg
assert isinstance(msg[-1], float), msg
assert msg[-1] > time() - 60, msg
pruned_msg = list(msg)
stimulus_ids.add(msg[-2])
pruned_story.append(tuple(pruned_msg[:-2]))
assert len(stimulus_ids) == 3, stimulus_ids
stimulus_id = pruned_story[0][-1]
assert isinstance(stimulus_id, str)
assert stimulus_id.startswith("compute-task")
# This is a simple transition log
expected_story = [
(key, "compute-task"),
(key, "released", "waiting", "waiting", {dep.key: "fetch"}),
(key, "waiting", "ready", "ready", {}),
(key, "ready", "executing", "executing", {}),
(key, "put-in-memory"),
(key, "executing", "memory", "memory", {}),
]
assert pruned_story == expected_story
dep_story = dep.key
story = b.story(dep_story)
pruned_story = []
stimulus_ids = set()
for msg in story:
assert isinstance(msg, tuple), msg
assert isinstance(msg[-1], float), msg
assert msg[-1] > time() - 60, msg
pruned_msg = list(msg)
stimulus_ids.add(msg[-2])
pruned_story.append(tuple(pruned_msg[:-2]))
assert len(stimulus_ids) == 2, stimulus_ids
stimulus_id = pruned_story[0][-1]
assert isinstance(stimulus_id, str)
expected_story = [
(dep_story, "ensure-task-exists", "released"),
(dep_story, "released", "fetch", "fetch", {}),
(
"gather-dependencies",
a.address,
{dep.key},
),
(dep_story, "fetch", "flight", "flight", {}),
(
"request-dep",
a.address,
{dep.key},
),
(
"receive-dep",
a.address,
{dep.key},
),
(dep_story, "put-in-memory"),
(dep_story, "flight", "memory", "memory", {res.key: "ready"}),
]
assert pruned_story == expected_story
@gen_cluster(client=True)
async def test_gather_dep_one_worker_always_busy(c, s, a, b):
    # Ensure that both dependencies for H are on another worker than H itself.
    # The worker holding the dependencies is later blocked so that the data
    # cannot be fetched.
    # In the past it was important that there is more than one key on the
    # worker. This should be kept to avoid any edge case specific to a single key.
f = c.submit(inc, 1, workers=[a.address])
g = c.submit(
inc,
2,
workers=[a.address],
)
await f
await g
# We will block A for any outgoing communication. This simulates an
# overloaded worker which will always return "busy" for get_data requests,
# effectively blocking H indefinitely
a.outgoing_current_count = 10000000
assert f.key in a.tasks
assert g.key in a.tasks
# Ensure there are actually two distinct tasks and not some pure=True
# caching
assert f.key != g.key
h = c.submit(add, f, g, workers=[b.address])
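    # h can never finish while A keeps reporting "busy", so the short timeout
    # below is expected to fire; meanwhile we can inspect the intermediate
    # task states on B.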
fut = asyncio.wait_for(h, 0.1)
while h.key not in b.tasks:
await asyncio.sleep(0.01)
ts_h = b.tasks[h.key]
ts_f = b.tasks[f.key]
ts_g = b.tasks[g.key]
with pytest.raises(asyncio.TimeoutError):
assert ts_h.state == "waiting"
assert ts_f.state in ["flight", "fetch"]
assert ts_g.state in ["flight", "fetch"]
await fut
# Ensure B wasn't lazy but tried at least once
assert b.repetitively_busy
x = await Worker(s.address, name="x")
# We "scatter" the data to another worker which is able to serve this data.
# In reality this could be another worker which fetched this dependency and
# got through to A or another worker executed the task using work stealing
# or any other. To avoid cross effects, we'll just put the data onto the
# worker ourselves
x.update_data(data={key: a.data[key] for key in [f.key, g.key]})
assert await h == 5
# Since we put the data onto the worker ourselves, the gather_dep might
# still be mid execution and we'll get a dangling task. Let it finish
# naturally
while any(["Worker.gather_dep" in str(t) for t in asyncio.all_tasks()]):
await asyncio.sleep(0.05)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 0)])
async def test_worker_client_uses_default_no_close(c, s, a):
"""
If a default client is available in the process, the worker will pick this
one and will not close it if it is closed
"""
assert not Worker._initialized_clients
assert default_client() is c
existing_client = c.id
def get_worker_client_id():
def_client = get_client()
return def_client.id
worker_client = await c.submit(get_worker_client_id)
assert worker_client == existing_client
assert not Worker._initialized_clients
await a.close()
assert len(Client._instances) == 1
assert c.status == "running"
c_def = default_client()
assert c is c_def
@gen_cluster(nthreads=[("127.0.0.1", 0)])
async def test_worker_client_closes_if_created_on_worker_one_worker(s, a):
async with Client(s.address, set_as_default=False, asynchronous=True) as c:
with pytest.raises(ValueError):
default_client()
def get_worker_client_id():
def_client = get_client()
return def_client.id
new_client_id = await c.submit(get_worker_client_id)
default_client_id = await c.submit(get_worker_client_id)
assert new_client_id != c.id
assert new_client_id == default_client_id
new_client = default_client()
assert new_client_id == new_client.id
assert new_client.status == "running"
# If a worker closes, all clients created on it should close as well
await a.close()
assert new_client.status == "closed"
assert len(Client._instances) == 2
assert c.status == "running"
with pytest.raises(ValueError):
default_client()
@gen_cluster()
async def test_worker_client_closes_if_created_on_worker_last_worker_alive(s, a, b):
async with Client(s.address, set_as_default=False, asynchronous=True) as c:
with pytest.raises(ValueError):
default_client()
def get_worker_client_id():
def_client = get_client()
return def_client.id
new_client_id = await c.submit(get_worker_client_id, workers=[a.address])
default_client_id = await c.submit(get_worker_client_id, workers=[a.address])
default_client_id_b = await c.submit(get_worker_client_id, workers=[b.address])
assert not b._comms
assert new_client_id != c.id
assert new_client_id == default_client_id
assert new_client_id == default_client_id_b
new_client = default_client()
assert new_client_id == new_client.id
assert new_client.status == "running"
# We'll close A. This should *not* close the client since the client is also used by B
await a.close()
assert new_client.status == "running"
client_id_b_after = await c.submit(get_worker_client_id, workers=[b.address])
assert client_id_b_after == default_client_id_b
assert len(Client._instances) == 2
await b.close()
assert new_client.status == "closed"
assert c.status == "running"
with pytest.raises(ValueError):
default_client()
@gen_cluster(client=True, nthreads=[])
async def test_multiple_executors(c, s):
def get_thread_name():
return threading.current_thread().name
async with Worker(
s.address,
nthreads=2,
executor={"foo": ThreadPoolExecutor(1, thread_name_prefix="Dask-Foo-Threads")},
):
futures = []
with dask.annotate(executor="default"):
futures.append(c.submit(get_thread_name, pure=False))
with dask.annotate(executor="foo"):
futures.append(c.submit(get_thread_name, pure=False))
default_result, gpu_result = await c.gather(futures)
assert "Dask-Default-Threads" in default_result
assert "Dask-Foo-Threads" in gpu_result
@gen_cluster(client=True)
async def test_process_executor(c, s, a, b):
with ProcessPoolExecutor() as e:
a.executors["processes"] = e
b.executors["processes"] = e
future = c.submit(os.getpid, pure=False)
assert (await future) == os.getpid()
with dask.annotate(executor="processes"):
future = c.submit(os.getpid, pure=False)
assert (await future) != os.getpid()
def kill_process():
import os
import signal
if WINDOWS:
# There's no SIGKILL on Windows
sig = signal.SIGTERM
else:
# With SIGTERM there may be several seconds worth of delay before the worker
# actually shuts down - particularly on slow CI. Use SIGKILL for instant
# termination.
sig = signal.SIGKILL
os.kill(os.getpid(), sig)
sleep(60) # Cope with non-instantaneous termination
@gen_cluster(nthreads=[("127.0.0.1", 1)], client=True)
async def test_process_executor_kills_process(c, s, a):
with ProcessPoolExecutor() as e:
a.executors["processes"] = e
with dask.annotate(executor="processes", retries=1):
future = c.submit(kill_process)
msg = "A child process terminated abruptly, the process pool is not usable anymore"
with pytest.raises(BrokenProcessPool, match=msg):
await future
with dask.annotate(executor="processes", retries=1):
future = c.submit(inc, 1)
# The process pool is now unusable and the worker is effectively dead
with pytest.raises(BrokenProcessPool, match=msg):
await future
def raise_exc():
raise RuntimeError("foo")
@gen_cluster(client=True)
async def test_process_executor_raise_exception(c, s, a, b):
with ProcessPoolExecutor() as e:
a.executors["processes"] = e
b.executors["processes"] = e
with dask.annotate(executor="processes", retries=1):
future = c.submit(raise_exc)
with pytest.raises(RuntimeError, match="foo"):
await future
@pytest.mark.gpu
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)])
async def test_gpu_executor(c, s, w):
if nvml.device_get_count() > 0:
e = w.executors["gpu"]
assert isinstance(e, distributed.threadpoolexecutor.ThreadPoolExecutor)
assert e._max_workers == 1
else:
assert "gpu" not in w.executors
def assert_task_states_on_worker(expected, worker):
for dep_key, expected_state in expected.items():
assert dep_key in worker.tasks, (worker.name, dep_key, worker.tasks)
dep_ts = worker.tasks[dep_key]
assert dep_ts.state == expected_state, (worker.name, dep_ts, expected_state)
assert set(expected) == set(worker.tasks)
@gen_cluster(client=True)
async def test_worker_state_error_release_error_last(c, s, a, b):
"""
Create a chain of tasks and err one of them. Then release tasks in a certain
order and ensure the tasks are released and/or kept in memory as appropriate
F -- RES (error)
/
/
G
Free error last
"""
def raise_exc(*args):
raise RuntimeError()
f = c.submit(inc, 1, workers=[a.address], key="f")
g = c.submit(inc, 1, workers=[b.address], key="g")
res = c.submit(raise_exc, f, g, workers=[a.address])
with pytest.raises(RuntimeError):
await res
# Nothing bad happened on B, therefore B should hold on to G
assert len(b.tasks) == 1
assert g.key in b.tasks
# A raised the exception therefore we should hold on to the erroneous task
assert res.key in a.tasks
ts = a.tasks[res.key]
assert ts.state == "error"
expected_states = {
# A was instructed to compute this result and we're still holding a ref via `f`
f.key: "memory",
# This was fetched from another worker. While we hold a ref via `g`, the
# scheduler only instructed to compute this on B
g.key: "memory",
res.key: "error",
}
assert_task_states_on_worker(expected_states, a)
# Expected states after we release references to the futures
f.release()
g.release()
    # We no longer hold any refs to f or g and B didn't have any errors. It
    # releases everything as expected.
while b.tasks:
await asyncio.sleep(0.01)
expected_states = {
f.key: "released",
g.key: "released",
res.key: "error",
}
assert_task_states_on_worker(expected_states, a)
res.release()
# We no longer hold any refs. Cluster should reset completely
# This is not happening
for server in [s, a, b]:
while server.tasks:
await asyncio.sleep(0.01)
@gen_cluster(client=True)
async def test_worker_state_error_release_error_first(c, s, a, b):
"""
Create a chain of tasks and err one of them. Then release tasks in a certain
order and ensure the tasks are released and/or kept in memory as appropriate
F -- RES (error)
/
/
G
Free error first
"""
def raise_exc(*args):
raise RuntimeError()
f = c.submit(inc, 1, workers=[a.address], key="f")
g = c.submit(inc, 1, workers=[b.address], key="g")
res = c.submit(raise_exc, f, g, workers=[a.address])
with pytest.raises(RuntimeError):
await res
# Nothing bad happened on B, therefore B should hold on to G
assert len(b.tasks) == 1
assert g.key in b.tasks
# A raised the exception therefore we should hold on to the erroneous task
assert res.key in a.tasks
ts = a.tasks[res.key]
assert ts.state == "error"
expected_states = {
# A was instructed to compute this result and we're still holding a ref
# via `f`
f.key: "memory",
# This was fetched from another worker. While we hold a ref via `g`, the
# scheduler only instructed to compute this on B
g.key: "memory",
res.key: "error",
}
assert_task_states_on_worker(expected_states, a)
# Expected states after we release references to the futures
res.release()
    # Only the erroneous task was released; we still hold refs to f and g. A
    # should forget res while keeping f and g in memory. B had no errors.
while res.key in a.tasks:
await asyncio.sleep(0.01)
expected_states = {
f.key: "memory",
g.key: "memory",
}
assert_task_states_on_worker(expected_states, a)
f.release()
g.release()
for server in [s, a, b]:
while server.tasks:
await asyncio.sleep(0.01)
@gen_cluster(client=True)
async def test_worker_state_error_release_error_int(c, s, a, b):
"""
Create a chain of tasks and err one of them. Then release tasks in a certain
order and ensure the tasks are released and/or kept in memory as appropriate
F -- RES (error)
/
/
G
Free one successful task, then error, then last task
"""
def raise_exc(*args):
raise RuntimeError()
f = c.submit(inc, 1, workers=[a.address], key="f")
g = c.submit(inc, 1, workers=[b.address], key="g")
res = c.submit(raise_exc, f, g, workers=[a.address])
with pytest.raises(RuntimeError):
await res
# Nothing bad happened on B, therefore B should hold on to G
assert len(b.tasks) == 1
assert g.key in b.tasks
# A raised the exception therefore we should hold on to the erroneous task
assert res.key in a.tasks
ts = a.tasks[res.key]
assert ts.state == "error"
expected_states = {
# A was instructed to compute this result and we're still holding a ref via `f`
f.key: "memory",
# This was fetched from another worker. While we hold a ref via `g`, the
# scheduler only instructed to compute this on B
g.key: "memory",
res.key: "error",
}
assert_task_states_on_worker(expected_states, a)
# Expected states after we release references to the futures
f.release()
res.release()
    # We released f and the erroneous task but still hold a ref to g. B had no
    # errors, so both workers should end up holding only g in memory.
while len(a.tasks) > 1:
await asyncio.sleep(0.01)
expected_states = {
g.key: "memory",
}
assert_task_states_on_worker(expected_states, a)
assert_task_states_on_worker(expected_states, b)
g.release()
# We no longer hold any refs. Cluster should reset completely
for server in [s, a, b]:
while server.tasks:
await asyncio.sleep(0.01)
@gen_cluster(client=True)
async def test_worker_state_error_long_chain(c, s, a, b):
def raise_exc(*args):
raise RuntimeError()
# f (A) --------> res (B)
# /
# g (B) -> h (A)
f = c.submit(inc, 1, workers=[a.address], key="f", allow_other_workers=False)
g = c.submit(inc, 1, workers=[b.address], key="g", allow_other_workers=False)
h = c.submit(inc, g, workers=[a.address], key="h", allow_other_workers=False)
res = c.submit(
raise_exc, f, h, workers=[b.address], allow_other_workers=False, key="res"
)
with pytest.raises(RuntimeError):
await res
expected_states_A = {
f.key: "memory",
g.key: "memory",
h.key: "memory",
}
await asyncio.sleep(0.05)
assert_task_states_on_worker(expected_states_A, a)
expected_states_B = {
f.key: "memory",
g.key: "memory",
h.key: "memory",
res.key: "error",
}
await asyncio.sleep(0.05)
assert_task_states_on_worker(expected_states_B, b)
f.release()
expected_states_A = {
g.key: "memory",
h.key: "memory",
}
await asyncio.sleep(0.05)
assert_task_states_on_worker(expected_states_A, a)
expected_states_B = {
f.key: "released",
g.key: "memory",
h.key: "memory",
res.key: "error",
}
await asyncio.sleep(0.05)
assert_task_states_on_worker(expected_states_B, b)
g.release()
expected_states_A = {
g.key: "released",
h.key: "memory",
}
await asyncio.sleep(0.05)
assert_task_states_on_worker(expected_states_A, a)
    # B must not forget these tasks since they all still have a valid dependent
expected_states_B = {
f.key: "released",
h.key: "memory",
res.key: "error",
}
assert_task_states_on_worker(expected_states_B, b)
h.release()
await asyncio.sleep(0.05)
expected_states_A = {}
assert_task_states_on_worker(expected_states_A, a)
expected_states_B = {
f.key: "released",
h.key: "released",
res.key: "error",
}
assert_task_states_on_worker(expected_states_B, b)
res.release()
# We no longer hold any refs. Cluster should reset completely
for server in [s, a, b]:
while server.tasks:
await asyncio.sleep(0.01)
@gen_cluster(client=True, nthreads=[("127.0.0.1", x) for x in range(4)])
async def test_hold_on_to_replicas(c, s, *workers):
f1 = c.submit(inc, 1, workers=[workers[0].address], key="f1")
f2 = c.submit(inc, 2, workers=[workers[1].address], key="f2")
sum_1 = c.submit(
slowsum, [f1, f2], delay=0.1, workers=[workers[2].address], key="sum"
)
sum_2 = c.submit(
slowsum, [f1, sum_1], delay=0.2, workers=[workers[3].address], key="sum_2"
)
f1.release()
f2.release()
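    # Even though the client dropped its references to f1/f2, the workers must
    # keep those replicas alive for as long as sum_1/sum_2 still depend on them.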
while sum_2.key not in workers[3].tasks:
await asyncio.sleep(0.01)
while not workers[3].tasks[sum_2.key].state == "memory":
assert len(s.tasks[f1.key].who_has) >= 2
assert s.tasks[f2.key].state == "released"
await asyncio.sleep(0.01)
while len(workers[2].data) > 1:
await asyncio.sleep(0.01)
@gen_cluster(client=True)
async def test_worker_reconnects_mid_compute(c, s, a, b):
"""Ensure that, if a worker disconnects while computing a result, the scheduler will
still accept the result.
There is also an edge case tested which ensures that the reconnect is
successful if a task is currently executing; see
https://github.com/dask/distributed/issues/5078
See also distributed.tests.test_scheduler.py::test_gather_allow_worker_reconnect
"""
with captured_logger("distributed.scheduler") as s_logs:
# Let's put one task in memory to ensure the reconnect has tasks in
# different states
f1 = c.submit(inc, 1, workers=[a.address], allow_other_workers=True)
await f1
a_address = a.address
a.periodic_callbacks["heartbeat"].stop()
await a.heartbeat()
a.heartbeat_active = True
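        # Stop automatic heartbeats and mark one as already in flight so that A
        # does not re-register with the scheduler before the test triggers it.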
from distributed import Lock
def fast_on_a(lock):
w = get_worker()
import time
if w.address != a_address:
lock.acquire()
else:
time.sleep(1)
lock = Lock()
# We want to be sure that A is the only one computing this result
async with lock:
f2 = c.submit(
fast_on_a, lock, workers=[a.address], allow_other_workers=True
)
while f2.key not in a.tasks:
await asyncio.sleep(0.01)
await s.stream_comms[a.address].close()
assert len(s.workers) == 1
a.heartbeat_active = False
await a.heartbeat()
assert len(s.workers) == 2
# Since B is locked, this is ensured to originate from A
await f2
assert "Unexpected worker completed task" in s_logs.getvalue()
# Ensure that all in-memory tasks on A have been restored on the
# scheduler after reconnect
for ts in a.tasks.values():
if ts.state == "memory":
assert a.address in {ws.address for ws in s.tasks[ts.key].who_has}
# Ensure that all keys have been properly registered and will also be
# cleaned up nicely.
del f1, f2
while any(w.tasks for w in [a, b]):
await asyncio.sleep(0.001)
@gen_cluster(client=True)
async def test_worker_reconnects_mid_compute_multiple_states_on_scheduler(c, s, a, b):
"""
Ensure that a reconnecting worker does not break the scheduler regardless of
what state the keys of the worker are in when it connects back
See also test_worker_reconnects_mid_compute which uses a smaller chain of
tasks and does not release f1 in between
"""
with captured_logger("distributed.scheduler") as s_logs:
# Let's put one task in memory to ensure the reconnect has tasks in
# different states
f1 = c.submit(inc, 1, workers=[a.address], allow_other_workers=True)
f2 = c.submit(inc, f1, workers=[a.address], allow_other_workers=True)
a_address = a.address
a.periodic_callbacks["heartbeat"].stop()
await a.heartbeat()
a.heartbeat_active = True
from distributed import Lock
def fast_on_a(lock):
w = get_worker()
import time
if w.address != a_address:
lock.acquire()
else:
time.sleep(1)
lock = Lock()
# We want to be sure that A is the only one computing this result
async with lock:
f3 = c.submit(
fast_on_a, lock, workers=[a.address], allow_other_workers=True
)
while f3.key not in a.tasks:
await asyncio.sleep(0.01)
await s.stream_comms[a.address].close()
f1.release()
assert len(s.workers) == 1
while s.tasks[f1.key].state != "released":
await asyncio.sleep(0)
a.heartbeat_active = False
await a.heartbeat()
assert len(s.workers) == 2
# Since B is locked, this is ensured to originate from A
await f3
assert "Unexpected worker completed task" in s_logs.getvalue()
# Ensure that all in-memory tasks on A have been restored on the
# scheduler after reconnect
for ts in a.tasks.values():
if ts.state == "memory":
assert a.address in {ws.address for ws in s.tasks[ts.key].who_has}
del f1, f2, f3
while any(w.tasks for w in [a, b]):
await asyncio.sleep(0.001)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)])
async def test_forget_dependents_after_release(c, s, a):
fut = c.submit(inc, 1, key="f-1")
fut2 = c.submit(inc, fut, key="f-2")
await asyncio.wait([fut, fut2])
assert fut.key in a.tasks
assert fut2.key in a.tasks
assert fut2.key in {d.key for d in a.tasks[fut.key].dependents}
fut2.release()
while fut2.key in a.tasks:
await asyncio.sleep(0.001)
assert fut2.key not in {d.key for d in a.tasks[fut.key].dependents}
@gen_cluster(client=True)
async def test_steal_during_task_deserialization(c, s, a, b, monkeypatch):
stealing_ext = s.extensions["stealing"]
stealing_ext._pc.stop()
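    # Disable the periodic work-stealing callback so steals only happen when
    # the test triggers them explicitly below.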
from distributed.utils import ThreadPoolExecutor
class CountingThreadPool(ThreadPoolExecutor):
counter = 0
def submit(self, *args, **kwargs):
CountingThreadPool.counter += 1
return super().submit(*args, **kwargs)
# Ensure we're always offloading
monkeypatch.setattr("distributed.worker.OFFLOAD_THRESHOLD", 1)
threadpool = CountingThreadPool(
max_workers=1, thread_name_prefix="Counting-Offload-Threadpool"
)
try:
monkeypatch.setattr("distributed.utils._offload_executor", threadpool)
class SlowDeserializeCallable:
def __init__(self, delay=0.1):
self.delay = delay
def __getstate__(self):
return self.delay
def __setstate__(self, state):
delay = state
import time
time.sleep(delay)
return SlowDeserializeCallable(delay)
def __call__(self, *args, **kwargs):
return 41
slow_deserialized_func = SlowDeserializeCallable()
fut = c.submit(
slow_deserialized_func, 1, workers=[a.address], allow_other_workers=True
)
while CountingThreadPool.counter == 0:
await asyncio.sleep(0)
ts = s.tasks[fut.key]
a.handle_steal_request(fut.key, stimulus_id="test")
stealing_ext.scheduler.send_task_to_worker(b.address, ts)
fut2 = c.submit(inc, fut, workers=[a.address])
fut3 = c.submit(inc, fut2, workers=[a.address])
assert await fut2 == 42
await fut3
finally:
threadpool.shutdown()
@gen_cluster(client=True)
async def test_gather_dep_exception_one_task(c, s, a, b):
"""Ensure an exception in a single task does not tear down an entire batch of gather_dep
See also https://github.com/dask/distributed/issues/5152
See also test_gather_dep_exception_one_task_2
"""
fut = c.submit(inc, 1, workers=[a.address], key="f1")
fut2 = c.submit(inc, 2, workers=[a.address], key="f2")
fut3 = c.submit(inc, 3, workers=[a.address], key="f3")
import asyncio
event = asyncio.Event()
write_queue = asyncio.Queue()
event.clear()
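    # Wrap B's comm pool so outgoing requests are queued and only sent once
    # `event` is set; this lets the test observe the get_data request to A
    # before letting it through.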
b.rpc = _LockedCommPool(b.rpc, write_event=event, write_queue=write_queue)
b.rpc.remove(a.address)
def sink(a, b, *args):
return a + b
res1 = c.submit(sink, fut, fut2, fut3, workers=[b.address])
res2 = c.submit(sink, fut, fut2, workers=[b.address])
# Wait until we're sure the worker is attempting to fetch the data
while True:
peer_addr, msg = await write_queue.get()
if peer_addr == a.address and msg["op"] == "get_data":
break
# Provoke an "impossible transision exception"
# By choosing a state which doesn't exist we're not running into validation
# errors and the state machine should raise if we want to transition from
# fetch to memory
b.validate = False
b.tasks[fut3.key].state = "fetch"
event.set()
assert await res1 == 5
assert await res2 == 5
del res1, res2, fut, fut2
fut3.release()
while a.tasks and b.tasks:
await asyncio.sleep(0.1)
@gen_cluster(client=True)
async def test_gather_dep_exception_one_task_2(c, s, a, b):
"""Ensure an exception in a single task does not tear down an entire batch of gather_dep
    The test below triggers a fetch->memory transition
See also https://github.com/dask/distributed/issues/5152
See also test_gather_dep_exception_one_task
"""
    # This test does not trigger the condition reliably but is a very easy case
    # which should function correctly regardless
fut1 = c.submit(inc, 1, workers=[a.address], key="f1")
fut2 = c.submit(inc, fut1, workers=[b.address], key="f2")
while fut1.key not in b.tasks or b.tasks[fut1.key].state == "flight":
await asyncio.sleep(0)
s.handle_missing_data(key="f1", errant_worker=a.address)
await fut2
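# Test helpers that push the scheduler's "acquire-replicas" / "remove-replicas"
# stream messages directly to a worker, bypassing the usual replica management.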
def _acquire_replicas(scheduler, worker, *futures):
keys = [f.key for f in futures]
scheduler.stream_comms[worker.address].send(
{
"op": "acquire-replicas",
"keys": keys,
"stimulus_id": f"acquire-replicas-{time()}",
"priorities": {key: scheduler.tasks[key].priority for key in keys},
"who_has": {
key: {w.address for w in scheduler.tasks[key].who_has} for key in keys
},
},
)
def _remove_replicas(scheduler, worker, *futures):
keys = [f.key for f in futures]
ws = scheduler.workers[worker.address]
for k in keys:
ts = scheduler.tasks[k]
if ws in ts.who_has:
scheduler.remove_replica(ts, ws)
scheduler.stream_comms[ws.address].send(
{
"op": "remove-replicas",
"keys": keys,
"stimulus_id": f"remove-replicas-{time()}",
}
)
@gen_cluster(client=True)
async def test_acquire_replicas(c, s, a, b):
fut = c.submit(inc, 1, workers=[a.address])
await fut
_acquire_replicas(s, b, fut)
while len(s.who_has[fut.key]) != 2:
await asyncio.sleep(0.005)
for w in (a, b):
assert w.data[fut.key] == 2
assert w.tasks[fut.key].state == "memory"
fut.release()
while b.tasks or a.tasks:
await asyncio.sleep(0.005)
@gen_cluster(client=True)
async def test_acquire_replicas_same_channel(c, s, a, b):
fut = c.submit(inc, 1, workers=[a.address], key="f-replica")
futB = c.submit(inc, 2, workers=[a.address], key="f-B")
futC = c.submit(inc, futB, workers=[b.address], key="f-C")
await fut
_acquire_replicas(s, b, fut)
await futC
while fut.key not in b.tasks or not b.tasks[fut.key].state == "memory":
await asyncio.sleep(0.005)
while len(s.who_has[fut.key]) != 2:
await asyncio.sleep(0.005)
# Ensure that both the replica and an ordinary dependency pass through the
# same communication channel
for f in [fut, futB]:
assert any("request-dep" in msg for msg in b.story(f.key))
assert any("gather-dependencies" in msg for msg in b.story(f.key))
assert any(f.key in msg["keys"] for msg in b.incoming_transfer_log)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 3)
async def test_acquire_replicas_many(c, s, *workers):
futs = c.map(inc, range(10), workers=[workers[0].address])
res = c.submit(sum, futs, workers=[workers[1].address])
final = c.submit(slowinc, res, delay=0.5, workers=[workers[1].address])
await wait(futs)
_acquire_replicas(s, workers[2], *futs)
# Worker 2 should normally not even be involved if there was no replication
while not all(
f.key in workers[2].tasks and workers[2].tasks[f.key].state == "memory"
for f in futs
):
await asyncio.sleep(0.01)
assert all(ts.state == "memory" for ts in workers[2].tasks.values())
assert await final == sum(map(inc, range(10))) + 1
# All workers have a replica
assert all(len(s.tasks[f.key].who_has) == 3 for f in futs)
del futs, res, final
while any(w.tasks for w in workers):
await asyncio.sleep(0.001)
@gen_cluster(client=True)
async def test_remove_replica_simple(c, s, a, b):
futs = c.map(inc, range(10), workers=[a.address])
await wait(futs)
_acquire_replicas(s, b, *futs)
while not all(len(s.tasks[f.key].who_has) == 2 for f in futs):
await asyncio.sleep(0.01)
_remove_replicas(s, b, *futs)
assert all(len(s.tasks[f.key].who_has) == 1 for f in futs)
while b.tasks:
await asyncio.sleep(0.01)
# Ensure there is no delayed reply to re-register the key
await asyncio.sleep(0.01)
assert all(s.tasks[f.key].who_has == {s.workers[a.address]} for f in futs)
@gen_cluster(
client=True,
config={"distributed.comm.recent-messages-log-length": 1_000},
)
async def test_remove_replica_while_computing(c, s, *workers):
futs = c.map(inc, range(10), workers=[workers[0].address])
# All interesting things will happen on that worker
w = workers[1]
intermediate = c.map(slowinc, futs, delay=0.05, workers=[w.address])
def reduce(*args, **kwargs):
import time
time.sleep(0.5)
return
final = c.submit(reduce, intermediate, workers=[w.address], key="final")
while not any(f.key in w.tasks for f in intermediate):
await asyncio.sleep(0.001)
# The scheduler removes keys from who_has/has_what immediately
# Make sure the worker responds to the rejection and the scheduler corrects
# the state
ws = s.workers[w.address]
while not any(s.tasks[fut.key] in ws.has_what for fut in futs):
await asyncio.sleep(0.001)
_remove_replicas(s, w, *futs)
# Scheduler removed keys immediately...
assert not any(s.tasks[fut.key] in ws.has_what for fut in futs)
# ... but the state is properly restored
while not any(s.tasks[fut.key] in ws.has_what for fut in futs):
await asyncio.sleep(0.01)
# The worker should reject all of these since they are required
while not all(fut.done() for fut in intermediate):
_remove_replicas(s, w, *futs)
await asyncio.sleep(0.01)
await wait(intermediate)
# If a request is rejected, the worker responds with an add-keys message to
    # re-enlist the key in the scheduler's state system to avoid race conditions,
# see also https://github.com/dask/distributed/issues/5265
rejections = set()
for msg in w.log:
if msg[0] == "remove-replica-rejected":
rejections.update(msg[1])
for rejected_key in rejections:
def answer_sent(key):
for batch in w.batched_stream.recent_message_log:
for msg in batch:
if "op" in msg and msg["op"] == "add-keys" and key in msg["keys"]:
return True
return False
assert answer_sent(rejected_key)
# Since intermediate is done, futs replicas may be removed.
# They might be already gone due to the above remove replica calls
_remove_replicas(s, w, *futs)
while any(w.tasks[f.key].state != "released" for f in futs if f.key in w.tasks):
await asyncio.sleep(0.001)
# The scheduler actually gets notified about the removed replica
while not all(len(s.tasks[f.key].who_has) == 1 for f in futs):
await asyncio.sleep(0.001)
await final
del final, intermediate, futs
while any(w.tasks for w in workers):
await asyncio.sleep(0.001)
@gen_cluster(client=True, nthreads=[("", 1)] * 3)
async def test_who_has_consistent_remove_replica(c, s, *workers):
a = workers[0]
other_workers = {w for w in workers if w != a}
f1 = c.submit(inc, 1, key="f1", workers=[w.address for w in other_workers])
await wait(f1)
for w in other_workers:
_acquire_replicas(s, w, f1)
while not len(s.tasks[f1.key].who_has) == len(other_workers):
await asyncio.sleep(0)
f2 = c.submit(inc, f1, workers=[a.address])
# Wait just until the moment the worker received the task and scheduled the
# task to be fetched, then remove the replica from the worker this one is
# trying to get the data from. Ensure this is handled gracefully and no
# suspicious counters are raised since this is expected behaviour when
# removing replicas
while f1.key not in a.tasks or a.tasks[f1.key].state != "flight":
await asyncio.sleep(0)
coming_from = None
for w in other_workers:
coming_from = w
if w.address == a.tasks[f1.key].coming_from:
break
coming_from.handle_remove_replicas([f1.key], "test")
await f2
assert (f1.key, "missing-dep") in a.story(f1.key)
assert a.tasks[f1.key].suspicious_count == 0
assert s.tasks[f1.key].suspicious == 0
@gen_cluster(client=True)
async def test_missing_released_zombie_tasks(c, s, a, b):
"""
Ensure that no fetch/flight tasks are left in the task dict of a
worker after everything was released
"""
a.total_in_connections = 0
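    # With A's incoming connection limit set to 0, B cannot fetch f1 and the
    # dependency remains in the "fetch" state below.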
f1 = c.submit(inc, 1, key="f1", workers=[a.address])
f2 = c.submit(inc, f1, key="f2", workers=[b.address])
key = f1.key
while key not in b.tasks or b.tasks[key].state != "fetch":
await asyncio.sleep(0.01)
await a.close(report=False)
del f1, f2
while b.tasks:
await asyncio.sleep(0.01)
@gen_cluster(client=True)
async def test_missing_released_zombie_tasks_2(c, s, a, b):
a.total_in_connections = 0
f1 = c.submit(inc, 1, key="f1", workers=[a.address])
f2 = c.submit(inc, f1, key="f2", workers=[b.address])
while f1.key not in b.tasks:
await asyncio.sleep(0)
ts = b.tasks[f1.key]
assert ts.state == "fetch"
    # A few things can happen to clear who_has. The dominant cause is a
    # connection failure to a worker. Regardless of how the set was cleared, the
    # task will be transitioned to missing, where the worker tries to
    # reacquire this information from the scheduler. While this is happening on
    # the worker side, the tasks are released and we want to ensure that no
    # dangling zombie tasks are left on the worker.
ts.who_has.clear()
del f1, f2
while b.tasks:
await asyncio.sleep(0.01)
story = b.story(ts)
assert any("missing" in msg for msg in story)
@pytest.mark.slow
@gen_cluster(
client=True,
Worker=Nanny,
nthreads=[("", 1)],
config={"distributed.worker.memory.pause": 0.5},
worker_kwargs={"memory_limit": 2 ** 29}, # 500 MiB
)
async def test_worker_status_sync(c, s, a):
(ws,) = s.workers.values()
while ws.status != Status.running:
await asyncio.sleep(0.01)
def leak():
distributed._test_leak = "x" * 2 ** 28 # 250 MiB
def clear_leak():
del distributed._test_leak
await c.run(leak)
while ws.status != Status.paused:
await asyncio.sleep(0.01)
await c.run(clear_leak)
while ws.status != Status.running:
await asyncio.sleep(0.01)
await s.retire_workers()
while ws.status != Status.closed:
await asyncio.sleep(0.01)
events = [ev for _, ev in s.events[ws.address] if ev["action"] != "heartbeat"]
assert events == [
{"action": "add-worker"},
{
"action": "worker-status-change",
"prev-status": "undefined",
"status": "running",
},
{
"action": "worker-status-change",
"prev-status": "running",
"status": "paused",
},
{
"action": "worker-status-change",
"prev-status": "paused",
"status": "running",
},
{"action": "remove-worker", "processing-tasks": {}},
{"action": "retired"},
]
async def _wait_for_state(key: str, worker: Worker, state: str):
    # Keep the sleep interval at 0 since the tests using this are very sensitive
    # about timing; they intend to capture loop cycles right after this specific
    # condition is set
while key not in worker.tasks or worker.tasks[key].state != state:
await asyncio.sleep(0)
@gen_cluster(client=True)
async def test_gather_dep_cancelled_rescheduled(c, s, a, b):
"""At time of writing, the gather_dep implementation filtered tasks again
for in-flight state. The response parser, however, did not distinguish
resulting in unwanted missing-data signals to the scheduler, causing
potential rescheduling or data leaks.
If a cancelled key is rescheduled for fetching while gather_dep waits
internally for get_data, the response parser would misclassify this key and
cause the key to be recommended for a release causing deadlocks and/or lost
keys.
At time of writing, this transition was implemented wrongly and caused a
flight->cancelled transition which should be recoverable but the cancelled
state was corrupted by this transition since ts.done==True. This attribute
setting would cause a cancelled->fetch transition to actually drop the key
instead, causing https://github.com/dask/distributed/issues/5366
See also test_gather_dep_do_not_handle_response_of_not_requested_tasks
"""
import distributed
with mock.patch.object(distributed.worker.Worker, "gather_dep") as mocked_gather:
fut1 = c.submit(inc, 1, workers=[a.address], key="f1")
fut2 = c.submit(inc, fut1, workers=[a.address], key="f2")
await fut2
fut4 = c.submit(sum, fut1, fut2, workers=[b.address], key="f4")
fut3 = c.submit(inc, fut1, workers=[b.address], key="f3")
fut2_key = fut2.key
await _wait_for_state(fut2_key, b, "flight")
while not mocked_gather.call_args:
await asyncio.sleep(0)
fut4.release()
while fut4.key in b.tasks:
await asyncio.sleep(0)
assert b.tasks[fut2.key].state == "cancelled"
args, kwargs = mocked_gather.call_args
assert fut2.key in kwargs["to_gather"]
# The below synchronization and mock structure allows us to intercept the
# state after gather_dep has been scheduled and is waiting for the
# get_data_from_worker to finish. If state transitions happen during this
# time, the response parser needs to handle this properly
lock = asyncio.Lock()
event = asyncio.Event()
async with lock:
async def wait_get_data(*args, **kwargs):
event.set()
async with lock:
return await distributed.worker.get_data_from_worker(*args, **kwargs)
with mock.patch.object(
distributed.worker,
"get_data_from_worker",
side_effect=wait_get_data,
):
gather_dep_fut = asyncio.ensure_future(
Worker.gather_dep(b, *args, **kwargs)
)
await event.wait()
fut4 = c.submit(sum, [fut1, fut2], workers=[b.address], key="f4")
while b.tasks[fut2.key].state != "flight":
await asyncio.sleep(0.1)
await gather_dep_fut
f2_story = b.story(fut2.key)
assert f2_story
await fut3
await fut4
@gen_cluster(client=True)
async def test_gather_dep_do_not_handle_response_of_not_requested_tasks(c, s, a, b):
"""At time of writing, the gather_dep implementation filtered tasks again
for in-flight state. The response parser, however, did not distinguish
resulting in unwanted missing-data signals to the scheduler, causing
potential rescheduling or data leaks.
This test may become obsolete if the implementation changes significantly.
"""
import distributed
with mock.patch.object(distributed.worker.Worker, "gather_dep") as mocked_gather:
fut1 = c.submit(inc, 1, workers=[a.address], key="f1")
fut2 = c.submit(inc, fut1, workers=[a.address], key="f2")
await fut2
fut4 = c.submit(sum, fut1, fut2, workers=[b.address], key="f4")
fut3 = c.submit(inc, fut1, workers=[b.address], key="f3")
fut2_key = fut2.key
await _wait_for_state(fut2_key, b, "flight")
while not mocked_gather.call_args:
await asyncio.sleep(0)
fut4.release()
while fut4.key in b.tasks:
await asyncio.sleep(0)
assert b.tasks[fut2.key].state == "cancelled"
args, kwargs = mocked_gather.call_args
assert fut2.key in kwargs["to_gather"]
await Worker.gather_dep(b, *args, **kwargs)
assert fut2.key not in b.tasks
f2_story = b.story(fut2.key)
assert f2_story
assert not any("missing-dep" in msg for msg in b.story(fut2.key))
await fut3
@gen_cluster(
client=True,
config={
"distributed.comm.recent-messages-log-length": 1000,
},
)
async def test_gather_dep_no_longer_in_flight_tasks(c, s, a, b):
import distributed
with mock.patch.object(distributed.worker.Worker, "gather_dep") as mocked_gather:
fut1 = c.submit(inc, 1, workers=[a.address], key="f1")
fut2 = c.submit(sum, fut1, fut1, workers=[b.address], key="f2")
fut1_key = fut1.key
await _wait_for_state(fut1_key, b, "flight")
while not mocked_gather.call_args:
await asyncio.sleep(0)
fut2.release()
while fut2.key in b.tasks:
await asyncio.sleep(0)
assert b.tasks[fut1.key].state == "cancelled"
args, kwargs = mocked_gather.call_args
await Worker.gather_dep(b, *args, **kwargs)
assert fut2.key not in b.tasks
f1_story = b.story(fut1.key)
assert f1_story
assert not any("missing-dep" in msg for msg in b.story(fut2.key))
@pytest.mark.parametrize("intermediate_state", ["resumed", "cancelled"])
@pytest.mark.parametrize("close_worker", [False, True])
@gen_cluster(client=True, nthreads=[("", 1)] * 3)
async def test_deadlock_cancelled_after_inflight_before_gather_from_worker(
c, s, a, b, x, intermediate_state, close_worker
):
"""If a task was transitioned to in-flight, the gather-dep coroutine was
scheduled but a cancel request came in before gather_data_from_worker was
issued this might corrupt the state machine if the cancelled key is not
properly handled"""
fut1 = c.submit(slowinc, 1, workers=[a.address], key="f1")
fut1B = c.submit(slowinc, 2, workers=[x.address], key="f1B")
fut2 = c.submit(sum, [fut1, fut1B], workers=[x.address], key="f2")
await fut2
with mock.patch.object(distributed.worker.Worker, "gather_dep") as mocked_gather:
fut3 = c.submit(inc, fut2, workers=[b.address], key="f3")
fut2_key = fut2.key
await _wait_for_state(fut2_key, b, "flight")
s.set_restrictions(worker={fut1B.key: a.address, fut2.key: b.address})
while not mocked_gather.call_args:
await asyncio.sleep(0)
await s.remove_worker(address=x.address, safe=True, close=close_worker)
await _wait_for_state(fut2_key, b, intermediate_state)
args, kwargs = mocked_gather.call_args
await Worker.gather_dep(b, *args, **kwargs)
await fut3
|
the-stack_0_16191 | # Copyright 2018 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import logging
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
from c7n_azure import constants
from c7n_azure.actions.logic_app import LogicAppAction
from azure.mgmt.resourcegraph.models import QueryRequest
from c7n_azure.actions.notify import Notify
from c7n_azure.filters import ParentFilter
from c7n_azure.provider import resources
from c7n.actions import ActionRegistry
from c7n.exceptions import PolicyValidationError
from c7n.filters import FilterRegistry
from c7n.manager import ResourceManager
from c7n.query import sources, MaxResourceLimit
from c7n.utils import local_session
log = logging.getLogger('custodian.azure.query')
class ResourceQuery:
def __init__(self, session_factory):
self.session_factory = session_factory
def filter(self, resource_manager, **params):
m = resource_manager.resource_type
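        # enum_spec is a (client attribute, list operation, extra args) triple
        # describing how to enumerate this resource type through the SDK client.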
enum_op, list_op, extra_args = m.enum_spec
if extra_args:
params.update(extra_args)
params.update(m.extra_args(resource_manager))
try:
op = getattr(getattr(resource_manager.get_client(), enum_op), list_op)
result = op(**params)
if isinstance(result, Iterable):
return [r.serialize(True) for r in result]
elif hasattr(result, 'value'):
return [r.serialize(True) for r in result.value]
except Exception as e:
log.error("Failed to query resource.\n"
"Type: azure.{0}.\n"
"Error: {1}".format(resource_manager.type, e))
raise
raise TypeError("Enumerating resources resulted in a return"
"value which could not be iterated.")
@staticmethod
def resolve(resource_type):
if not isinstance(resource_type, type):
raise ValueError(resource_type)
else:
m = resource_type
return m
@sources.register('describe-azure')
class DescribeSource:
resource_query_factory = ResourceQuery
def __init__(self, manager):
self.manager = manager
self.query = self.resource_query_factory(self.manager.session_factory)
def validate(self):
pass
def get_resources(self, query):
return self.query.filter(self.manager)
def get_permissions(self):
return ()
def augment(self, resources):
return resources
@sources.register('resource-graph')
class ResourceGraphSource:
def __init__(self, manager):
self.manager = manager
def validate(self):
if not hasattr(self.manager.resource_type, 'resource_type'):
raise PolicyValidationError(
"%s is not supported with the Azure Resource Graph source."
% self.manager.data['resource'])
def get_resources(self, _):
log.warning('The Azure Resource Graph source '
'should not be used in production scenarios at this time.')
session = self.manager.get_session()
client = session.client('azure.mgmt.resourcegraph.ResourceGraphClient')
        # An empty scope will return all resources
query_scope = ""
if self.manager.resource_type.resource_type != 'armresource':
query_scope = "where type =~ '%s'" % self.manager.resource_type.resource_type
query = QueryRequest(
query=query_scope,
subscriptions=[session.get_subscription_id()]
)
res = client.resources(query)
cols = [c['name'] for c in res.data['columns']]
data = [dict(zip(cols, r)) for r in res.data['rows']]
return data
def get_permissions(self):
return ()
def augment(self, resources):
return resources
class ChildResourceQuery(ResourceQuery):
"""A resource query for resources that must be queried with parent information.
Several resource types can only be queried in the context of their
    parents' identifiers, e.g. SQL and Cosmos databases.
"""
def filter(self, resource_manager, **params):
"""Query a set of resources."""
m = self.resolve(resource_manager.resource_type) # type: ChildTypeInfo
parents = resource_manager.get_parent_manager()
# Have to query separately for each parent's children.
results = []
for parent in parents.resources():
try:
subset = resource_manager.enumerate_resources(parent, m, **params)
if subset:
# If required, append parent resource ID to all child resources
if m.annotate_parent:
for r in subset:
r[m.parent_key] = parent[parents.resource_type.id]
results.extend(subset)
except Exception as e:
log.warning('Child enumeration failed for {0}. {1}'
.format(parent[parents.resource_type.id], e))
if m.raise_on_exception:
raise e
return results
@sources.register('describe-child-azure')
class ChildDescribeSource(DescribeSource):
resource_query_factory = ChildResourceQuery
class TypeMeta(type):
def __repr__(cls):
return "<Type info service:%s client: %s>" % (
cls.service,
cls.client)
class TypeInfo(metaclass=TypeMeta):
doc_groups = None
"""api client construction information"""
service = ''
client = ''
# Default id field, resources should override if different (used for meta filters, report etc)
id = 'id'
resource = constants.RESOURCE_ACTIVE_DIRECTORY
@classmethod
def extra_args(cls, resource_manager):
return {}
class ChildTypeInfo(TypeInfo, metaclass=TypeMeta):
"""api client construction information for child resources"""
parent_manager_name = ''
annotate_parent = True
raise_on_exception = True
parent_key = 'c7n:parent-id'
@classmethod
def extra_args(cls, parent_resource):
return {}
class QueryMeta(type):
"""metaclass to have consistent action/filter registry for new resources."""
def __new__(cls, name, parents, attrs):
if 'filter_registry' not in attrs:
attrs['filter_registry'] = FilterRegistry(
'%s.filters' % name.lower())
if 'action_registry' not in attrs:
attrs['action_registry'] = ActionRegistry(
'%s.actions' % name.lower())
return super(QueryMeta, cls).__new__(cls, name, parents, attrs)
class QueryResourceManager(ResourceManager, metaclass=QueryMeta):
class resource_type(TypeInfo):
pass
def __init__(self, data, options):
super(QueryResourceManager, self).__init__(data, options)
self.source = self.get_source(self.source_type)
self._session = None
def augment(self, resources):
return resources
def get_permissions(self):
return ()
def get_source(self, source_type):
return sources.get(source_type)(self)
def get_session(self):
if self._session is None:
self._session = local_session(self.session_factory)
return self._session
def get_client(self, service=None):
if not service:
return self.get_session().client(
"%s.%s" % (self.resource_type.service, self.resource_type.client))
return self.get_session().client(service)
def get_cache_key(self, query):
return {'source_type': self.source_type,
'query': query,
'resource': str(self.__class__.__name__)}
@classmethod
def get_model(cls):
return ResourceQuery.resolve(cls.resource_type)
@property
def source_type(self):
return self.data.get('source', 'describe-azure')
def resources(self, query=None):
cache_key = self.get_cache_key(query)
resources = None
if self._cache.load():
resources = self._cache.get(cache_key)
if resources is not None:
self.log.debug("Using cached %s: %d" % (
"%s.%s" % (self.__class__.__module__,
self.__class__.__name__),
len(resources)))
if resources is None:
resources = self.augment(self.source.get_resources(query))
self._cache.save(cache_key, resources)
resource_count = len(resources)
resources = self.filter_resources(resources)
# Check if we're out of a policies execution limits.
if self.data == self.ctx.policy.data:
self.check_resource_limit(len(resources), resource_count)
return resources
def check_resource_limit(self, selection_count, population_count):
"""Check if policy's execution affects more resources then its limit.
"""
p = self.ctx.policy
max_resource_limits = MaxResourceLimit(p, selection_count, population_count)
return max_resource_limits.check_resource_limits()
def get_resources(self, resource_ids, **params):
resource_client = self.get_client()
m = self.resource_type
get_client, get_op, extra_args = m.get_spec
if extra_args:
params.update(extra_args)
op = getattr(getattr(resource_client, get_client), get_op)
data = [
op(rid, **params)
for rid in resource_ids
]
return [r.serialize(True) for r in data]
@staticmethod
def register_actions_and_filters(registry, resource_class):
resource_class.action_registry.register('notify', Notify)
if 'logic-app' not in resource_class.action_registry:
resource_class.action_registry.register('logic-app', LogicAppAction)
def validate(self):
self.source.validate()
class ChildResourceManager(QueryResourceManager, metaclass=QueryMeta):
child_source = 'describe-child-azure'
parent_manager = None
@property
def source_type(self):
source = self.data.get('source', self.child_source)
if source == 'describe':
source = self.child_source
return source
def get_parent_manager(self):
if not self.parent_manager:
self.parent_manager = self.get_resource_manager(self.resource_type.parent_manager_name)
return self.parent_manager
def get_session(self):
if self._session is None:
session = super(ChildResourceManager, self).get_session()
if self.resource_type.resource != constants.RESOURCE_ACTIVE_DIRECTORY:
session = session.get_session_for_resource(self.resource_type.resource)
self._session = session
return self._session
def enumerate_resources(self, parent_resource, type_info, **params):
client = self.get_client()
enum_op, list_op, extra_args = self.resource_type.enum_spec
# There are 2 types of extra_args:
# - static values stored in 'extra_args' dict (e.g. some type)
# - dynamic values are retrieved via 'extra_args' method (e.g. parent name)
if extra_args:
params.update({key: extra_args[key](parent_resource) for key in extra_args.keys()})
params.update(type_info.extra_args(parent_resource))
# Some resources might not have enum_op piece (non-arm resources)
if enum_op:
op = getattr(getattr(client, enum_op), list_op)
else:
op = getattr(client, list_op)
result = op(**params)
if isinstance(result, Iterable):
return [r.serialize(True) for r in result]
elif hasattr(result, 'value'):
return [r.serialize(True) for r in result.value]
raise TypeError("Enumerating resources resulted in a return"
"value which could not be iterated.")
@staticmethod
def register_child_specific(registry, resource_class):
if not issubclass(resource_class, ChildResourceManager):
return
# If Child Resource doesn't annotate parent, there is no way to filter based on
# parent properties.
if resource_class.resource_type.annotate_parent:
resource_class.filter_registry.register('parent', ParentFilter)
resources.subscribe(QueryResourceManager.register_actions_and_filters)
resources.subscribe(ChildResourceManager.register_child_specific)
|
the-stack_0_16193 | model = dict(
type='PAN',
backbone=dict(
type='resnet18',
pretrained=True
),
neck=dict(
type='FPEM_v1',
in_channels=(64, 128, 256, 512),
out_channels=128
),
detection_head=dict(
type='PA_Head',
in_channels=512,
hidden_dim=128,
num_classes=6,
loss_text=dict(
type='DiceLoss',
loss_weight=1.0
),
loss_kernel=dict(
type='DiceLoss',
loss_weight=0.5
),
loss_emb=dict(
type='EmbLoss_v1',
feature_dim=4,
loss_weight=0.25
)
)
)
data = dict(
batch_size=8,
train=dict(
type='PAN_IC15',
split='train',
is_transform=True,
img_size=736,
short_size=736,
kernel_scale=0.5,
read_type='cv2'
),
test=dict(
type='PAN_IC15',
split='test',
short_size=736,
read_type='cv2'
)
)
train_cfg = dict(
lr=1e-3,
schedule='polylr',
epoch=600,
optimizer='Adam'
)
test_cfg = dict(
min_score=0.7,
min_area=10,
bbox_type='rect',
result_path='outputs/submit_ic15.zip'
)
|
the-stack_0_16194 | from azure.cosmosdb.table.tableservice import TableService
from azure.cosmosdb.table.models import Entity, EntityProperty, EdmType
from database.models.Datum import Datum
from string import Template
import uuid
def generateRowKey():
return str(uuid.uuid4())
class DatumRepository:
def __init__(self):
self.tableService = TableService(connection_string='DefaultEndpointsProtocol=https;AccountName=styles-db;AccountKey=GKnYYUiWGAPVQuu7qjqPDUrfESoMQLrQ2YZmAahqW6WnSkwICAxd8yj3G2OlZMA27VPVmAECrcrBwq8bJfmjXg==;TableEndpoint=https://styles-db.table.cosmos.azure.com:443/;')
self.tableName = 'dataset'
self.PartitionKey = 'dataset'
# Returns the created Entity object
def create(self, datum):
entity = Entity()
entity.PartitionKey = self.PartitionKey
entity.RowKey = generateRowKey()
entity.blobName = EntityProperty(EdmType.STRING, datum.blobName)
entity.contrast = EntityProperty(EdmType.DOUBLE, datum.contrast)
entity.brightness = EntityProperty(EdmType.DOUBLE, datum.brightness)
entity.temperature = EntityProperty(EdmType.DOUBLE, datum.temperature)
entity.saturation = EntityProperty(EdmType.DOUBLE, datum.saturation)
return self.tableService.insert_entity(self.tableName, entity)
# Returns either an Entity or a list of Entity objects
def read(self, RowKey = None):
if RowKey is None:
# Get all
queryTemplate = Template("PartitionKey eq '$PartitionKey'")
result = self.tableService.query_entities(self.tableName, filter=queryTemplate.substitute(PartitionKey=self.PartitionKey))
result = [Datum(item) for item in result]
return result
# Get by id
result = self.tableService.get_entity(self.tableName, self.PartitionKey, RowKey)
result = Datum(result)
return result
# Returns the updated Entity object
def update(self, entity):
self.tableService.update_entity(self.tableName, entity)
    # Deletes the entity identified by RowKey (no return value)
def delete(self, RowKey):
self.tableService.delete_entity(self.tableName, self.PartitionKey, RowKey)
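# Minimal usage sketch (hypothetical; assumes the connection string above is
# valid and that `Datum` exposes the attributes used in create()):
#
#   repo = DatumRepository()
#   repo.create(some_datum)   # inserts a new row with a generated RowKey
#   all_data = repo.read()    # list of Datum objects for the 'dataset' partition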
|
the-stack_0_16195 | """Implementations for torch.nn.functional equivalent for MPC."""
# stdlib
from typing import Optional
from typing import Tuple
from typing import Union
# third party
import numpy as np
import torch
from sympc.session import get_session
from sympc.tensor import MPCTensor
from sympc.tensor import ShareTensor
from sympc.utils import parallel_execution
def relu(x: MPCTensor) -> MPCTensor:
"""Rectified linear unit function.
Args:
x (MPCTensor): The tensor on which we apply the function
Returns:
An MPCTensor which represents the ReLu applied on the input tensor
"""
res = x * (x >= 0)
return res
def mse_loss(pred: MPCTensor, target: MPCTensor, reduction: str = "mean") -> MPCTensor:
"""Mean Squared Error loss.
Args:
pred (MPCTensor): The predictions obtained
target (MPCTensor): The target values
reduction (str): the reduction method, default is `mean`
Returns:
The loss
Raises:
ValueError: If `reduction` not in supported methods
"""
if reduction == "mean":
result = (pred - target).pow(2).sum() / pred.shape[0]
elif reduction == "sum":
result = (pred - target).pow(2).sum()
else:
raise ValueError("do not support reduction method: %s" % reduction)
return result
Kernel2D = Tuple[int, int]
Stride2D = Tuple[int, int]
Padding2D = Tuple[int, int]
Dilation2D = Tuple[int, int]
MaxPool2DArgs = Tuple[Kernel2D, Stride2D, Padding2D, Dilation2D]
def _sanity_check_max_pool2d(
kernel_size: Union[int, Tuple[int, int]],
stride: Optional[Union[int, Tuple[int, int]]] = None,
padding: Union[int, Tuple[int, int]] = 0,
dilation: Union[int, Tuple[int, int]] = 1,
) -> MaxPool2DArgs:
"""Sanity check the parameters required for max_pool2d (backward and forward pass).
Args:
kernel_size (Union[int, Tuple[int, int]]): the kernel size
in case it is passed as an integer then that specific value is used for height and width
stride (Union[int, Tuple[int, int]]): the stride size
in case it is passed as an integer then that specific value is used for height and width
padding (Union[int, Tuple[int, int]]): the padding size
in case it is passed as an integer then that specific value is used for height and width
dilation (Union[int, Tuple[int, int]]): the dilation size
in case it is passed as an integer then that specific value is used for height and width
Returns:
A 4 element type with types Tuple[int, int] representing the converted parameters.
Raises:
ValueError: if the parameters are not passing the sanity check
"""
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
if len(kernel_size) != 2:
raise ValueError("Kernel_size should have only 2 dimensions")
if stride is None:
stride = kernel_size
if isinstance(stride, int):
stride = (stride, stride)
if len(stride) != 2:
raise ValueError("Stride should have only 2 dimensions")
if isinstance(padding, int):
padding = (padding, padding)
if padding[0] > kernel_size[0] or padding[1] > kernel_size[1]:
raise ValueError("Padding should be <= kernel_size / 2")
if len(padding) != 2:
raise ValueError("Padding should have only 2 dimensions")
if isinstance(dilation, int):
dilation = (dilation, dilation)
if len(dilation) != 2:
raise ValueError("Dilation should have only 2 dimensions")
if dilation[0] != 1 or dilation[1] != 1:
raise ValueError("Supported only dilation == 1")
return kernel_size, stride, padding, dilation
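# Illustrative usage (added for exposition, not part of the original module):
# scalar arguments are normalised to (height, width) tuples and a missing
# stride defaults to the kernel size.
def _sanity_check_example():
    """Show how scalar max_pool2d arguments are normalised to tuples."""
    assert _sanity_check_max_pool2d(kernel_size=2) == ((2, 2), (2, 2), (0, 0), (1, 1))
    assert _sanity_check_max_pool2d(3, stride=1, padding=1) == ((3, 3), (1, 1), (1, 1), (1, 1))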
def _reshape_max_pool2d(
x: MPCTensor,
kernel_size: Tuple[int, int],
stride: Tuple[int, int],
padding: Tuple[int, int],
dilation: Tuple[int, int],
) -> MPCTensor:
"""Prepare the share tensors by calling the reshape function in parallel at each party.
Args:
x (MPCTensor): the MPCTensor on which to apply the reshape operation
kernel_size (Tuple[int, int]): the kernel size
stride (Tuple[int, int]): the stride size
padding (Tuple[int, int]): the padding size
dilation (Tuple[int, int]): the dilation size
Returns:
The reshaped MPCTensor.
"""
session = x.session
args = [[share, kernel_size, stride, padding, dilation] for share in x.share_ptrs]
shares = parallel_execution(helper_max_pool2d_reshape, session.parties)(args)
res_shape = shares[0].shape.get()
res = MPCTensor(shares=shares, session=session, shape=res_shape)
return res
def helper_max_pool2d_reshape(
x: ShareTensor,
kernel_size: Tuple[int, int],
stride: Tuple[int, int],
padding: Tuple[int, int],
dilation: Tuple[int, int],
) -> ShareTensor:
"""Function that runs at each party for preparing the share.
Reshape each share tensor to prepare it for calling 'argmax'.
The new share would have "each element" as the input on which we
will run the max_pool2d kernel.
Args:
x (ShareTensor): the ShareTensor on which to apply the reshaping
kernel_size (Tuple[int, int]): the kernel size
stride (Tuple[int, int]): the stride size
padding (Tuple[int, int]): the padding size
dilation (Tuple[int, int]): the dilation size
Returns:
The prepared share tensor (reshaped)
"""
session = get_session(x.session_uuid)
tensor = x.tensor.numpy()
padding = [(0, 0)] * len(tensor.shape[:-2]) + [
(padding[0], padding[0]),
(padding[1], padding[1]),
]
tensor_type = session.tensor_type
padding_value = 0
if session.rank == 0:
# ATTENTION: Min value for max_pool2d that works -25
padding_value = -25
tensor = np.pad(tensor, padding, mode="constant", constant_values=padding_value)
output_shape = tensor.shape[:-2]
output_shape += (
(tensor.shape[-2] - kernel_size[0]) // stride[0] + 1,
(tensor.shape[-1] - kernel_size[1]) // stride[1] + 1,
)
output_shape += kernel_size
output_strides = tensor.strides[:-2]
output_strides += (stride[0] * tensor.strides[-2], stride[1] * tensor.strides[-1])
output_strides += tensor.strides[-2:]
window_view_share = torch.tensor(
np.lib.stride_tricks.as_strided(
tensor, shape=output_shape, strides=output_strides
),
dtype=tensor_type,
)
window_view_share = window_view_share.reshape(-1, *kernel_size)
res_share = ShareTensor(config=x.config)
res_share.tensor = window_view_share
return res_share
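# Illustrative sketch (added for exposition, not part of the original module):
# the stride trick above exposes one (kernel_h, kernel_w) window per output
# position without copying data. The same recipe on a public numpy array,
# for a 4x4 input with a 2x2 kernel and stride 2:
def _window_view_public_reference():
    """Build the strided window view that max pooling is applied to."""
    import numpy as np
    tensor = np.arange(16, dtype=np.int64).reshape(4, 4)
    kernel_size, stride = (2, 2), (2, 2)
    out_shape = (
        (tensor.shape[-2] - kernel_size[0]) // stride[0] + 1,
        (tensor.shape[-1] - kernel_size[1]) // stride[1] + 1,
    ) + kernel_size
    out_strides = (
        stride[0] * tensor.strides[-2],
        stride[1] * tensor.strides[-1],
    ) + tensor.strides[-2:]
    windows = np.lib.stride_tricks.as_strided(tensor, shape=out_shape, strides=out_strides)
    # windows[0, 0] == [[0, 1], [4, 5]] and windows[1, 1] == [[10, 11], [14, 15]]
    assert windows.shape == (2, 2, 2, 2)
    assert windows.reshape(-1, *kernel_size).max(axis=(-2, -1)).tolist() == [5, 7, 13, 15]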
def max_pool2d(
x: MPCTensor,
kernel_size: Union[int, Tuple[int, int]],
stride: Optional[Union[int, Tuple[int, int]]] = None,
padding: Union[int, Tuple[int, int]] = 0,
dilation: Union[int, Tuple[int, int]] = 1,
return_indices: bool = False,
) -> Union[MPCTensor, Tuple[MPCTensor, MPCTensor]]:
"""Compute the max pool for a tensor with 2 dimension.
Args:
x (MPCTensor): the MPCTensor on which to apply the operation
kernel_size (Union[int, Tuple[int, int]]): the kernel size
in case it is passed as an integer then that specific value is used for height and width
stride (Union[int, Tuple[int, int]]): the stride size
in case it is passed as an integer then that specific value is used for height and width
padding (Union[int, Tuple[int, int]]): the padding size
in case it is passed as an integer then that specific value is used for height and width
dilation (Union[int, Tuple[int, int]]): the dilation size
in case it is passed as an integer then that specific value is used for height and width
return_indices (bool): to return the indices of the max values
Returns:
        A tuple representing the maximum values and the indices (as a one-hot encoding).
Raises:
ValueError: if the kernel size is bigger than the input
"""
kernel_size, stride, padding, dilation = _sanity_check_max_pool2d(
kernel_size, stride, padding, dilation
)
if (
x.shape[-2] + 2 * padding[0] < kernel_size[0]
or x.shape[-1] + 2 * padding[1] < kernel_size[1]
):
raise ValueError(
f"Kernel size ({kernel_size}) has more elements on an axis than "
f"input shape ({x.shape}) considering padding of {padding}"
)
x_reshaped = _reshape_max_pool2d(
x, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation
)
res_max_columns, columns = x_reshaped.max(dim=-1, one_hot=True)
res_max, rows = res_max_columns.max(dim=-1, one_hot=True)
output_shape = x.shape[:-2] + (
(x.shape[-2] - kernel_size[0] + 2 * padding[0]) // stride[0] + 1,
(x.shape[-1] - kernel_size[1] + 2 * padding[1]) // stride[1] + 1,
)
res = res_max.reshape(*output_shape)
if return_indices:
indices = columns * rows.unsqueeze(-1)
res = (res, indices.reshape(output_shape + kernel_size))
return res
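# Illustrative sketch (added for exposition, not part of the original module):
# the secure max_pool2d above mirrors torch.nn.functional.max_pool2d, so the
# expected output shape and values can be checked on public data.
def _max_pool2d_public_reference():
    """Plain-PyTorch counterpart of the pooling performed on MPCTensors."""
    import torch
    import torch.nn.functional as F
    x = torch.arange(16.0).reshape(1, 1, 4, 4)
    pooled = F.max_pool2d(x, kernel_size=2, stride=2)
    assert pooled.shape == (1, 1, 2, 2)
    assert pooled.flatten().tolist() == [5.0, 7.0, 13.0, 15.0]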
def max_pool2d_backward_helper(
input_shape: Tuple[int],
grads_share: ShareTensor,
kernel_size: Tuple[int, int],
stride: Tuple[int, int],
padding: Tuple[int, int],
) -> ShareTensor:
"""Helper function to compute the gradient needed to be passed to the parent node.
Args:
input_shape (Tuple[int]): the size of the input tensor when running max_pool2d
grads_share (ShareTensor): the share for the output gradient specific to this party
kernel_size (Tuple[int, int]): the kernel size
stride (Tuple[int, int]): the stride size
padding (Tuple[int, int]): the padding size
Returns:
A ShareTensor specific for the computed gradient
Raises:
ValueError: if the input shape (taken into consideration the padding) is smaller than the
kernel shape
"""
session = get_session(str(grads_share.session_uuid))
res_shape = input_shape[:-2]
res_shape += (input_shape[-2] + 2 * padding[0], input_shape[-1] + 2 * padding[1])
if res_shape[-2] < kernel_size[0] or res_shape[-1] < kernel_size[1]:
raise ValueError(
f"Kernel size ({kernel_size}) has more elements on an axis than "
f"input shape ({res_shape}) considering padding of {padding}"
)
tensor_type = session.tensor_type
tensor = torch.zeros(res_shape, dtype=tensor_type)
for i in range((res_shape[-2] - kernel_size[0]) // stride[0] + 1):
row_idx = i * stride[0]
for j in range((res_shape[-1] - kernel_size[1]) // stride[1] + 1):
col_idx = j * stride[1]
if len(res_shape) == 4:
tensor[
:,
:,
row_idx : row_idx + kernel_size[0],
col_idx : col_idx + kernel_size[1],
] += grads_share.tensor[:, :, i, j]
else:
tensor[
:,
row_idx : row_idx + kernel_size[0],
col_idx : col_idx + kernel_size[1],
] += grads_share.tensor[:, i, j]
if len(res_shape) == 4:
tensor = tensor[
:, :, padding[0] : input_shape[-2], padding[1] : input_shape[-1]
]
else:
tensor = tensor[
:,
padding[0] : res_shape[-2] - padding[0],
padding[1] : res_shape[-1] - padding[1],
]
res = ShareTensor(config=grads_share.config)
res.tensor = tensor
return res
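# Illustrative sketch (added for exposition, not part of the original module):
# a simplified public numpy version of the scatter-add above, with one
# gradient value per window (the real helper adds a (kernel_h, kernel_w)
# block that was already masked by the argmax one-hot indices).
def _max_pool2d_backward_public_reference():
    """Scatter a (2, 2) output gradient back onto a (4, 4) input."""
    import numpy as np
    grad = np.array([[1.0, 2.0], [3.0, 4.0]])
    kernel_size, stride = (2, 2), (2, 2)
    scattered = np.zeros((4, 4))
    for i in range(grad.shape[0]):
        for j in range(grad.shape[1]):
            r, c = i * stride[0], j * stride[1]
            scattered[r : r + kernel_size[0], c : c + kernel_size[1]] += grad[i, j]
    assert scattered[0, 0] == 1.0 and scattered[3, 3] == 4.0  # overlapping windows would accumulate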
def max_pool2d_backward(
grad: MPCTensor,
input_shape: Tuple[int],
indices: MPCTensor,
kernel_size: Union[int, Tuple[int, int]],
stride: Optional[Union[int, Tuple[int, int]]] = None,
padding: Union[int, Tuple[int, int]] = 0,
dilation: Union[int, Tuple[int, int]] = 1,
) -> MPCTensor:
"""Helper function for the backwards step for max_pool2d.
    Credit goes to the CrypTen team.
Args:
grad (MPCTensor): gradient that comes from the child node
input_shape (Tuple[int]): the shape of the input when the max_pool2d was run
indices (MPCTensor): the indices where the maximum value was found in the input
kernel_size (Union[int, Tuple[int, int]]): the kernel size
in case it is passed as an integer then that specific value is used for height and width
stride (Union[int, Tuple[int, int]]): the stride size
in case it is passed as an integer then that specific value is used for height and width
padding (Union[int, Tuple[int, int]]): the padding size
in case it is passed as an integer then that specific value is used for height and width
dilation (Union[int, Tuple[int, int]]): the dilation size
in case it is passed as an integer then that specific value is used for height and width
Returns:
The gradient that should be backpropagated (MPCTensor)
Raises:
ValueError: In case some of the values for the parameters are not supported
"""
kernel_size, stride, padding, dilation = _sanity_check_max_pool2d(
kernel_size, stride, padding, dilation
)
if len(grad.shape) != 4 and len(grad.shape) != 3:
raise ValueError(
f"Expected gradient to have 3/4 dimensions (4 with batch). Found {len(grad.shape)}"
)
if len(indices.shape) != len(grad.shape) + 2:
raise ValueError(
"Expected indices shape to have 2 extra dimensions because of "
f"(kernel_size, kernel_size), but has {len(indices.shape)}"
)
session = grad.session
mappings = grad.view(grad.shape + (1, 1)) * indices
args = [
[tuple(input_shape), grads_share, kernel_size, stride, padding]
for grads_share in mappings.share_ptrs
]
shares = parallel_execution(max_pool2d_backward_helper, session.parties)(args)
res = MPCTensor(shares=shares, shape=input_shape, session=session)
return res
|
the-stack_0_16196 | # Checks for an absolute error
# with an error of at most 1e-7
# Don't edit this file. Edit real_abs_rel_template.py instead, and then run _real_check_gen.py
from itertools import zip_longest
from decimal import Decimal, InvalidOperation
from kg.checkers import * ### @import
EPS = Decimal('1e-7')
EPS *= 1+Decimal('1e-5') # add some leniency
@set_checker()
@default_score
def checker(input_file, output_file, judge_file, **kwargs):
worst = 0
for line1, line2 in zip_longest(output_file, judge_file):
if (line1 is None) != (line2 is None): raise WA("Unequal number of lines")
p1 = line1.rstrip().split(" ")
p2 = line2.rstrip().split(" ")
if len(p1) != len(p2): raise WA("Incorrect number of values in line")
for v1, v2 in zip(p1, p2):
if v1 != v2: # they're different as tokens. try considering them as numbers
try:
err = abs_error(Decimal(v1), Decimal(v2))
except InvalidOperation:
raise WA(f"Unequal tokens that are not numbers: {v1!r} != {v2!r}")
worst = max(worst, err)
if err > EPS:
print('Found an error of', worst) ### @if format not in ('hr', 'cms')
raise WA("Bad precision.")
print('Worst error:', worst) ### @if format not in ('pg', 'hr', 'cms')
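# Illustrative sketch (added for exposition, not part of the generated checker).
# The tolerance test above reduces to |output - judge| <= EPS, assuming
# `abs_error` computes a plain absolute difference of the two Decimals.
def _tolerance_example():
    """Show which differences the absolute-error rule accepts."""
    from decimal import Decimal
    assert abs(Decimal('3.14159260') - Decimal('3.14159265')) <= EPS    # off by 5e-8: accepted
    assert not abs(Decimal('3.1415930') - Decimal('3.1415927')) <= EPS  # off by 3e-7: rejected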
help_ = ('Compare if two sequences of real numbers are "close enough" (by 1e-7). '
"Uses absolute error.")
if __name__ == '__main__': chk(help=help_)
|
the-stack_0_16197 | # Copyright (C) 2020-2021 Intel Corporation
#
# SPDX-License-Identifier: MIT
from enum import Enum, auto
from math import gcd
import copy
import logging as log
import numpy as np
from datumaro.components.cli_plugin import CliPlugin
from datumaro.components.extractor import (
DEFAULT_SUBSET_NAME, AnnotationType, Transform,
)
from datumaro.util import cast
NEAR_ZERO = 1e-7
class SplitTask(Enum):
classification = auto()
detection = auto()
segmentation = auto()
reid = auto()
class Split(Transform, CliPlugin):
"""
- classification split |n
Splits dataset into subsets(train/val/test) in class-wise manner. |n
Splits dataset images in the specified ratio, keeping the initial class
distribution.|n
|n
- detection & segmentation split |n
Each image can have multiple object annotations -
(bbox, mask, polygon). Since an image shouldn't be included
in multiple subsets at the same time, and image annotations
shouldn't be split, in general, dataset annotations are unlikely
to be split exactly in the specified ratio. |n
This split tries to split dataset images as close as possible
to the specified ratio, keeping the initial class distribution.|n
|n
- reidentification split |n
In this task, the test set should consist of images of unseen
people or objects during the training phase. |n
This function splits a dataset in the following way:|n
1. Splits the dataset into 'train + val' and 'test' sets|n
|s|sbased on person or object ID.|n
2. Splits 'test' set into 'test-gallery' and 'test-query' sets|n
|s|sin class-wise manner.|n
3. Splits the 'train + val' set into 'train' and 'val' sets|n
|s|sin the same way.|n
The final subsets would be
'train', 'val', 'test-gallery' and 'test-query'. |n
|n
Notes:|n
- Each image is expected to have only one Annotation. Unlabeled or
multi-labeled images will be split into subsets randomly. |n
- If Labels also have attributes, also splits by attribute values.|n
- If there is not enough images in some class or attributes group,
the split ratio can't be guaranteed.|n
In reidentification task, |n
- Object ID can be described by Label, or by attribute (--attr parameter)|n
- The splits of the test set are controlled by '--query' parameter |n
|s|sGallery ratio would be 1.0 - query.|n
|n
Example:|n
|s|s%(prog)s -t classification --subset train:.5 --subset val:.2 --subset test:.3 |n
|s|s%(prog)s -t detection --subset train:.5 --subset val:.2 --subset test:.3 |n
|s|s%(prog)s -t segmentation --subset train:.5 --subset val:.2 --subset test:.3 |n
|s|s%(prog)s -t reid --subset train:.5 --subset val:.2 --subset test:.3 --query .5 |n
Example: use 'person_id' attribute for splitting|n
|s|s%(prog)s --attr person_id
"""
_default_split = [("train", 0.5), ("val", 0.2), ("test", 0.3)]
_default_query_ratio = 0.5
@classmethod
def build_cmdline_parser(cls, **kwargs):
parser = super().build_cmdline_parser(**kwargs)
parser.add_argument(
"-t",
"--task",
default=SplitTask.classification.name,
choices=[t.name for t in SplitTask],
help="(one of {}; default: %(default)s)".format(
", ".join(t.name for t in SplitTask)
),
)
parser.add_argument(
"-s",
"--subset",
action="append",
type=cls._split_arg,
dest="splits",
help="Subsets in the form: '<subset>:<ratio>' "
"(repeatable, default: %s)" % dict(cls._default_split),
)
parser.add_argument(
"--query",
type=float,
default=None,
help="Query ratio in the test set (default: %.3f)"
% cls._default_query_ratio,
)
parser.add_argument(
"--attr",
type=str,
dest="attr_for_id",
default=None,
help="Attribute name representing the ID (default: use label)",
)
parser.add_argument("--seed", type=int, help="Random seed")
return parser
@staticmethod
def _split_arg(s):
parts = s.split(":")
if len(parts) != 2:
import argparse
raise argparse.ArgumentTypeError()
return (parts[0], float(parts[1]))
def __init__(self, dataset, task, splits, query=None, attr_for_id=None, seed=None):
super().__init__(dataset)
if splits is None:
splits = self._default_split
self.task = task
self.splitter = self._get_splitter(
task, dataset, splits, seed, query, attr_for_id
)
self._initialized = False
self._subsets = self.splitter._subsets
@staticmethod
def _get_splitter(task, dataset, splits, seed, query, attr_for_id):
if task == SplitTask.classification.name:
splitter = _ClassificationSplit(dataset=dataset, splits=splits, seed=seed)
elif task in {SplitTask.detection.name, SplitTask.segmentation.name}:
splitter = _InstanceSpecificSplit(
dataset=dataset, splits=splits, seed=seed, task=task
)
elif task == SplitTask.reid.name:
splitter = _ReidentificationSplit(
dataset=dataset,
splits=splits,
seed=seed,
query=query,
attr_for_id=attr_for_id,
)
else:
raise Exception(
f"Unknown task '{task}', available "
f"splitter format: {[a.name for a in SplitTask]}"
)
return splitter
def __iter__(self):
# lazy splitting
if self._initialized is False:
self.splitter._split_dataset()
self._initialized = True
for i, item in enumerate(self._extractor):
yield self.wrap_item(item, subset=self.splitter._find_split(i))
def get_subset(self, name):
# lazy splitting
if self._initialized is False:
self.splitter._split_dataset()
self._initialized = True
return super().get_subset(name)
def subsets(self):
# lazy splitting
if self._initialized is False:
self.splitter._split_dataset()
self._initialized = True
return super().subsets()
class _TaskSpecificSplit:
def __init__(self, dataset, splits, seed, restrict=False):
self._extractor = dataset
snames, sratio, subsets = self._validate_splits(splits, restrict)
self._snames = snames
self._sratio = sratio
self._seed = seed
# remove subset name restriction
# https://github.com/openvinotoolkit/datumaro/issues/194
self._subsets = subsets
self._parts = []
self._length = "parent"
self._initialized = False
def _set_parts(self, by_splits):
self._parts = []
for subset in self._subsets:
self._parts.append((set(by_splits[subset]), subset))
@staticmethod
def _get_uniq_annotations(dataset):
annotations = []
unlabeled_or_multi = []
for idx, item in enumerate(dataset):
labels = [a for a in item.annotations if a.type == AnnotationType.label]
if len(labels) == 1:
annotations.append(labels[0])
else:
unlabeled_or_multi.append(idx)
return annotations, unlabeled_or_multi
@staticmethod
def _validate_splits(splits, restrict=False):
snames = []
ratios = []
subsets = set()
valid = ["train", "val", "test"]
for subset, ratio in splits:
# remove subset name restriction
# https://github.com/openvinotoolkit/datumaro/issues/194
if restrict:
assert subset in valid, "Subset name must be one of %s, got %s" % (
valid,
subset,
)
assert (
0.0 <= ratio and ratio <= 1.0
), "Ratio is expected to be in the range " "[0, 1], but got %s for %s" % (
ratio,
subset,
)
# ignore near_zero ratio because it may produce partition error.
if ratio > NEAR_ZERO:
# handling duplication
if subset in snames:
raise Exception("Subset (%s) is duplicated" % subset)
snames.append(subset)
ratios.append(float(ratio))
subsets.add(subset)
ratios = np.array(ratios)
total_ratio = np.sum(ratios)
if not abs(total_ratio - 1.0) <= NEAR_ZERO:
raise Exception(
"Sum of ratios is expected to be 1, got %s, which is %s"
% (splits, total_ratio)
)
return snames, ratios, subsets
@staticmethod
def _get_required(ratio):
if len(ratio) < 2:
return 1
for scale in [10, 100]:
farray = np.array(ratio) * scale
iarray = farray.astype(int)
if np.array_equal(iarray, farray):
break
# find gcd
common_divisor = iarray[0]
for val in iarray[1:]:
common_divisor = gcd(common_divisor, val)
required = np.sum(np.array(iarray / common_divisor).astype(int))
return required
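    # Illustrative example (added for exposition): for ratios [0.5, 0.2, 0.3]
    # the scaled integers are [5, 2, 3] with gcd 1, so a group needs at least
    # 5 + 2 + 3 = 10 samples to realise the split exactly; for [0.5, 0.5] the
    # gcd is 5 and only 2 samples are required:
    #   _TaskSpecificSplit._get_required([0.5, 0.2, 0.3])  -> 10
    #   _TaskSpecificSplit._get_required([0.5, 0.5])       -> 2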
@staticmethod
def _get_sections(dataset_size, ratio):
n_splits = [int(np.around(dataset_size * r)) for r in ratio[:-1]]
n_splits.append(dataset_size - np.sum(n_splits))
# if there are splits with zero samples even if ratio is not 0,
# borrow one from the split who has one or more.
for ii, num_split in enumerate(n_splits):
if num_split == 0 and NEAR_ZERO < ratio[ii]:
midx = np.argmax(n_splits)
if n_splits[midx] > 0:
n_splits[ii] += 1
n_splits[midx] -= 1
sections = np.add.accumulate(n_splits[:-1])
return sections, n_splits
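    # Illustrative example (added for exposition): splitting 10 items with
    # ratios [0.5, 0.2, 0.3] yields n_splits == [5, 2, 3] and section
    # boundaries [5, 7], so np.array_split assigns indices 0-4, 5-6 and 7-9
    # to the three subsets:
    #   _TaskSpecificSplit._get_sections(10, [0.5, 0.2, 0.3])  -> (array([5, 7]), [5, 2, 3])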
@staticmethod
def _group_by_attr(items):
"""
Args:
items: list of (idx_img, ann). ann is the annotation from Label object.
Returns:
by_attributes: dict of { combination-of-attrs : list of index }
"""
# float--> numerical, others(int, string, bool) --> categorical
def _is_float(value):
if isinstance(value, str):
casted = cast(value, float)
if casted is not None:
if cast(casted, str) == value:
return True
return False
elif isinstance(value, float):
cast(value, float)
return True
return False
# group by attributes
by_attributes = dict()
for idx_img, ann in items:
# ignore numeric attributes
filtered = {}
for attr, value in ann.attributes.items():
if _is_float(value):
continue
filtered[attr] = value
attributes = tuple(sorted(filtered.items()))
if attributes not in by_attributes:
by_attributes[attributes] = []
by_attributes[attributes].append(idx_img)
return by_attributes
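    # Illustrative example (added for exposition): annotations that share the
    # same categorical attributes land in one bucket, while float-like values
    # such as "score": "0.9" are treated as numerical and ignored:
    #   items = [(0, ann0), (1, ann1)]   # both with attributes {"occluded": False, "score": "0.9"}
    #   _TaskSpecificSplit._group_by_attr(items)  -> {(("occluded", False),): [0, 1]}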
def _split_by_attr(
self, datasets, snames, ratio, out_splits, merge_small_classes=True
):
def _split_indice(indice):
sections, _ = self._get_sections(len(indice), ratio)
splits = np.array_split(indice, sections)
for subset, split in zip(snames, splits):
if 0 < len(split):
out_splits[subset].extend(split)
required = self._get_required(ratio)
rest = []
for _, items in datasets.items():
np.random.shuffle(items)
by_attributes = self._group_by_attr(items)
attr_combinations = list(by_attributes.keys())
np.random.shuffle(attr_combinations) # add randomness
for attr in attr_combinations:
indice = by_attributes[attr]
quo = len(indice) // required
if quo > 0:
filtered_size = quo * required
_split_indice(indice[:filtered_size])
rest.extend(indice[filtered_size:])
else:
rest.extend(indice)
quo = len(rest) // required
if quo > 0:
filtered_size = quo * required
_split_indice(rest[:filtered_size])
rest = rest[filtered_size:]
if not merge_small_classes and len(rest) > 0:
_split_indice(rest)
rest = []
if len(rest) > 0:
_split_indice(rest)
def _split_unlabeled(self, unlabeled, by_splits):
"""
split unlabeled data into subsets (detection, classification)
Args:
unlabeled: list of index of unlabeled or multi-labeled data
by_splits: splits up to now
Returns:
by_splits: final splits
"""
dataset_size = len(self._extractor)
_, n_splits = list(self._get_sections(dataset_size, self._sratio))
counts = [len(by_splits[sname]) for sname in self._snames]
expected = [max(0, v) for v in np.subtract(n_splits, counts)]
sections = np.add.accumulate(expected[:-1])
np.random.shuffle(unlabeled)
splits = np.array_split(unlabeled, sections)
for subset, split in zip(self._snames, splits):
if 0 < len(split):
by_splits[subset].extend(split)
def _find_split(self, index):
for subset_indices, subset in self._parts:
if index in subset_indices:
return subset
return DEFAULT_SUBSET_NAME # all the possible remainder --> default
def _split_dataset(self):
raise NotImplementedError()
class _ClassificationSplit(_TaskSpecificSplit):
"""
Splits dataset into subsets(train/val/test) in class-wise manner. |n
Splits dataset images in the specified ratio, keeping the initial class
distribution.|n
|n
Notes:|n
- Each image is expected to have only one Label. Unlabeled or
multi-labeled images will be split into subsets randomly. |n
- If Labels also have attributes, also splits by attribute values.|n
- If there is not enough images in some class or attributes group,
the split ratio can't be guaranteed.|n
|n
Example:|n
|s|s%(prog)s -t classification --subset train:.5 --subset val:.2 --subset test:.3
"""
def __init__(self, dataset, splits, seed=None):
"""
Parameters
----------
dataset : Dataset
splits : list
A list of (subset(str), ratio(float))
The sum of ratios is expected to be 1.
seed : int, optional
"""
super().__init__(dataset, splits, seed)
def _split_dataset(self):
np.random.seed(self._seed)
# support only single label for a DatasetItem
# 1. group by label
by_labels = dict()
annotations, unlabeled = self._get_uniq_annotations(self._extractor)
for idx, ann in enumerate(annotations):
label = getattr(ann, "label", None)
if label not in by_labels:
by_labels[label] = []
by_labels[label].append((idx, ann))
by_splits = dict()
for subset in self._subsets:
by_splits[subset] = []
# 2. group by attributes
self._split_by_attr(by_labels, self._snames, self._sratio, by_splits)
# 3. split unlabeled data
if len(unlabeled) > 0:
self._split_unlabeled(unlabeled, by_splits)
# 4. set parts
self._set_parts(by_splits)
class _ReidentificationSplit(_TaskSpecificSplit):
"""
Splits a dataset for re-identification task.|n
Produces a split with a specified ratio of images, avoiding having same
labels in different subsets.|n
|n
In this task, the test set should consist of images of unseen
people or objects during the training phase. |n
This function splits a dataset in the following way:|n
1. Splits the dataset into 'train + val' and 'test' sets|n
|s|sbased on person or object ID.|n
2. Splits 'test' set into 'test-gallery' and 'test-query' sets|n
|s|sin class-wise manner.|n
3. Splits the 'train + val' set into 'train' and 'val' sets|n
|s|sin the same way.|n
The final subsets would be
'train', 'val', 'test-gallery' and 'test-query'. |n
|n
Notes:|n
- Each image is expected to have a single Label. Unlabeled or multi-labeled
images will be split into 'not-supported'.|n
- Object ID can be described by Label, or by attribute (--attr parameter)|n
- The splits of the test set are controlled by '--query' parameter. |n
|s|sGallery ratio would be 1.0 - query.|n
|n
Example: split a dataset in the specified ratio, split the test set|n
|s|s|s|sinto gallery and query in 1:1 ratio|n
    |s|s%(prog)s -t reid --subset train:.5 --subset val:.2 --subset test:.3 --query .5|n
Example: use 'person_id' attribute for splitting|n
|s|s%(prog)s --attr person_id
"""
_default_query_ratio = 0.5
def __init__(self, dataset, splits, query=None, attr_for_id=None, seed=None):
"""
Parameters
----------
dataset : Dataset
splits : list
A list of (subset(str), ratio(float))
Subset is expected to be one of ["train", "val", "test"].
The sum of ratios is expected to be 1.
query : float
The ratio of 'test-query' set.
The ratio of 'test-gallery' set would be 1.0 - query.
attr_for_id: str
attribute name representing the person/object id.
if this is not specified, label would be used.
seed : int, optional
"""
super().__init__(dataset, splits, seed, restrict=True)
if query is None:
query = self._default_query_ratio
assert 0.0 <= query and query <= 1.0, (
"Query ratio is expected to be in the range " "[0, 1], but got %f" % query
)
test_splits = [("test-query", query), ("test-gallery", 1.0 - query)]
# remove subset name restriction
self._subsets = {"train", "val", "test-gallery", "test-query"}
self._test_splits = test_splits
self._attr_for_id = attr_for_id
def _split_dataset(self):
np.random.seed(self._seed)
id_snames, id_ratio = self._snames, self._sratio
attr_for_id = self._attr_for_id
dataset = self._extractor
# group by ID(attr_for_id)
by_id = dict()
annotations, unlabeled = self._get_uniq_annotations(dataset)
if attr_for_id is None: # use label
for idx, ann in enumerate(annotations):
ID = getattr(ann, "label", None)
if ID not in by_id:
by_id[ID] = []
by_id[ID].append((idx, ann))
else: # use attr_for_id
for idx, ann in enumerate(annotations):
attributes = dict(ann.attributes.items())
assert attr_for_id in attributes, (
"'%s' is expected as an attribute name" % attr_for_id
)
ID = attributes[attr_for_id]
if ID not in by_id:
by_id[ID] = []
by_id[ID].append((idx, ann))
required = self._get_required(id_ratio)
if len(by_id) < required:
log.warning(
"There's not enough IDs, which is %s, "
"so train/val/test ratio can't be guaranteed." % len(by_id)
)
# 1. split dataset into trval and test
# IDs in test set should not exist in train/val set.
test = id_ratio[id_snames.index("test")] if "test" in id_snames else 0
if NEAR_ZERO < test: # has testset
split_ratio = np.array([test, 1.0 - test])
IDs = list(by_id.keys())
np.random.shuffle(IDs)
sections, _ = self._get_sections(len(IDs), split_ratio)
splits = np.array_split(IDs, sections)
testset = {pid: by_id[pid] for pid in splits[0]}
trval = {pid: by_id[pid] for pid in splits[1]}
# follow the ratio of datasetitems as possible.
# naive heuristic: exchange the best item one by one.
expected_count = int(
(len(self._extractor) - len(unlabeled)) * split_ratio[0]
)
testset_total = int(np.sum([len(v) for v in testset.values()]))
self._rebalancing(testset, trval, expected_count, testset_total)
else:
testset = dict()
trval = by_id
by_splits = dict()
for subset in self._subsets:
by_splits[subset] = []
# 2. split 'test' into 'test-gallery' and 'test-query'
if 0 < len(testset):
test_snames = []
test_ratio = []
for sname, ratio in self._test_splits:
test_snames.append(sname)
test_ratio.append(float(ratio))
self._split_by_attr(
testset, test_snames, test_ratio, by_splits, merge_small_classes=False
)
# 3. split 'trval' into 'train' and 'val'
trval_snames = ["train", "val"]
trval_ratio = []
for subset in trval_snames:
if subset in id_snames:
val = id_ratio[id_snames.index(subset)]
else:
val = 0.0
trval_ratio.append(val)
trval_ratio = np.array(trval_ratio)
total_ratio = np.sum(trval_ratio)
if total_ratio < NEAR_ZERO:
trval_splits = list(zip(["train", "val"], trval_ratio))
log.warning(
"Sum of ratios is expected to be positive, "
"got %s, which is %s" % (trval_splits, total_ratio)
)
else:
trval_ratio /= total_ratio # normalize
self._split_by_attr(
trval, trval_snames, trval_ratio, by_splits, merge_small_classes=False
)
# split unlabeled data into 'not-supported'.
if len(unlabeled) > 0:
self._subsets.add("not-supported")
by_splits["not-supported"] = unlabeled
self._set_parts(by_splits)
@staticmethod
def _rebalancing(test, trval, expected_count, testset_total):
diffs = dict()
for id_test, items_test in test.items():
count_test = len(items_test)
for id_trval, items_trval in trval.items():
count_trval = len(items_trval)
diff = count_trval - count_test
if diff == 0:
continue # exchange has no effect
if diff not in diffs:
diffs[diff] = [(id_test, id_trval)]
else:
diffs[diff].append((id_test, id_trval))
if len(diffs) == 0: # nothing would be changed by exchange
return
exchanges = []
while True:
target_diff = expected_count - testset_total
# find nearest diff.
keys = np.array(list(diffs.keys()))
idx = (np.abs(keys - target_diff)).argmin()
nearest = keys[idx]
if abs(target_diff) <= abs(target_diff - nearest):
break
choice = np.random.choice(range(len(diffs[nearest])))
id_test, id_trval = diffs[nearest][choice]
testset_total += nearest
new_diffs = dict()
for diff, IDs in diffs.items():
new_list = []
for id1, id2 in IDs:
if id1 == id_test or id2 == id_trval:
continue
new_list.append((id1, id2))
if 0 < len(new_list):
new_diffs[diff] = new_list
diffs = new_diffs
exchanges.append((id_test, id_trval))
# exchange
for id_test, id_trval in exchanges:
test[id_trval] = trval.pop(id_trval)
trval[id_test] = test.pop(id_test)
class _InstanceSpecificSplit(_TaskSpecificSplit):
"""
Splits a dataset into subsets(train/val/test),
using object annotations as a basis for splitting.|n
Tries to produce an image split with the specified ratio, keeping the
initial distribution of class objects.|n
|n
each image can have multiple object annotations -
(instance bounding boxes, masks, polygons). Since an image shouldn't be included
in multiple subsets at the same time, and image annotations
shouldn't be split, in general, dataset annotations are unlikely to be split
exactly in the specified ratio. |n
This split tries to split dataset images as close as possible
to the specified ratio, keeping the initial class distribution.|n
|n
Notes:|n
- Each image is expected to have one or more annotations.|n
- Only bbox annotations are considered in detection task.|n
- Mask or Polygon annotations are considered in segmentation task.|n
|n
Example: split dataset so that each object class annotations were split|n
|s|s|s|sin the specified ratio between subsets|n
|s|s%(prog)s -t detection --subset train:.5 --subset val:.2 --subset test:.3 |n
|s|s%(prog)s -t segmentation --subset train:.5 --subset val:.2 --subset test:.3
"""
def __init__(self, dataset, splits, task, seed=None):
"""
Parameters
----------
dataset : Dataset
splits : list
A list of (subset(str), ratio(float))
The sum of ratios is expected to be 1.
seed : int, optional
"""
super().__init__(dataset, splits, seed)
if task == SplitTask.detection.name:
self.annotation_type = [AnnotationType.bbox]
elif task == SplitTask.segmentation.name:
self.annotation_type = [AnnotationType.mask, AnnotationType.polygon]
def _group_by_labels(self, dataset):
by_labels = dict()
unlabeled = []
for idx, item in enumerate(dataset):
instance_anns = [a for a in item.annotations if a.type in self.annotation_type]
if len(instance_anns) == 0:
unlabeled.append(idx)
continue
for instance_ann in instance_anns:
label = getattr(instance_ann, "label", None)
if label not in by_labels:
by_labels[label] = [(idx, instance_ann)]
else:
by_labels[label].append((idx, instance_ann))
return by_labels, unlabeled
def _split_dataset(self):
np.random.seed(self._seed)
subsets, sratio = self._snames, self._sratio
# 1. group by bbox label
by_labels, unlabeled = self._group_by_labels(self._extractor)
# 2. group by attributes
required = self._get_required(sratio)
by_combinations = list()
for _, items in by_labels.items():
by_attributes = self._group_by_attr(items)
            # merge groups that have too few samples.
            attr_combinations = list(by_attributes.keys())
            np.random.shuffle(attr_combinations)  # add randomness
cluster = []
min_cluster = max(required, len(items) * 0.01) # temp solution
for attr in attr_combinations:
indice = by_attributes[attr]
if len(indice) >= min_cluster:
by_combinations.append(indice)
else:
cluster.extend(indice)
if len(cluster) >= min_cluster:
by_combinations.append(cluster)
cluster = []
if len(cluster) > 0:
by_combinations.append(cluster)
cluster = []
total = len(self._extractor)
# total number of GT samples per label-attr combinations
n_combs = [len(v) for v in by_combinations]
# 3-1. initially count per-image GT samples
counts_all = {}
for idx_img in range(total):
if idx_img not in unlabeled:
counts_all[idx_img] = dict()
for idx_comb, indice in enumerate(by_combinations):
for idx_img in indice:
if idx_comb not in counts_all[idx_img]:
counts_all[idx_img][idx_comb] = 1
else:
counts_all[idx_img][idx_comb] += 1
by_splits = dict()
for sname in self._subsets:
by_splits[sname] = []
target_ins = [] # target instance numbers to be split
for sname, ratio in zip(subsets, sratio):
target_ins.append([sname, np.array(n_combs) * ratio])
init_scores = {}
for idx_img, distributions in counts_all.items():
norm_sum = 0.0
for idx_comb, dis in distributions.items():
norm_sum += dis / n_combs[idx_comb]
init_scores[idx_img] = norm_sum
by_scores = dict()
for idx_img, score in init_scores.items():
if score not in by_scores:
by_scores[score] = [idx_img]
else:
by_scores[score].append(idx_img)
        # helper functions that keep the number of annotations from exceeding the target_ins counts
def compute_penalty(counts, n_combs):
p = 0
for idx_comb, v in counts.items():
if n_combs[idx_comb] <= 0:
p += 1
else:
p += max(0, (v / n_combs[idx_comb]) - 1.0)
return p
def update_nc(counts, n_combs):
for idx_comb, v in counts.items():
n_combs[idx_comb] = n_combs[idx_comb] - v
# 3-2. assign each DatasetItem to a split, one by one
actual_ins = copy.deepcopy(target_ins)
for score in sorted(by_scores.keys(), reverse=True):
indice = by_scores[score]
np.random.shuffle(indice) # add randomness for the same score
for idx in indice:
counts = counts_all[idx]
# shuffling split order to add randomness
# when two or more splits have the same penalty value
np.random.shuffle(actual_ins)
pp = []
for sname, nc in actual_ins:
if np.sum(nc) <= 0:
# the split has enough instances,
# stop adding more images to this split
pp.append(1e08)
else:
# compute penalty based on the number of GT samples
# added in the split
pp.append(compute_penalty(counts, nc))
# we push an image to a split with the minimum penalty
midx = np.argmin(pp)
sname, nc = actual_ins[midx]
by_splits[sname].append(idx)
update_nc(counts, nc)
# split unlabeled data
if len(unlabeled) > 0:
self._split_unlabeled(unlabeled, by_splits)
self._set_parts(by_splits)
|
the-stack_0_16198 | from django.shortcuts import render
from django.utils.safestring import mark_safe
import json
from user.models import User
def room(request, room_name, user_name):
print('****')
room_json = mark_safe(json.dumps(room_name))
user_json = mark_safe(json.dumps(user_name))
online = User.objects.filter(is_login=1)
offline = User.objects.filter(is_login=0)
data = User.objects.get(username=user_name)
return render(request, 'chat/room.html', {
'room_json': room_json,
'user_json': user_json,
'roomname': room_name,
'username': user_name,
'online': online,
'offline': offline,
        'data': data,
})
|
the-stack_0_16201 | import urllib.request,json
from .models import News_Sources, News_Articles
apiKey = None
base_url = None
news_article_url = None
def configure_request(app):
global apiKey, base_url,news_article_url
apiKey = app.config['NEWS_API_KEY']
base_url = app.config['NEWS_SOURCE_API_BASE_URL']
news_article_url = app.config['NEWS_ARTICLE_API_BASE_URL']
def get_news(category):
"""
Function that gets the json response to our url request
"""
get_news_url = base_url.format(category,apiKey)
with urllib.request.urlopen(get_news_url) as url:
get_news_data = url.read()
get_news_response = json.loads(get_news_data)
news_results = None
if get_news_response['sources']:
news_results_list = get_news_response['sources']
news_results = process_news_sources(news_results_list)
return news_results
def process_news_sources(news_list):
"""
    Function that processes the news source results and transforms them into a list of objects
    Args:
        news_list: A list of dictionaries that contain news source details
    Returns:
        news_results: A list of News_Sources objects
"""
news_results=[]
for news_item in news_list:
id = news_item.get('id')
name= news_item.get('name')
description = news_item.get('description')
url = news_item.get('url')
category = news_item.get('category')
country = news_item.get('country')
news_object= News_Sources(id,name,description, url, category, country)
news_results.append(news_object)
return news_results
def get_articles(id):
'''
Function that gets the json response to the url request
'''
get_articles_url = news_article_url.format(id,apiKey)
with urllib.request.urlopen(get_articles_url) as url:
get_articles_data = url.read()
get_articles_response = json.loads(get_articles_data)
news_articles_results = None
if get_articles_response['articles']:
articles_results_list = get_articles_response['articles']
news_articles_results = process_articles(articles_results_list)
return news_articles_results
def process_articles(article_list):
"""
    Function that processes the news article results and transforms them into a list of objects
    Args:
        article_list: A list of dictionaries that contain news article details
    Returns:
        news_articles_results: A list of News_Articles objects
"""
news_articles_results = []
for article_item in article_list:
title = article_item.get('title')
author = article_item.get('author')
description = article_item.get('description')
url= article_item.get('url')
urlToImage = article_item.get('urlToImage')
publishedAt = article_item.get('publishedAt')
articles_object = News_Articles(title,author,description, url, urlToImage, publishedAt)
news_articles_results.append(articles_object)
return news_articles_results |
the-stack_0_16203 | import _plotly_utils.basevalidators
class XbinsValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="xbins", parent_name="histogram2dcontour", **kwargs):
super(XbinsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "XBins"),
data_docs=kwargs.pop(
"data_docs",
"""
end
Sets the end value for the x axis bins. The
last bin may not end exactly at this value, we
increment the bin edge by `size` from `start`
until we reach or exceed `end`. Defaults to the
maximum data value. Like `start`, for dates use
a date string, and for category data `end` is
based on the category serial numbers.
size
Sets the size of each x axis bin. Default
behavior: If `nbinsx` is 0 or omitted, we
choose a nice round bin size such that the
number of bins is about the same as the typical
number of samples in each bin. If `nbinsx` is
provided, we choose a nice round bin size
giving no more than that many bins. For date
data, use milliseconds or "M<n>" for months, as
in `axis.dtick`. For category data, the number
of categories to bin together (always defaults
to 1).
start
Sets the starting value for the x axis bins.
Defaults to the minimum data value, shifted
down if necessary to make nice round values and
to remove ambiguous bin edges. For example, if
most of the data is integers we shift the bin
edges 0.5 down, so a `size` of 5 would have a
default `start` of -0.5, so it is clear that
0-4 are in the first bin, 5-9 in the second,
but continuous data gets a start of 0 and bins
[0,5), [5,10) etc. Dates behave similarly, and
`start` should be a date string. For category
data, `start` is based on the category serial
numbers, and defaults to -0.5.
""",
),
**kwargs
)
|
the-stack_0_16204 | # Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encoders for the speech model."""
import lingvo.compat as tf
from lingvo.core import base_layer
from lingvo.core import py_utils
from lingvo.core import spectrum_augmenter
from lingvo.tasks.asr import blocks
class AsrEncoder(base_layer.BaseLayer):
"""Speech encoder version 2."""
@classmethod
def Params(cls):
"""Configs for AsrEncoder."""
p = super().Params()
# spec-augment
p.Define('specaugment_network',
spectrum_augmenter.SpectrumAugmenter.Params(),
'Configs template for the augmentation network.')
p.Define('use_specaugment', False, 'Use specaugmentation or not.')
# temporal downsampling, use one of the two
p.Define('conv_subsampler', blocks.ConvolutionalDownsampler.Params(),
'Convolution subsampling layer params')
p.Define('stacking_subsampler', blocks.InputStackingDownsampler.Params(),
'Stacking subsampling layer params')
p.Define('use_conv_subsampler', False, 'Enable p.conv_subsampler')
p.Define('use_stacking_subsampler', False, 'Enable p.stacking_subsampler')
# actual encoding layers, use one of these
p.Define('lstm_block', blocks.LSTMBlock.Params(), 'LSTM layer params')
# p.Define('conformer_block', blocks.ConformerBlock.Params(), 'Conformer specs')
return p
def __init__(self, params):
super().__init__(params)
p = self.params
##### Use specAugment or not ####
if p.use_specaugment:
self.CreateChild('specaugment', p.specaugment_network.Copy())
##### handle sub-sampling ####
assert not (p.use_conv_subsampler and p.use_stacking_subsampler), \
'Please use only one form of time subsampling'
if p.use_conv_subsampler:
self.CreateChild('sub', p.conv_subsampler.Copy())
else:
assert p.use_stacking_subsampler, 'Need one stacking module'
self.CreateChild('sub', p.stacking_subsampler.Copy())
stack_out_feats = self.sub.output_dim
##### handle encoding #####
if p.lstm_block is not None:
if p.lstm_block.input_feats is None:
p.lstm_block.input_feats = stack_out_feats
assert p.lstm_block.input_feats == stack_out_feats
self.CreateChildren('enc', p.lstm_block.Copy())
@property
def output_dim(self):
return self.enc.output_dim
@property
def _use_functional(self):
return True
@property
def supports_streaming(self):
return False
def zero_state(self, theta, batch_size):
return py_utils.NestedMap()
def FProp(self, theta, batch, state0=None):
"""Encodes source as represented by 'inputs' and 'paddings'.
Args:
theta: A NestedMap object containing weights' values of this
layer and its children layers.
batch: A NestedMap with fields:
- src_inputs - The inputs tensor. It is expected to be of shape [batch,
time, feature_dim, channels].
- paddings - The paddings tensor. It is expected to be of shape [batch,
time].
state0: Recurrent input state. Not supported/ignored by this encoder.
Returns:
A NestedMap containing
- 'encoded': a feature tensor of shape [time, batch, depth]
- 'padding': a 0/1 tensor of shape [time, batch]
- 'state': the updated recurrent state
"""
p = self.params
inputs, paddings = batch.src_inputs, batch.paddings
with tf.name_scope(p.name):
if p.use_specaugment and not self.do_eval:
inputs, paddings = self.specaugment.FProp(theta.specaugment, inputs,
paddings)
inputs, paddings = self.sub.FProp(theta.sub, inputs, paddings)
encoded, padding = self.enc.FProp(theta.enc, inputs, paddings)
return py_utils.NestedMap(encoded=encoded,
padding=padding,
state=py_utils.NestedMap())
|
the-stack_0_16205 | import random
from colour import Color
import numpy as np
from manimlib.constants import PALETTE
from manimlib.constants import WHITE
from manimlib.utils.bezier import interpolate
from manimlib.utils.simple_functions import clip_in_place
from manimlib.utils.space_ops import normalize
def color_to_rgb(color):
if isinstance(color, str):
return hex_to_rgb(color)
elif isinstance(color, Color):
return np.array(color.get_rgb())
else:
raise Exception("Invalid color type")
def color_to_rgba(color, alpha=1):
return np.array([*color_to_rgb(color), alpha])
def rgb_to_color(rgb):
try:
return Color(rgb=rgb)
except:
return Color(WHITE)
def rgba_to_color(rgba):
return rgb_to_color(rgba[:3])
def rgb_to_hex(rgb):
return "#" + "".join('%02x' % int(255 * x) for x in rgb)
def hex_to_rgb(hex_code):
hex_part = hex_code[1:]
if len(hex_part) == 3:
"".join([2 * c for c in hex_part])
return np.array([
int(hex_part[i:i + 2], 16) / 255
for i in range(0, 6, 2)
])
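# Illustrative sketch (added for exposition, not part of the original module):
# both the long and the short hex forms decode to the same [0, 1] floats once
# the 3-digit form has been expanded above.
def _hex_to_rgb_example():
    """Show 6-digit and 3-digit hex codes decoding to RGB floats."""
    assert np.allclose(hex_to_rgb("#ff8000"), [1.0, 128 / 255, 0.0])
    assert np.allclose(hex_to_rgb("#fff"), [1.0, 1.0, 1.0])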
def invert_color(color):
return rgb_to_color(1.0 - color_to_rgb(color))
def color_to_int_rgb(color):
return (255 * color_to_rgb(color)).astype('uint8')
def color_to_int_rgba(color, opacity=1.0):
alpha = int(255 * opacity)
return np.append(color_to_int_rgb(color), alpha)
def color_gradient(reference_colors, length_of_output):
if length_of_output == 0:
return reference_colors[0]
rgbs = list(map(color_to_rgb, reference_colors))
alphas = np.linspace(0, (len(rgbs) - 1), length_of_output)
floors = alphas.astype('int')
alphas_mod1 = alphas % 1
# End edge case
alphas_mod1[-1] = 1
floors[-1] = len(rgbs) - 2
return [
rgb_to_color(interpolate(rgbs[i], rgbs[i + 1], alpha))
for i, alpha in zip(floors, alphas_mod1)
]
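# Illustrative sketch (added for exposition, not part of the original module):
# a three-step gradient between black and white passes through mid grey.
def _color_gradient_example():
    """Interpolate two reference colors into three output colors."""
    colors = color_gradient(["#000000", "#ffffff"], 3)
    assert len(colors) == 3
    assert np.allclose(color_to_rgb(colors[1]), [0.5, 0.5, 0.5], atol=1 / 255)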
def interpolate_color(color1, color2, alpha):
rgb = interpolate(color_to_rgb(color1), color_to_rgb(color2), alpha)
return rgb_to_color(rgb)
def average_color(*colors):
rgbs = np.array(list(map(color_to_rgb, colors)))
mean_rgb = np.apply_along_axis(np.mean, 0, rgbs)
return rgb_to_color(mean_rgb)
def random_bright_color():
color = random_color()
curr_rgb = color_to_rgb(color)
new_rgb = interpolate(
curr_rgb, np.ones(len(curr_rgb)), 0.5
)
return Color(rgb=new_rgb)
def random_color():
return random.choice(PALETTE)
def get_shaded_rgb(rgb, point, unit_normal_vect, light_source):
to_sun = normalize(light_source - point)
factor = 0.5 * np.dot(unit_normal_vect, to_sun)**3
if factor < 0:
factor *= 0.5
result = rgb + factor
    clip_in_place(result, 0, 1)  # clip in place so the returned rgb stays within [0, 1]
return result
|
the-stack_0_16207 | # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Implements the adaptive form of the loss.
You should only use this function if 1) you want the loss to change it's shape
during training (otherwise use general.py) or 2) you want to impose the loss on
a wavelet or DCT image representation, a only this function has easy support for
that.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from robust_loss import distribution
from robust_loss import util
from robust_loss import wavelet
def _check_scale(scale_lo, scale_init):
"""Helper function for checking `scale_lo` and `scale_init`."""
if not np.isscalar(scale_lo):
raise ValueError('`scale_lo` must be a scalar, but is of type {}'.format(
type(scale_lo)))
if not np.isscalar(scale_init):
raise ValueError('`scale_init` must be a scalar, but is of type {}'.format(
type(scale_init)))
if not scale_lo > 0:
raise ValueError('`scale_lo` must be > 0, but is {}'.format(scale_lo))
if not scale_init >= scale_lo:
raise ValueError('`scale_init` = {} must be >= `scale_lo` = {}'.format(
scale_init, scale_lo))
def _construct_scale(x, scale_lo, scale_init, float_dtype, var_suffix=''):
"""Helper function for constructing scale variables."""
if scale_lo == scale_init:
# If the difference between the minimum and initial scale is zero, then
# we just fix `scale` to be a constant.
scale = tf.tile(
tf.cast(scale_init, float_dtype)[tf.newaxis, tf.newaxis],
(1, x.shape[1]))
else:
# Otherwise we construct a "latent" scale variable and define `scale`
# As an affine function of a softplus on that latent variable.
latent_scale = tf.get_variable(
'LatentScale' + var_suffix, initializer=tf.zeros((1, x.shape[1]), float_dtype))
scale = util.affine_softplus(latent_scale, lo=scale_lo, ref=scale_init)
return scale
def lossfun(x,
alpha_lo=0.001,
alpha_hi=1.999,
alpha_init=None,
scale_lo=1e-5,
scale_init=1.,
var_suffix='',
**kwargs):
"""Computes the adaptive form of the robust loss on a matrix.
This function behaves differently from general.lossfun() and
distribution.nllfun(), which are "stateless", allow the caller to specify the
shape and scale of the loss, and allow for arbitrary sized inputs. This
function only allows for rank-2 inputs for the residual `x`, and expects that
`x` is of the form [batch_index, dimension_index]. This function then
constructs free parameters (TF variables) that define the alpha and scale
parameters for each dimension of `x`, such that all alphas are in
(`alpha_lo`, `alpha_hi`) and all scales are in (`scale_lo`, Infinity).
The assumption is that `x` is, say, a matrix where x[i,j] corresponds to a
pixel at location j for image i, with the idea being that all pixels at
location j should be modeled with the same shape and scale parameters across
all images in the batch. This function also returns handles to the scale and
shape parameters being optimized over, mostly for debugging and introspection.
If the user wants to fix alpha or scale to be a constant, this can be done by
setting alpha_lo=alpha_hi or scale_lo=scale_init respectively.
Args:
x: The residual for which the loss is being computed. Must be a rank-2
tensor, where the innermost dimension is the batch index, and the
outermost dimension corresponds to different "channels", where this
function will assign each channel its own variable shape (alpha) and scale
parameters that are constructed as TF variables and can be optimized over.
Must be a TF tensor or numpy array of single or double precision floats.
The precision of `x` will determine the precision of the latent variables
used to model scale and alpha internally.
alpha_lo: The lowest possible value for loss's alpha parameters, must be >=
0 and a scalar. Should probably be in (0, 2).
alpha_hi: The highest possible value for loss's alpha parameters, must be >=
alpha_lo and a scalar. Should probably be in (0, 2).
alpha_init: The value that the loss's alpha parameters will be initialized
to, must be in (`alpha_lo`, `alpha_hi`), unless `alpha_lo` == `alpha_hi`
in which case this will be ignored. Defaults to (`alpha_lo` + `alpha_hi`)
/ 2
scale_lo: The lowest possible value for the loss's scale parameters. Must be
> 0 and a scalar. This value may have more of an effect than you think, as
the loss is unbounded as scale approaches zero (say, at a delta function).
scale_init: The initial value used for the loss's scale parameters. This
also defines the zero-point of the latent representation of scales, so SGD
may cause optimization to gravitate towards producing scales near this
value.
**kwargs: Arguments to be passed to the underlying distribution.nllfun().
Returns:
A tuple of the form (`loss`, `alpha`, `scale`).
`loss`: a TF tensor of the same type and shape as input `x`, containing
the loss at each element of `x` as a function of `x`, `alpha`, and
`scale`. These "losses" are actually negative log-likelihoods (as produced
by distribution.nllfun()) and so they are not actually bounded from below
by zero. You'll probably want to minimize their sum or mean.
`scale`: a TF tensor of the same type as x, of size (1, x.shape[1]), as we
construct a scale variable for each dimension of `x` but not for each
batch element. This contains the current estimated scale parameter for
each dimension, and will change during optimization.
`alpha`: a TF tensor of the same type as x, of size (1, x.shape[1]), as we
construct an alpha variable for each dimension of `x` but not for each
batch element. This contains the current estimated alpha parameter for
each dimension, and will change during optimization.
Raises:
ValueError: If any of the arguments are invalid.
"""
_check_scale(scale_lo, scale_init)
if not np.isscalar(alpha_lo):
raise ValueError('`alpha_lo` must be a scalar, but is of type {}'.format(
type(alpha_lo)))
if not np.isscalar(alpha_hi):
raise ValueError('`alpha_hi` must be a scalar, but is of type {}'.format(
type(alpha_hi)))
if alpha_init is not None and not np.isscalar(alpha_init):
raise ValueError(
'`alpha_init` must be None or a scalar, but is of type {}'.format(
type(alpha_init)))
if not alpha_lo >= 0:
raise ValueError('`alpha_lo` must be >= 0, but is {}'.format(alpha_lo))
if not alpha_hi >= alpha_lo:
raise ValueError('`alpha_hi` = {} must be >= `alpha_lo` = {}'.format(
alpha_hi, alpha_lo))
if alpha_init is not None and alpha_lo != alpha_hi:
if not (alpha_init > alpha_lo and alpha_init < alpha_hi):
raise ValueError(
'`alpha_init` = {} must be in (`alpha_lo`, `alpha_hi`) = ({} {})'
.format(alpha_init, alpha_lo, alpha_hi))
float_dtype = x.dtype
assert_ops = [tf.Assert(tf.equal(tf.rank(x), 2), [tf.rank(x)])]
with tf.control_dependencies(assert_ops):
if alpha_lo == alpha_hi:
# If the range of alphas is a single item, then we just fix `alpha` to be
# a constant.
alpha = tf.tile(
tf.cast(alpha_lo, float_dtype)[tf.newaxis, tf.newaxis],
(1, x.shape[1]))
else:
# Otherwise we construct a "latent" alpha variable and define `alpha`
# As an affine function of a sigmoid on that latent variable, initialized
# such that `alpha` starts off as `alpha_init`.
if alpha_init is None:
alpha_init = (alpha_lo + alpha_hi) / 2.
latent_alpha_init = util.inv_affine_sigmoid(
alpha_init, lo=alpha_lo, hi=alpha_hi)
latent_alpha = tf.get_variable(
'LatentAlpha' + var_suffix,
initializer=tf.fill((1, x.shape[1]),
tf.cast(latent_alpha_init, dtype=float_dtype)))
alpha = util.affine_sigmoid(latent_alpha, lo=alpha_lo, hi=alpha_hi)
scale = _construct_scale(x, scale_lo, scale_init, float_dtype, var_suffix=var_suffix)
loss = distribution.nllfun(x, alpha, scale, **kwargs)
return loss, alpha, scale
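# Illustrative usage sketch (added for exposition, not part of the original
# module). In TF1-style graph mode the free shape/scale variables are created
# on the first call, so a typical training setup looks like the commented
# lines below; `model_prediction` and `ground_truth` stand in for the caller's
# own tensors of shape [batch, num_dims].
#
#   x = model_prediction - ground_truth
#   loss, alpha, scale = lossfun(x)
#   train_op = tf.train.AdamOptimizer(1e-3).minimize(tf.reduce_mean(loss))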
def lossfun_students(x, scale_lo=1e-5, scale_init=1., var_suffix=''):
"""A variant of lossfun() that uses the NLL of a Student's t-distribution.
Args:
x: The residual for which the loss is being computed. Must be a rank-2
tensor, where the innermost dimension is the batch index, and the
outermost dimension corresponds to different "channels", where this
function will assign each channel its own variable shape (log-df) and
scale parameters that are constructed as TF variables and can be optimized
over. Must be a TF tensor or numpy array of single or double precision
floats. The precision of `x` will determine the precision of the latent
variables used to model scale and log-df internally.
scale_lo: The lowest possible value for the loss's scale parameters. Must be
> 0 and a scalar. This value may have more of an effect than you think, as
the loss is unbounded as scale approaches zero (say, at a delta function).
scale_init: The initial value used for the loss's scale parameters. This
also defines the zero-point of the latent representation of scales, so SGD
may cause optimization to gravitate towards producing scales near this
value.
Returns:
A tuple of the form (`loss`, `log_df`, `scale`).
`loss`: a TF tensor of the same type and shape as input `x`, containing
the loss at each element of `x` as a function of `x`, `log_df`, and
`scale`. These "losses" are actually negative log-likelihoods (as produced
by distribution.nllfun()) and so they are not actually bounded from below
by zero. You'll probably want to minimize their sum or mean.
`scale`: a TF tensor of the same type as x, of size (1, x.shape[1]), as we
construct a scale variable for each dimension of `x` but not for each
batch element. This contains the current estimated scale parameter for
each dimension, and will change during optimization.
`log_df`: a TF tensor of the same type as x, of size (1, x.shape[1]), as we
construct an log-DF variable for each dimension of `x` but not for each
batch element. This contains the current estimated log(degrees-of-freedom)
parameter for each dimension, and will change during optimization.
Raises:
ValueError: If any of the arguments are invalid.
"""
_check_scale(scale_lo, scale_init)
float_dtype = x.dtype
assert_ops = [tf.Assert(tf.equal(tf.rank(x), 2), [tf.rank(x)])]
with tf.control_dependencies(assert_ops):
    # Append `var_suffix` so separate invocations construct distinct variables.
    log_df = tf.get_variable(
        name='LogDf' + var_suffix,
        initializer=tf.zeros((1, x.shape[1]), float_dtype))
scale = _construct_scale(x, scale_lo, scale_init, float_dtype, var_suffix=var_suffix)
loss = util.students_t_nll(x, tf.math.exp(log_df), scale)
return loss, log_df, scale
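# Hedged usage sketch (illustrative only, not part of the original module):
# lossfun_students() is called like lossfun(), but hands back a log-df variable
# in place of alpha.
def _example_lossfun_students_usage(x):
  """Returns the mean Student's t NLL over a rank-2 residual tensor `x`."""
  loss, log_df, scale = lossfun_students(x, scale_lo=1e-5, scale_init=1.)
  return tf.reduce_mean(loss), log_df, scale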
def image_lossfun(x,
color_space='YUV',
representation='CDF9/7',
wavelet_num_levels=5,
wavelet_scale_base=1,
use_students_t=False,
summarize_loss=True,
**kwargs):
"""Computes the adaptive form of the robust loss on a set of images.
This function is a wrapper around lossfun() above. Like lossfun(), this
function is not "stateless" --- it requires inputs of a specific shape and
size, and constructs TF variables describing each non-batch dimension in `x`.
`x` is expected to be the difference between sets of RGB images, and the other
arguments to this function allow for the color space and spatial
representation of `x` to be changed before the loss is imposed. By default,
this function uses a CDF9/7 wavelet decomposition in a YUV color space, which
often works well. This function also returns handles to the scale and
shape parameters (both in the shape of images) being optimized over,
and summarizes both parameters in TensorBoard.
Args:
x: A set of image residuals for which the loss is being computed. Must be a
rank-4 tensor of size (num_batches, width, height, color_channels). This
is assumed to be a set of differences between RGB images.
color_space: The color space that `x` will be transformed into before
computing the loss. Must be 'RGB' (in which case no transformation is
applied) or 'YUV' (in which case we actually use a volume-preserving
scaled YUV colorspace so that log-likelihoods still have meaning, see
util.rgb_to_syuv()). Note that changing this argument does not change the
assumption that `x` is the set of differences between RGB images, it just
changes what color space `x` is converted to from RGB when computing the
loss.
representation: The spatial image representation that `x` will be
transformed into after converting the color space and before computing the
loss. If this is a valid type of wavelet according to
wavelet.generate_filters() then that is what will be used, but we also
support setting this to 'DCT' which applies a 2D DCT to the images, and to
'PIXEL' which applies no transformation to the image, thereby causing the
loss to be imposed directly on pixels.
wavelet_num_levels: If `representation` is a kind of wavelet, this is the
number of levels used when constructing wavelet representations. Otherwise
      this is ignored. Should probably be set to the largest value supported
      by the input resolution, such as that produced by
      wavelet.get_max_num_levels().
wavelet_scale_base: If `representation` is a kind of wavelet, this is the
base of the scaling used when constructing wavelet representations.
Otherwise this is ignored. For image_lossfun() to be volume preserving (a
useful property when evaluating generative models) this value must be ==
1. If the goal of this loss isn't proper statistical modeling, then
modifying this value (say, setting it to 0.5 or 2) may significantly
improve performance.
use_students_t: If true, use the NLL of Student's T-distribution instead
of the adaptive loss. This causes all `alpha_*` inputs to be ignored.
summarize_loss: Whether or not to make TF summaries describing the latent
state of the loss function. True by default.
**kwargs: Arguments to be passed to the underlying lossfun().
Returns:
A tuple of the form (`loss`, `alpha`, `scale`). If use_students_t == True,
then `log(df)` is returned instead of `alpha`.
`loss`: a TF tensor of the same type and shape as input `x`, containing
the loss at each element of `x` as a function of `x`, `alpha`, and
`scale`. These "losses" are actually negative log-likelihoods (as produced
by distribution.nllfun()) and so they are not actually bounded from below
by zero. You'll probably want to minimize their sum or mean.
`scale`: a TF tensor of the same type as x, of size
(width, height, color_channels),
as we construct a scale variable for each spatial and color dimension of `x`
but not for each batch element. This contains the current estimated scale
parameter for each dimension, and will change during optimization.
`alpha`: a TF tensor of the same type as x, of size
(width, height, color_channels),
as we construct an alpha variable for each spatial and color dimension of
`x` but not for each batch element. This contains the current estimated
alpha parameter for each dimension, and will change during optimization.
Raises:
    ValueError: if `color_space` or `representation` are unsupported color
spaces or image representations, respectively.
"""
color_spaces = ['RGB', 'YUV']
if color_space not in color_spaces:
raise ValueError('`color_space` must be in {}, but is {!r}'.format(
color_spaces, color_space))
representations = wavelet.generate_filters() + ['DCT', 'PIXEL']
if representation not in representations:
raise ValueError('`representation` must be in {}, but is {!r}'.format(
representations, representation))
assert_ops = [tf.Assert(tf.equal(tf.rank(x), 4), [tf.rank(x)])]
with tf.control_dependencies(assert_ops):
if color_space == 'YUV':
x = util.rgb_to_syuv(x)
# If `color_space` == 'RGB', do nothing.
# Reshape `x` from
# (num_batches, width, height, num_channels) to
# (num_batches * num_channels, width, height)
_, width, height, num_channels = x.shape.as_list()
x_stack = tf.reshape(tf.transpose(x, (0, 3, 1, 2)), (-1, width, height))
# Turn each channel in `x_stack` into the spatial representation specified
# by `representation`.
if representation in wavelet.generate_filters():
x_stack = wavelet.flatten(
wavelet.rescale(
wavelet.construct(x_stack, wavelet_num_levels, representation),
wavelet_scale_base))
elif representation == 'DCT':
x_stack = util.image_dct(x_stack)
# If `representation` == 'PIXEL', do nothing.
# Reshape `x_stack` from
# (num_batches * num_channels, width, height) to
# (num_batches, num_channels * width * height)
x_mat = tf.reshape(
tf.transpose(
tf.reshape(x_stack, [-1, num_channels, width, height]),
[0, 2, 3, 1]), [-1, width * height * num_channels])
# Set up the adaptive loss. Note, if `use_students_t` == True then
# `alpha_mat` actually contains "log(df)" values.
if use_students_t:
loss_mat, alpha_mat, scale_mat = lossfun_students(x_mat, **kwargs)
else:
loss_mat, alpha_mat, scale_mat = lossfun(x_mat, **kwargs)
    # Reshape the loss function's outputs to have the same shape as the input.
loss = tf.reshape(loss_mat, [-1, width, height, num_channels])
alpha = tf.reshape(alpha_mat, [width, height, num_channels])
scale = tf.reshape(scale_mat, [width, height, num_channels])
if summarize_loss:
# Summarize the `alpha` and `scale` parameters as images (normalized to
# [0, 1]) and histograms.
# Note that these may look unintuitive unless the colorspace is 'RGB' and
# the image representation is 'PIXEL', as the image summaries (like most
# images) are rendered as RGB pixels.
alpha_min = tf.reduce_min(alpha)
alpha_max = tf.reduce_max(alpha)
tf.summary.image(
'robust/alpha',
(alpha[tf.newaxis] - alpha_min) / (alpha_max - alpha_min + 1e-10))
tf.summary.histogram('robust/alpha', alpha)
log_scale = tf.math.log(scale)
log_scale_min = tf.reduce_min(log_scale)
log_scale_max = tf.reduce_max(log_scale)
tf.summary.image('robust/log_scale',
(log_scale[tf.newaxis] - log_scale_min) /
(log_scale_max - log_scale_min + 1e-10))
tf.summary.histogram('robust/log_scale', log_scale)
return loss, alpha, scale
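# Hedged usage sketch (illustrative only, not part of the original module),
# assuming `pred` and `target` are rank-4 float tensors of RGB images shaped
# (num_batches, width, height, 3).
def _example_image_lossfun_usage(pred, target):
  """Returns the mean adaptive image loss plus handles to alpha and scale."""
  loss, alpha, scale = image_lossfun(
      pred - target, color_space='YUV', representation='CDF9/7')
  return tf.reduce_mean(loss), alpha, scale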
|
the-stack_0_16209 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Detect IP v4 or v6 addresses the system uses to talk to the outside world.
Original code from
https://github.com/vincentbernat/puppet-workstation/blob/master/modules/system/templates/network/ddns-updater.erb
Refactored/modified by Thomas Waldmann to just detect the IP.
"""
from __future__ import print_function
import errno
import socket
IPV4 = "ipv4"
IPV6_ANY = "ipv6"
IPV6_PUBLIC = "ipv6_public"
IPV6_TMP = "ipv6_tmp"
# reserved IPs for documentation/example purposes
OUTSIDE_IPV4 = "192.0.2.1"
OUTSIDE_IPV6 = "2001:db8::1"
# Not everything is available in Python
if not hasattr(socket, "IPV6_ADDR_PREFERENCES"):
socket.IPV6_ADDR_PREFERENCES = 72
if not hasattr(socket, "IPV6_PREFER_SRC_TMP"):
socket.IPV6_PREFER_SRC_TMP = 1
if not hasattr(socket, "IPV6_PREFER_SRC_PUBLIC"):
socket.IPV6_PREFER_SRC_PUBLIC = 2
class GetIpException(Exception):
"""Generic base class for all exceptions raised here."""
def detect_ip(kind):
"""
Detect IP address.
kind can be:
IPV4 - returns IPv4 address
IPV6_ANY - returns any IPv6 address (no preference)
IPV6_PUBLIC - returns public IPv6 address
IPV6_TMP - returns temporary IPV6 address (privacy extensions)
This function either returns an IP address (str) or
raises a GetIpException.
"""
if kind not in (IPV4, IPV6_PUBLIC, IPV6_TMP, IPV6_ANY):
raise ValueError("invalid kind specified")
    # We create a UDP socket and connect it to a public host.
# We query the OS to know what our address is.
# No packet will really be sent since we are using UDP.
af = socket.AF_INET if kind == IPV4 else socket.AF_INET6
s = socket.socket(af, socket.SOCK_DGRAM)
try:
if kind in [IPV6_PUBLIC, IPV6_TMP, ]:
# caller wants some specific kind of IPv6 address (not IPV6_ANY)
try:
if kind == IPV6_PUBLIC:
preference = socket.IPV6_PREFER_SRC_PUBLIC
elif kind == IPV6_TMP:
preference = socket.IPV6_PREFER_SRC_TMP
s.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_ADDR_PREFERENCES, preference)
except socket.error as e:
if e.errno == errno.ENOPROTOOPT:
raise GetIpException("Kernel doesn't support IPv6 address preference")
else:
raise GetIpException("Unable to set IPv6 address preference: %s" % e)
try:
outside_ip = OUTSIDE_IPV4 if kind == IPV4 else OUTSIDE_IPV6
s.connect((outside_ip, 9))
except (socket.error, socket.gaierror) as e:
raise GetIpException(str(e))
ip = s.getsockname()[0]
finally:
s.close()
return ip
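# Hedged usage sketch (illustrative only): detect_ip() raises GetIpException on
# failure instead of returning None, so best-effort callers can catch it and
# fall back to a default.
def _example_detect_ip_or_default(default="127.0.0.1"):
    try:
        return detect_ip(IPV4)
    except GetIpException:
        return default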
if __name__ == "__main__":
print("IP v4:", detect_ip(IPV4)) # noqa
print("IP v6 public:", detect_ip(IPV6_PUBLIC)) # noqa
print("IP v6 tmp:", detect_ip(IPV6_TMP)) # noqa
print("IP v6 any:", detect_ip(IPV6_ANY)) # noqa
|
the-stack_0_16210 | #-*- coding:utf-8 -*-
import json
import copy
from flask import render_template, abort, request, url_for, redirect, g
from flask.ext.babel import gettext
import time
import datetime
from rrd import app
from rrd.model.screen import DashboardScreen
from rrd.model.graph import DashboardGraph
from rrd.model.endpoint import Endpoint, EndpointCounter
from rrd import consts
from rrd.utils.graph_urls import generate_graph_urls
from rrd import config
@app.route("/screen", methods=["GET", "POST"])
def dash_screens():
top_screens = DashboardScreen.gets_by_pid(pid='0') or []
top_screens = sorted(top_screens, key=lambda x: x.name)
return render_template("screen/index.html", **locals())
@app.route("/screen/<int:sid>/delete")
def dash_screen_delete(sid):
screen = DashboardScreen.get(sid)
if not screen:
abort(404, "no such screen")
DashboardScreen.remove(sid)
return redirect("/screen")
@app.route("/screen/<int:sid>/edit", methods=["GET", "POST"])
def dash_screen_edit(sid):
screen = DashboardScreen.get(sid)
if not screen:
abort(404, "no such screen")
if request.method == "POST":
screen_name = request.form.get("screen_name")
screen.update(name=screen_name)
return redirect("/screen/%s" % screen.id)
else:
return render_template("screen/edit.html", **locals())
@app.route("/screen/<int:sid>/clone", methods=["GET", "POST"])
def dash_screen_clone(sid):
screen = DashboardScreen.get(sid)
if not screen:
abort(404, "no such screen")
if request.method == "POST":
screen_name = request.form.get("screen_name")
with_graph = request.form.get("with_graph")
new_s = DashboardScreen.add(screen.pid, screen_name)
if not new_s:
abort(404, gettext("screen create fail"))
if with_graph:
old_graphs = DashboardGraph.gets_by_screen_id(sid)
for o in old_graphs:
DashboardGraph.add(o.title, o.hosts, o.counters, new_s.id,
o.timespan, o.graph_type, o.method, o.position)
return redirect("/screen/%s" % new_s.id)
else:
return render_template("screen/clone.html", **locals())
@app.route("/graph/<int:gid>/delete")
def dash_graph_delete(gid):
graph = DashboardGraph.get(gid)
if not graph:
abort(404, "no such graph")
DashboardGraph.remove(gid)
return redirect("/screen/" + graph.screen_id)
@app.route("/screen/<int:sid>")
def dash_screen(sid):
start = request.args.get("start")
end = request.args.get("end")
top_screens = DashboardScreen.gets_by_pid(pid=0)
top_screens = sorted(top_screens, key=lambda x: x.name)
screen = DashboardScreen.get(sid)
if not screen:
abort(404, "no screen")
if str(screen.pid) == '0':
sub_screens = DashboardScreen.gets_by_pid(pid=sid)
sub_screens = sorted(sub_screens, key=lambda x: x.name)
return render_template("screen/top_screen.html", **locals())
pscreen = DashboardScreen.get(screen.pid)
sub_screens = DashboardScreen.gets_by_pid(pid=screen.pid)
sub_screens = sorted(sub_screens, key=lambda x: x.name)
graphs = DashboardGraph.gets_by_screen_id(screen.id)
all_graphs = []
for graph in graphs:
all_graphs.extend(generate_graph_urls(graph, start, end) or [])
all_graphs = sorted(all_graphs, key=lambda x: (x.position, x.id))
return render_template("screen/screen.html", **locals())
@app.route("/screen/embed/<int:sid>")
def dash_screen_embed(sid):
start = request.args.get("start")
end = request.args.get("end")
screen = DashboardScreen.get(sid)
if not screen:
abort(404, "no screen")
if screen.pid == '0':
abort(404, "top screen")
graphs = DashboardGraph.gets_by_screen_id(screen.id)
all_graphs = []
for graph in graphs:
all_graphs.extend(generate_graph_urls(graph, start, end) or [])
all_graphs = sorted(all_graphs, key=lambda x: (x.position, x.id))
return render_template("screen/screen_embed.html", **locals())
@app.route("/screen/add", methods=["GET", "POST"])
def dash_screen_add():
if request.method == "POST":
name = request.form.get("screen_name")
pid = request.form.get("pid", '0')
screen = DashboardScreen.add(pid, name)
return redirect("/screen/%s" % screen.id)
else:
pid = request.args.get("pid", '0')
try:
screen = DashboardScreen.get(pid)
except:
screen = None
return render_template("screen/add.html", **locals())
@app.route("/screen/<int:sid>/graph", methods=["GET", "POST"])
def dash_graph_add(sid):
all_screens = DashboardScreen.gets_all()
top_screens = [x for x in all_screens if x.pid == '0']
children = []
for t in top_screens:
children.append([x for x in all_screens if x.pid == t.id])
screen = DashboardScreen.get(sid)
if not screen:
abort(404, "no screen")
pscreen = DashboardScreen.get(screen.pid)
if request.method == "POST":
title = request.form.get("title")
hosts = request.form.get("hosts", "").strip()
hosts = hosts and hosts.split("\n") or []
hosts = [x.strip() for x in hosts]
counters = request.form.get("counters", "").strip()
counters = counters and counters.split("\n") or []
counters = [x.strip() for x in counters]
timespan = int(request.form.get("timespan", 3600))
graph_type = request.form.get("graph_type", 'h')
method = request.form.get("method", '').upper()
position = int(request.form.get("position", 0))
graph = DashboardGraph.add(title, hosts, counters, sid,
timespan, graph_type, method, position)
return redirect("/screen/%s" % sid)
else:
limit = 10000
gid = request.args.get("gid")
graph = gid and DashboardGraph.get(gid)
options = {}
options['hosts'] = Endpoint.search(''.split(), limit=limit)
ids = []
for ep in options['hosts']:
ids.append(ep.id)
options['counters'] = EndpointCounter.gets_by_endpoint_ids(ids[0:1], limit=limit)
return render_template("screen/graph_add.html", config=config, **locals())
@app.route("/graph/<int:gid>/edit", methods=["GET", "POST"])
def dash_graph_edit(gid):
error = ""
graph = DashboardGraph.get(gid)
if not graph:
abort(404, "no graph")
all_screens = DashboardScreen.gets_all()
top_screens = [x for x in all_screens if x.pid == '0']
children = []
for t in top_screens:
children.append([x for x in all_screens if x.pid == t.id])
screen = DashboardScreen.get(graph.screen_id)
if not screen:
abort(404, "no screen")
pscreen = DashboardScreen.get(screen.pid)
if request.method == "POST":
ajax = request.form.get("ajax", "")
screen_id = request.form.get("screen_id")
title = request.form.get("title", "").strip()
hosts = request.form.get("hosts", "").strip()
hosts = hosts and hosts.split("\n") or []
hosts = [x.strip() for x in hosts]
counters = request.form.get("counters", "").strip()
counters = counters and counters.split("\n") or []
counters = [x.strip() for x in counters]
timespan = request.form.get("timespan", 3600)
graph_type = request.form.get("graph_type", 'h')
method = request.form.get("method", '').upper()
position = request.form.get("position", 0)
graph = graph.update(title, hosts, counters, screen_id,
timespan, graph_type, method, position)
error = gettext("edit successful")
if not ajax:
return render_template("screen/graph_edit.html", config=config, **locals())
else:
return "ok"
else:
ajax = request.args.get("ajax", "")
return render_template("screen/graph_edit.html", **locals())
@app.route("/graph/multi_edit", methods=["GET", "POST"])
def dash_graph_multi_edit():
ret = {
"ok": False,
"msg": "",
"data": [],
}
if request.method == "POST":
d = request.data
try:
jdata = json.loads(d)
except ValueError:
jdata = None
if not jdata:
return json.dumps({
"ok": False,
"msg": "no_data_post",
})
rows = []
for x in jdata:
rows.append({"id": x["id"], "hosts": x["endpoints"], "counters": x["counters"]})
DashboardGraph.update_multi(rows)
return json.dumps({
"ok": True,
"msg": "",
})
elif request.method == "GET":
sid = request.args.get("sid")
if not sid or not DashboardScreen.get(sid):
ret["msg"] = "no_screen"
return json.dumps(ret)
ret["ok"] = True
graphs = DashboardGraph.gets_by_screen_id(sid)
ret['data'] = [{"id": x.id, "title": x.title, "endpoints": x.hosts, "counters": x.counters} for x in graphs]
return json.dumps(ret)
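# Illustrative note (the endpoint/counter values are made up; the structure is
# taken from the handler above): /graph/multi_edit expects a POST body that is
# a JSON list with one object per graph, e.g.
#   [{"id": 1, "endpoints": ["host-a"], "counters": ["cpu.idle"]}]
# and rewrites each graph's hosts/counters via DashboardGraph.update_multi().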
|
the-stack_0_16211 | import asyncio
import functools
import traceback
import unittest
from tornado.concurrent import Future
from tornado import gen
from tornado.httpclient import HTTPError, HTTPRequest
from tornado.locks import Event
from tornado.log import gen_log, app_log
from tornado.simple_httpclient import SimpleAsyncHTTPClient
from tornado.template import DictLoader
from tornado.testing import AsyncHTTPTestCase, gen_test, bind_unused_port, ExpectLog
from tornado.web import Application, RequestHandler
try:
import tornado.websocket # noqa: F401
from tornado.util import _websocket_mask_python
except ImportError:
# The unittest module presents misleading errors on ImportError
# (it acts as if websocket_test could not be found, hiding the underlying
# error). If we get an ImportError here (which could happen due to
# TORNADO_EXTENSION=1), print some extra information before failing.
traceback.print_exc()
raise
from tornado.websocket import (
WebSocketHandler,
websocket_connect,
WebSocketError,
WebSocketClosedError,
)
try:
from tornado import speedups
except ImportError:
speedups = None # type: ignore
class TestWebSocketHandler(WebSocketHandler):
"""Base class for testing handlers that exposes the on_close event.
This allows for tests to see the close code and reason on the
server side.
"""
def initialize(self, close_future=None, compression_options=None):
self.close_future = close_future
self.compression_options = compression_options
def get_compression_options(self):
return self.compression_options
def on_close(self):
if self.close_future is not None:
self.close_future.set_result((self.close_code, self.close_reason))
class EchoHandler(TestWebSocketHandler):
@gen.coroutine
def on_message(self, message):
try:
yield self.write_message(message, isinstance(message, bytes))
except asyncio.CancelledError:
pass
except WebSocketClosedError:
pass
class ErrorInOnMessageHandler(TestWebSocketHandler):
def on_message(self, message):
1 / 0
class HeaderHandler(TestWebSocketHandler):
def open(self):
methods_to_test = [
functools.partial(self.write, "This should not work"),
functools.partial(self.redirect, "http://localhost/elsewhere"),
functools.partial(self.set_header, "X-Test", ""),
functools.partial(self.set_cookie, "Chocolate", "Chip"),
functools.partial(self.set_status, 503),
self.flush,
self.finish,
]
for method in methods_to_test:
try:
# In a websocket context, many RequestHandler methods
# raise RuntimeErrors.
method()
raise Exception("did not get expected exception")
except RuntimeError:
pass
self.write_message(self.request.headers.get("X-Test", ""))
class HeaderEchoHandler(TestWebSocketHandler):
def set_default_headers(self):
self.set_header("X-Extra-Response-Header", "Extra-Response-Value")
def prepare(self):
for k, v in self.request.headers.get_all():
if k.lower().startswith("x-test"):
self.set_header(k, v)
class NonWebSocketHandler(RequestHandler):
def get(self):
self.write("ok")
class CloseReasonHandler(TestWebSocketHandler):
def open(self):
self.on_close_called = False
self.close(1001, "goodbye")
class AsyncPrepareHandler(TestWebSocketHandler):
@gen.coroutine
def prepare(self):
yield gen.moment
def on_message(self, message):
self.write_message(message)
class PathArgsHandler(TestWebSocketHandler):
def open(self, arg):
self.write_message(arg)
class CoroutineOnMessageHandler(TestWebSocketHandler):
def initialize(self, **kwargs):
super(CoroutineOnMessageHandler, self).initialize(**kwargs)
self.sleeping = 0
@gen.coroutine
def on_message(self, message):
if self.sleeping > 0:
self.write_message("another coroutine is already sleeping")
self.sleeping += 1
yield gen.sleep(0.01)
self.sleeping -= 1
self.write_message(message)
class RenderMessageHandler(TestWebSocketHandler):
def on_message(self, message):
self.write_message(self.render_string("message.html", message=message))
class SubprotocolHandler(TestWebSocketHandler):
def initialize(self, **kwargs):
super(SubprotocolHandler, self).initialize(**kwargs)
self.select_subprotocol_called = False
def select_subprotocol(self, subprotocols):
if self.select_subprotocol_called:
raise Exception("select_subprotocol called twice")
self.select_subprotocol_called = True
if "goodproto" in subprotocols:
return "goodproto"
return None
def open(self):
if not self.select_subprotocol_called:
raise Exception("select_subprotocol not called")
self.write_message("subprotocol=%s" % self.selected_subprotocol)
class OpenCoroutineHandler(TestWebSocketHandler):
def initialize(self, test, **kwargs):
super(OpenCoroutineHandler, self).initialize(**kwargs)
self.test = test
self.open_finished = False
@gen.coroutine
def open(self):
yield self.test.message_sent.wait()
yield gen.sleep(0.010)
self.open_finished = True
def on_message(self, message):
if not self.open_finished:
raise Exception("on_message called before open finished")
self.write_message("ok")
class WebSocketBaseTestCase(AsyncHTTPTestCase):
@gen.coroutine
def ws_connect(self, path, **kwargs):
ws = yield websocket_connect(
"ws://127.0.0.1:%d%s" % (self.get_http_port(), path), **kwargs
)
raise gen.Return(ws)
class WebSocketTest(WebSocketBaseTestCase):
def get_app(self):
self.close_future = Future() # type: Future[None]
return Application(
[
("/echo", EchoHandler, dict(close_future=self.close_future)),
("/non_ws", NonWebSocketHandler),
("/header", HeaderHandler, dict(close_future=self.close_future)),
(
"/header_echo",
HeaderEchoHandler,
dict(close_future=self.close_future),
),
(
"/close_reason",
CloseReasonHandler,
dict(close_future=self.close_future),
),
(
"/error_in_on_message",
ErrorInOnMessageHandler,
dict(close_future=self.close_future),
),
(
"/async_prepare",
AsyncPrepareHandler,
dict(close_future=self.close_future),
),
(
"/path_args/(.*)",
PathArgsHandler,
dict(close_future=self.close_future),
),
(
"/coroutine",
CoroutineOnMessageHandler,
dict(close_future=self.close_future),
),
("/render", RenderMessageHandler, dict(close_future=self.close_future)),
(
"/subprotocol",
SubprotocolHandler,
dict(close_future=self.close_future),
),
(
"/open_coroutine",
OpenCoroutineHandler,
dict(close_future=self.close_future, test=self),
),
],
template_loader=DictLoader({"message.html": "<b>{{ message }}</b>"}),
)
def get_http_client(self):
# These tests require HTTP/1; force the use of SimpleAsyncHTTPClient.
return SimpleAsyncHTTPClient()
def tearDown(self):
super(WebSocketTest, self).tearDown()
RequestHandler._template_loaders.clear()
def test_http_request(self):
# WS server, HTTP client.
response = self.fetch("/echo")
self.assertEqual(response.code, 400)
def test_missing_websocket_key(self):
response = self.fetch(
"/echo",
headers={
"Connection": "Upgrade",
"Upgrade": "WebSocket",
"Sec-WebSocket-Version": "13",
},
)
self.assertEqual(response.code, 400)
def test_bad_websocket_version(self):
response = self.fetch(
"/echo",
headers={
"Connection": "Upgrade",
"Upgrade": "WebSocket",
"Sec-WebSocket-Version": "12",
},
)
self.assertEqual(response.code, 426)
@gen_test
def test_websocket_gen(self):
ws = yield self.ws_connect("/echo")
yield ws.write_message("hello")
response = yield ws.read_message()
self.assertEqual(response, "hello")
def test_websocket_callbacks(self):
websocket_connect(
"ws://127.0.0.1:%d/echo" % self.get_http_port(), callback=self.stop
)
ws = self.wait().result()
ws.write_message("hello")
ws.read_message(self.stop)
response = self.wait().result()
self.assertEqual(response, "hello")
self.close_future.add_done_callback(lambda f: self.stop())
ws.close()
self.wait()
@gen_test
def test_binary_message(self):
ws = yield self.ws_connect("/echo")
ws.write_message(b"hello \xe9", binary=True)
response = yield ws.read_message()
self.assertEqual(response, b"hello \xe9")
@gen_test
def test_unicode_message(self):
ws = yield self.ws_connect("/echo")
ws.write_message(u"hello \u00e9")
response = yield ws.read_message()
self.assertEqual(response, u"hello \u00e9")
@gen_test
def test_render_message(self):
ws = yield self.ws_connect("/render")
ws.write_message("hello")
response = yield ws.read_message()
self.assertEqual(response, "<b>hello</b>")
@gen_test
def test_error_in_on_message(self):
ws = yield self.ws_connect("/error_in_on_message")
ws.write_message("hello")
with ExpectLog(app_log, "Uncaught exception"):
response = yield ws.read_message()
self.assertIs(response, None)
@gen_test
def test_websocket_http_fail(self):
with self.assertRaises(HTTPError) as cm:
yield self.ws_connect("/notfound")
self.assertEqual(cm.exception.code, 404)
@gen_test
def test_websocket_http_success(self):
with self.assertRaises(WebSocketError):
yield self.ws_connect("/non_ws")
@gen_test
def test_websocket_network_fail(self):
sock, port = bind_unused_port()
sock.close()
with self.assertRaises(IOError):
with ExpectLog(gen_log, ".*"):
yield websocket_connect(
"ws://127.0.0.1:%d/" % port, connect_timeout=3600
)
@gen_test
def test_websocket_close_buffered_data(self):
ws = yield websocket_connect("ws://127.0.0.1:%d/echo" % self.get_http_port())
ws.write_message("hello")
ws.write_message("world")
# Close the underlying stream.
ws.stream.close()
@gen_test
def test_websocket_headers(self):
# Ensure that arbitrary headers can be passed through websocket_connect.
ws = yield websocket_connect(
HTTPRequest(
"ws://127.0.0.1:%d/header" % self.get_http_port(),
headers={"X-Test": "hello"},
)
)
response = yield ws.read_message()
self.assertEqual(response, "hello")
@gen_test
def test_websocket_header_echo(self):
# Ensure that headers can be returned in the response.
# Specifically, that arbitrary headers passed through websocket_connect
# can be returned.
ws = yield websocket_connect(
HTTPRequest(
"ws://127.0.0.1:%d/header_echo" % self.get_http_port(),
headers={"X-Test-Hello": "hello"},
)
)
self.assertEqual(ws.headers.get("X-Test-Hello"), "hello")
self.assertEqual(
ws.headers.get("X-Extra-Response-Header"), "Extra-Response-Value"
)
@gen_test
def test_server_close_reason(self):
ws = yield self.ws_connect("/close_reason")
msg = yield ws.read_message()
# A message of None means the other side closed the connection.
self.assertIs(msg, None)
self.assertEqual(ws.close_code, 1001)
self.assertEqual(ws.close_reason, "goodbye")
# The on_close callback is called no matter which side closed.
code, reason = yield self.close_future
# The client echoed the close code it received to the server,
# so the server's close code (returned via close_future) is
# the same.
self.assertEqual(code, 1001)
@gen_test
def test_client_close_reason(self):
ws = yield self.ws_connect("/echo")
ws.close(1001, "goodbye")
code, reason = yield self.close_future
self.assertEqual(code, 1001)
self.assertEqual(reason, "goodbye")
@gen_test
def test_write_after_close(self):
ws = yield self.ws_connect("/close_reason")
msg = yield ws.read_message()
self.assertIs(msg, None)
with self.assertRaises(WebSocketClosedError):
ws.write_message("hello")
@gen_test
def test_async_prepare(self):
# Previously, an async prepare method triggered a bug that would
# result in a timeout on test shutdown (and a memory leak).
ws = yield self.ws_connect("/async_prepare")
ws.write_message("hello")
res = yield ws.read_message()
self.assertEqual(res, "hello")
@gen_test
def test_path_args(self):
ws = yield self.ws_connect("/path_args/hello")
res = yield ws.read_message()
self.assertEqual(res, "hello")
@gen_test
def test_coroutine(self):
ws = yield self.ws_connect("/coroutine")
# Send both messages immediately, coroutine must process one at a time.
yield ws.write_message("hello1")
yield ws.write_message("hello2")
res = yield ws.read_message()
self.assertEqual(res, "hello1")
res = yield ws.read_message()
self.assertEqual(res, "hello2")
@gen_test
def test_check_origin_valid_no_path(self):
port = self.get_http_port()
url = "ws://127.0.0.1:%d/echo" % port
headers = {"Origin": "http://127.0.0.1:%d" % port}
ws = yield websocket_connect(HTTPRequest(url, headers=headers))
ws.write_message("hello")
response = yield ws.read_message()
self.assertEqual(response, "hello")
@gen_test
def test_check_origin_valid_with_path(self):
port = self.get_http_port()
url = "ws://127.0.0.1:%d/echo" % port
headers = {"Origin": "http://127.0.0.1:%d/something" % port}
ws = yield websocket_connect(HTTPRequest(url, headers=headers))
ws.write_message("hello")
response = yield ws.read_message()
self.assertEqual(response, "hello")
@gen_test
def test_check_origin_invalid_partial_url(self):
port = self.get_http_port()
url = "ws://127.0.0.1:%d/echo" % port
headers = {"Origin": "127.0.0.1:%d" % port}
with self.assertRaises(HTTPError) as cm:
yield websocket_connect(HTTPRequest(url, headers=headers))
self.assertEqual(cm.exception.code, 403)
@gen_test
def test_check_origin_invalid(self):
port = self.get_http_port()
url = "ws://127.0.0.1:%d/echo" % port
# Host is 127.0.0.1, which should not be accessible from some other
# domain
headers = {"Origin": "http://somewhereelse.com"}
with self.assertRaises(HTTPError) as cm:
yield websocket_connect(HTTPRequest(url, headers=headers))
self.assertEqual(cm.exception.code, 403)
@gen_test
def test_check_origin_invalid_subdomains(self):
port = self.get_http_port()
url = "ws://localhost:%d/echo" % port
# Subdomains should be disallowed by default. If we could pass a
# resolver to websocket_connect we could test sibling domains as well.
headers = {"Origin": "http://subtenant.localhost"}
with self.assertRaises(HTTPError) as cm:
yield websocket_connect(HTTPRequest(url, headers=headers))
self.assertEqual(cm.exception.code, 403)
@gen_test
def test_subprotocols(self):
ws = yield self.ws_connect(
"/subprotocol", subprotocols=["badproto", "goodproto"]
)
self.assertEqual(ws.selected_subprotocol, "goodproto")
res = yield ws.read_message()
self.assertEqual(res, "subprotocol=goodproto")
@gen_test
def test_subprotocols_not_offered(self):
ws = yield self.ws_connect("/subprotocol")
self.assertIs(ws.selected_subprotocol, None)
res = yield ws.read_message()
self.assertEqual(res, "subprotocol=None")
@gen_test
def test_open_coroutine(self):
self.message_sent = Event()
ws = yield self.ws_connect("/open_coroutine")
yield ws.write_message("hello")
self.message_sent.set()
res = yield ws.read_message()
self.assertEqual(res, "ok")
class NativeCoroutineOnMessageHandler(TestWebSocketHandler):
def initialize(self, **kwargs):
super().initialize(**kwargs)
self.sleeping = 0
async def on_message(self, message):
if self.sleeping > 0:
self.write_message("another coroutine is already sleeping")
self.sleeping += 1
await gen.sleep(0.01)
self.sleeping -= 1
self.write_message(message)
class WebSocketNativeCoroutineTest(WebSocketBaseTestCase):
def get_app(self):
return Application([("/native", NativeCoroutineOnMessageHandler)])
@gen_test
def test_native_coroutine(self):
ws = yield self.ws_connect("/native")
# Send both messages immediately, coroutine must process one at a time.
yield ws.write_message("hello1")
yield ws.write_message("hello2")
res = yield ws.read_message()
self.assertEqual(res, "hello1")
res = yield ws.read_message()
self.assertEqual(res, "hello2")
class CompressionTestMixin(object):
MESSAGE = "Hello world. Testing 123 123"
def get_app(self):
class LimitedHandler(TestWebSocketHandler):
@property
def max_message_size(self):
return 1024
def on_message(self, message):
self.write_message(str(len(message)))
return Application(
[
(
"/echo",
EchoHandler,
dict(compression_options=self.get_server_compression_options()),
),
(
"/limited",
LimitedHandler,
dict(compression_options=self.get_server_compression_options()),
),
]
)
def get_server_compression_options(self):
return None
def get_client_compression_options(self):
return None
@gen_test
def test_message_sizes(self):
ws = yield self.ws_connect(
"/echo", compression_options=self.get_client_compression_options()
)
# Send the same message three times so we can measure the
# effect of the context_takeover options.
for i in range(3):
ws.write_message(self.MESSAGE)
response = yield ws.read_message()
self.assertEqual(response, self.MESSAGE)
self.assertEqual(ws.protocol._message_bytes_out, len(self.MESSAGE) * 3)
self.assertEqual(ws.protocol._message_bytes_in, len(self.MESSAGE) * 3)
self.verify_wire_bytes(ws.protocol._wire_bytes_in, ws.protocol._wire_bytes_out)
@gen_test
def test_size_limit(self):
ws = yield self.ws_connect(
"/limited", compression_options=self.get_client_compression_options()
)
# Small messages pass through.
ws.write_message("a" * 128)
response = yield ws.read_message()
self.assertEqual(response, "128")
# This message is too big after decompression, but it compresses
# down to a size that will pass the initial checks.
ws.write_message("a" * 2048)
response = yield ws.read_message()
self.assertIsNone(response)
class UncompressedTestMixin(CompressionTestMixin):
"""Specialization of CompressionTestMixin when we expect no compression."""
def verify_wire_bytes(self, bytes_in, bytes_out):
# Bytes out includes the 4-byte mask key per message.
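        # (Per RFC 6455, each of these short client->server frames carries a
        # 2-byte header plus the 4-byte masking key, hence "+ 6"; the
        # server->client frames are unmasked, hence "+ 2" below.)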
self.assertEqual(bytes_out, 3 * (len(self.MESSAGE) + 6))
self.assertEqual(bytes_in, 3 * (len(self.MESSAGE) + 2))
class NoCompressionTest(UncompressedTestMixin, WebSocketBaseTestCase):
pass
# If only one side tries to compress, the extension is not negotiated.
class ServerOnlyCompressionTest(UncompressedTestMixin, WebSocketBaseTestCase):
def get_server_compression_options(self):
return {}
class ClientOnlyCompressionTest(UncompressedTestMixin, WebSocketBaseTestCase):
def get_client_compression_options(self):
return {}
class DefaultCompressionTest(CompressionTestMixin, WebSocketBaseTestCase):
def get_server_compression_options(self):
return {}
def get_client_compression_options(self):
return {}
def verify_wire_bytes(self, bytes_in, bytes_out):
self.assertLess(bytes_out, 3 * (len(self.MESSAGE) + 6))
self.assertLess(bytes_in, 3 * (len(self.MESSAGE) + 2))
        # Bytes out includes the 4-byte mask key per message.
self.assertEqual(bytes_out, bytes_in + 12)
class MaskFunctionMixin(object):
# Subclasses should define self.mask(mask, data)
def test_mask(self):
self.assertEqual(self.mask(b"abcd", b""), b"")
self.assertEqual(self.mask(b"abcd", b"b"), b"\x03")
self.assertEqual(self.mask(b"abcd", b"54321"), b"TVPVP")
self.assertEqual(self.mask(b"ZXCV", b"98765432"), b"c`t`olpd")
# Include test cases with \x00 bytes (to ensure that the C
# extension isn't depending on null-terminated strings) and
# bytes with the high bit set (to smoke out signedness issues).
self.assertEqual(
self.mask(b"\x00\x01\x02\x03", b"\xff\xfb\xfd\xfc\xfe\xfa"),
b"\xff\xfa\xff\xff\xfe\xfb",
)
self.assertEqual(
self.mask(b"\xff\xfb\xfd\xfc", b"\x00\x01\x02\x03\x04\x05"),
b"\xff\xfa\xff\xff\xfb\xfe",
)
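# Reference sketch (illustrative only, not used by the tests above): per RFC
# 6455, masking XORs each payload byte with the 4-byte mask key, cycling the
# key. For example, the b"abcd"/b"b" case above is ord("b") ^ ord("a") == 0x03.
def _reference_websocket_mask(mask, data):
    return bytes(b ^ mask[i % 4] for i, b in enumerate(data))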
class PythonMaskFunctionTest(MaskFunctionMixin, unittest.TestCase):
def mask(self, mask, data):
return _websocket_mask_python(mask, data)
@unittest.skipIf(speedups is None, "tornado.speedups module not present")
class CythonMaskFunctionTest(MaskFunctionMixin, unittest.TestCase):
def mask(self, mask, data):
return speedups.websocket_mask(mask, data)
class ServerPeriodicPingTest(WebSocketBaseTestCase):
def get_app(self):
class PingHandler(TestWebSocketHandler):
def on_pong(self, data):
self.write_message("got pong")
return Application([("/", PingHandler)], websocket_ping_interval=0.01)
@gen_test
def test_server_ping(self):
ws = yield self.ws_connect("/")
for i in range(3):
response = yield ws.read_message()
self.assertEqual(response, "got pong")
# TODO: test that the connection gets closed if ping responses stop.
class ClientPeriodicPingTest(WebSocketBaseTestCase):
def get_app(self):
class PingHandler(TestWebSocketHandler):
def on_ping(self, data):
self.write_message("got ping")
return Application([("/", PingHandler)])
@gen_test
def test_client_ping(self):
ws = yield self.ws_connect("/", ping_interval=0.01)
for i in range(3):
response = yield ws.read_message()
self.assertEqual(response, "got ping")
# TODO: test that the connection gets closed if ping responses stop.
class ManualPingTest(WebSocketBaseTestCase):
def get_app(self):
class PingHandler(TestWebSocketHandler):
def on_ping(self, data):
self.write_message(data, binary=isinstance(data, bytes))
return Application([("/", PingHandler)])
@gen_test
def test_manual_ping(self):
ws = yield self.ws_connect("/")
self.assertRaises(ValueError, ws.ping, "a" * 126)
ws.ping("hello")
resp = yield ws.read_message()
# on_ping always sees bytes.
self.assertEqual(resp, b"hello")
ws.ping(b"binary hello")
resp = yield ws.read_message()
self.assertEqual(resp, b"binary hello")
class MaxMessageSizeTest(WebSocketBaseTestCase):
def get_app(self):
return Application([("/", EchoHandler)], websocket_max_message_size=1024)
@gen_test
def test_large_message(self):
ws = yield self.ws_connect("/")
# Write a message that is allowed.
msg = "a" * 1024
ws.write_message(msg)
resp = yield ws.read_message()
self.assertEqual(resp, msg)
# Write a message that is too large.
ws.write_message(msg + "b")
resp = yield ws.read_message()
# A message of None means the other side closed the connection.
self.assertIs(resp, None)
self.assertEqual(ws.close_code, 1009)
self.assertEqual(ws.close_reason, "message too big")
# TODO: Needs tests of messages split over multiple
# continuation frames.
|
the-stack_0_16212 | # Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the qubit parameter-shift QubitParamShiftTape"""
import pytest
import numpy as np
import pennylane as qml
from pennylane import numpy as pnp
from pennylane.tape import QubitParamShiftTape
from pennylane.tape.qubit_param_shift import _get_operation_recipe
from pennylane.operation import (
Operation,
OperatorPropertyUndefined,
ParameterFrequenciesUndefinedError,
)
class TestGetOperationRecipe:
"""Tests special cases for the _get_operation_recipe
    copy in qubit_param_shift.py; the original is in
gradients/parameter_shift.py
"""
class DummyOp0(Operation):
num_params = 1
num_wires = 1
grad_recipe = (None,)
class DummyOp1(Operation):
num_params = 1
num_wires = 1
grad_recipe = (None,)
@property
def parameter_frequencies(self):
raise ParameterFrequenciesUndefinedError
@pytest.mark.parametrize("Op", [DummyOp0, DummyOp1])
def test_error_no_grad_info(self, Op):
op = Op(0.1, wires=0)
with pytest.raises(
OperatorPropertyUndefined,
match=f"The operation {op.name} does not have a grad_recipe,",
):
_get_operation_recipe(op, 0, None)
class TestGradMethod:
"""Tests for parameter gradient methods"""
def test_non_differentiable(self):
"""Test that a non-differentiable parameter is
correctly marked"""
psi = np.array([1, 0, 1, 0]) / np.sqrt(2)
with QubitParamShiftTape() as tape:
qml.QubitStateVector(psi, wires=[0, 1])
qml.RX(0.543, wires=[0])
qml.RY(-0.654, wires=[1])
qml.CNOT(wires=[0, 1])
qml.probs(wires=[0, 1])
assert tape._grad_method(0) is None
assert tape._grad_method(1) == "A"
assert tape._grad_method(2) == "A"
tape._update_gradient_info()
assert tape._par_info[0]["grad_method"] is None
assert tape._par_info[1]["grad_method"] == "A"
assert tape._par_info[2]["grad_method"] == "A"
def test_independent(self):
"""Test that an independent variable is properly marked
as having a zero gradient"""
with QubitParamShiftTape() as tape:
qml.RX(0.543, wires=[0])
qml.RY(-0.654, wires=[1])
qml.expval(qml.PauliY(0))
assert tape._grad_method(0) == "A"
assert tape._grad_method(1) == "0"
tape._update_gradient_info()
assert tape._par_info[0]["grad_method"] == "A"
assert tape._par_info[1]["grad_method"] == "0"
# in non-graph mode, it is impossible to determine
# if a parameter is independent or not
tape._graph = None
assert tape._grad_method(1, use_graph=False) == "A"
def test_finite_diff(self, monkeypatch):
"""If an op has grad_method=F, this should be respected
by the QubitParamShiftTape"""
monkeypatch.setattr(qml.RX, "grad_method", "F")
psi = np.array([1, 0, 1, 0]) / np.sqrt(2)
with QubitParamShiftTape() as tape:
qml.QubitStateVector(psi, wires=[0, 1])
qml.RX(0.543, wires=[0])
qml.RY(-0.654, wires=[1])
qml.CNOT(wires=[0, 1])
qml.probs(wires=[0, 1])
assert tape._grad_method(0) is None
assert tape._grad_method(1) == "F"
assert tape._grad_method(2) == "A"
def test_specs_num_parameter_shift_executions():
"""Tests specs has the correct number of parameter-shift executions"""
dev = qml.device("default.qubit", wires=3)
x = 0.543
y = -0.654
with qml.tape.QubitParamShiftTape() as tape:
qml.CRX(x, wires=[0, 1])
qml.RY(y, wires=[1])
qml.CNOT(wires=[0, 1])
qml.RY(0.12345, wires=2)
qml.expval(qml.PauliZ(0) @ qml.PauliX(1))
num_exec = tape.specs["num_parameter_shift_executions"]
assert num_exec == 7
jac = tape.jacobian(dev)
assert num_exec == (dev.num_executions + 1)
class TestParameterShiftRule:
"""Tests for the parameter shift implementation"""
@pytest.mark.parametrize("theta", np.linspace(-2 * np.pi, 2 * np.pi, 7))
@pytest.mark.parametrize("shift", [np.pi / 2, 0.3, np.sqrt(2)])
@pytest.mark.parametrize("G", [qml.RX, qml.RY, qml.RZ, qml.PhaseShift])
def test_pauli_rotation_gradient(self, mocker, G, theta, shift, tol):
"""Tests that the automatic gradients of Pauli rotations are correct."""
spy = mocker.spy(QubitParamShiftTape, "parameter_shift")
dev = qml.device("default.qubit", wires=1)
with QubitParamShiftTape() as tape:
qml.QubitStateVector(np.array([1.0, -1.0]) / np.sqrt(2), wires=0)
G(theta, wires=[0])
qml.expval(qml.PauliZ(0))
tape.trainable_params = {1}
autograd_val = tape.jacobian(dev, shift=shift, method="analytic")
manualgrad_val = (
tape.execute(dev, params=[theta + np.pi / 2])
- tape.execute(dev, params=[theta - np.pi / 2])
) / 2
assert np.allclose(autograd_val, manualgrad_val, atol=tol, rtol=0)
assert spy.call_args[1]["shift"] == shift
# compare to finite differences
numeric_val = tape.jacobian(dev, shift=shift, method="numeric")
assert np.allclose(autograd_val, numeric_val, atol=tol, rtol=0)
@pytest.mark.parametrize("theta", np.linspace(-2 * np.pi, 2 * np.pi, 7))
@pytest.mark.parametrize("shift", [np.pi / 2, 0.3, np.sqrt(2)])
def test_Rot_gradient(self, mocker, theta, shift, tol):
"""Tests that the automatic gradient of a arbitrary Euler-angle-parameterized gate is correct."""
spy = mocker.spy(QubitParamShiftTape, "parameter_shift")
dev = qml.device("default.qubit", wires=1)
params = np.array([theta, theta**3, np.sqrt(2) * theta])
with QubitParamShiftTape() as tape:
qml.QubitStateVector(np.array([1.0, -1.0]) / np.sqrt(2), wires=0)
qml.Rot(*params, wires=[0])
qml.expval(qml.PauliZ(0))
tape.trainable_params = {1, 2, 3}
autograd_val = tape.jacobian(dev, shift=shift, method="analytic")
manualgrad_val = np.zeros_like(autograd_val)
for idx in list(np.ndindex(*params.shape)):
s = np.zeros_like(params)
s[idx] += np.pi / 2
forward = tape.execute(dev, params=params + s)
backward = tape.execute(dev, params=params - s)
manualgrad_val[0, idx] = (forward - backward) / 2
assert np.allclose(autograd_val, manualgrad_val, atol=tol, rtol=0)
assert spy.call_args[1]["shift"] == shift
# compare to finite differences
numeric_val = tape.jacobian(dev, shift=shift, method="numeric")
assert np.allclose(autograd_val, numeric_val, atol=tol, rtol=0)
@pytest.mark.parametrize("G", [qml.CRX, qml.CRY, qml.CRZ])
def test_controlled_rotation_gradient(self, G, tol):
"""Test gradient of controlled rotation gates"""
dev = qml.device("default.qubit", wires=2)
b = 0.123
with QubitParamShiftTape() as tape:
qml.QubitStateVector(np.array([1.0, -1.0]) / np.sqrt(2), wires=0)
G(b, wires=[0, 1])
qml.expval(qml.PauliX(0))
tape.trainable_params = {1}
res = tape.execute(dev)
assert np.allclose(res, -np.cos(b / 2), atol=tol, rtol=0)
grad = tape.jacobian(dev, method="analytic")
expected = np.sin(b / 2) / 2
assert np.allclose(grad, expected, atol=tol, rtol=0)
# compare to finite differences
numeric_val = tape.jacobian(dev, method="numeric")
assert np.allclose(grad, numeric_val, atol=tol, rtol=0)
@pytest.mark.parametrize("theta", np.linspace(-2 * np.pi, np.pi, 7))
def test_CRot_gradient(self, mocker, theta, tol):
"""Tests that the automatic gradient of an arbitrary controlled Euler-angle-parameterized
gate is correct."""
spy = mocker.spy(QubitParamShiftTape, "parameter_shift")
dev = qml.device("default.qubit", wires=2)
a, b, c = np.array([theta, theta**3, np.sqrt(2) * theta])
with QubitParamShiftTape() as tape:
qml.QubitStateVector(np.array([1.0, -1.0]) / np.sqrt(2), wires=0)
qml.CRot(a, b, c, wires=[0, 1])
qml.expval(qml.PauliX(0))
tape.trainable_params = {1, 2, 3}
res = tape.execute(dev)
expected = -np.cos(b / 2) * np.cos(0.5 * (a + c))
assert np.allclose(res, expected, atol=tol, rtol=0)
grad = tape.jacobian(dev, method="analytic")
expected = np.array(
[
[
0.5 * np.cos(b / 2) * np.sin(0.5 * (a + c)),
0.5 * np.sin(b / 2) * np.cos(0.5 * (a + c)),
0.5 * np.cos(b / 2) * np.sin(0.5 * (a + c)),
]
]
)
assert np.allclose(grad, expected, atol=tol, rtol=0)
# compare to finite differences
numeric_val = tape.jacobian(dev, method="numeric")
assert np.allclose(grad, numeric_val, atol=tol, rtol=0)
def test_gradients_agree_finite_differences(self, mocker, tol):
"""Tests that the parameter-shift rule agrees with the first and second
order finite differences"""
params = np.array([0.1, -1.6, np.pi / 5])
with QubitParamShiftTape() as tape:
qml.RX(params[0], wires=[0])
qml.CNOT(wires=[0, 1])
qml.RY(-1.6, wires=[0])
qml.RY(params[1], wires=[1])
qml.CNOT(wires=[1, 0])
qml.RX(params[2], wires=[0])
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
tape.trainable_params = {0, 2, 3}
dev = qml.device("default.qubit", wires=2)
spy_numeric = mocker.spy(tape, "numeric_pd")
spy_analytic = mocker.spy(tape, "analytic_pd")
grad_F1 = tape.jacobian(dev, method="numeric", order=1)
grad_F2 = tape.jacobian(dev, method="numeric", order=2)
spy_numeric.assert_called()
spy_analytic.assert_not_called()
grad_A = tape.jacobian(dev, method="analytic")
spy_analytic.assert_called()
# gradients computed with different methods must agree
assert np.allclose(grad_A, grad_F1, atol=tol, rtol=0)
assert np.allclose(grad_A, grad_F2, atol=tol, rtol=0)
def test_variance_gradients_agree_finite_differences(self, mocker, tol):
"""Tests that the variance parameter-shift rule agrees with the first and second
order finite differences"""
params = np.array([0.1, -1.6, np.pi / 5])
with QubitParamShiftTape() as tape:
qml.RX(params[0], wires=[0])
qml.CNOT(wires=[0, 1])
qml.RY(-1.6, wires=[0])
qml.RY(params[1], wires=[1])
qml.CNOT(wires=[1, 0])
qml.RX(params[2], wires=[0])
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0)), qml.var(qml.PauliX(1))
tape.trainable_params = {0, 2, 3}
dev = qml.device("default.qubit", wires=2)
spy_numeric = mocker.spy(tape, "numeric_pd")
spy_analytic = mocker.spy(tape, "analytic_pd")
grad_F1 = tape.jacobian(dev, method="numeric", order=1)
grad_F2 = tape.jacobian(dev, method="numeric", order=2)
spy_numeric.assert_called()
spy_analytic.assert_not_called()
grad_A = tape.jacobian(dev, method="analytic")
spy_analytic.assert_called()
# gradients computed with different methods must agree
assert np.allclose(grad_A, grad_F1, atol=tol, rtol=0)
assert np.allclose(grad_A, grad_F2, atol=tol, rtol=0)
def test_processing_function_torch(self, mocker, tol):
"""Tests the processing function that is created when using the
parameter_shift method returns a numpy array.
This is a unit test specifically aimed at checking an edge case
discovered related to default.qubit.torch.
"""
torch = pytest.importorskip("torch")
results = [
torch.tensor([0.4342], dtype=torch.float64),
torch.tensor([-0.4342], dtype=torch.float64),
]
theta = torch.tensor(0.543, dtype=torch.float64)
phi = torch.tensor(-0.234, dtype=torch.float64)
pars = torch.tensor([theta, phi], dtype=torch.float64)
with qml.tape.QubitParamShiftTape() as tape:
qml.RY(pars[0], wires=[0])
qml.RX(pars[1], wires=[0])
qml.expval(qml.PauliZ(0))
tapes, func = tape.parameter_shift(0, pars)
assert type(func(results)) == np.ndarray
class TestJacobianIntegration:
"""Tests for general Jacobian integration"""
def test_single_expectation_value(self, tol):
"""Tests correct output shape and evaluation for a tape
with a single expval output"""
dev = qml.device("default.qubit", wires=2)
x = 0.543
y = -0.654
with QubitParamShiftTape() as tape:
qml.RX(x, wires=[0])
qml.RY(y, wires=[1])
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0) @ qml.PauliX(1))
res = tape.jacobian(dev, method="analytic")
assert res.shape == (1, 2)
expected = np.array([[-np.sin(y) * np.sin(x), np.cos(y) * np.cos(x)]])
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_multiple_expectation_values(self, tol):
"""Tests correct output shape and evaluation for a tape
with multiple expval outputs"""
dev = qml.device("default.qubit", wires=2)
x = 0.543
y = -0.654
with QubitParamShiftTape() as tape:
qml.RX(x, wires=[0])
qml.RY(y, wires=[1])
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
qml.expval(qml.PauliX(1))
res = tape.jacobian(dev, method="analytic")
assert res.shape == (2, 2)
expected = np.array([[-np.sin(x), 0], [0, np.cos(y)]])
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_var_expectation_values(self, tol):
"""Tests correct output shape and evaluation for a tape
with expval and var outputs"""
dev = qml.device("default.qubit", wires=2)
x = 0.543
y = -0.654
with QubitParamShiftTape() as tape:
qml.RX(x, wires=[0])
qml.RY(y, wires=[1])
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
qml.var(qml.PauliX(1))
res = tape.jacobian(dev, method="analytic")
assert res.shape == (2, 2)
expected = np.array([[-np.sin(x), 0], [0, -2 * np.cos(y) * np.sin(y)]])
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_prob_expectation_values(self, tol):
"""Tests correct output shape and evaluation for a tape
with prob and expval outputs"""
dev = qml.device("default.qubit", wires=2)
x = 0.543
y = -0.654
with QubitParamShiftTape() as tape:
qml.RX(x, wires=[0])
qml.RY(y, wires=[1])
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
qml.probs(wires=[0, 1])
res = tape.jacobian(dev, method="analytic")
assert res.shape == (5, 2)
expected = (
np.array(
[
[-2 * np.sin(x), 0],
[
-(np.cos(y / 2) ** 2 * np.sin(x)),
-(np.cos(x / 2) ** 2 * np.sin(y)),
],
[
-(np.sin(x) * np.sin(y / 2) ** 2),
(np.cos(x / 2) ** 2 * np.sin(y)),
],
[
(np.sin(x) * np.sin(y / 2) ** 2),
(np.sin(x / 2) ** 2 * np.sin(y)),
],
[
(np.cos(y / 2) ** 2 * np.sin(x)),
-(np.sin(x / 2) ** 2 * np.sin(y)),
],
]
)
/ 2
)
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_involutory_variance(self, mocker, tol):
"""Tests qubit observable that are involutory"""
dev = qml.device("default.qubit", wires=1)
a = 0.54
spy_analytic_var = mocker.spy(QubitParamShiftTape, "parameter_shift_var")
spy_numeric = mocker.spy(QubitParamShiftTape, "numeric_pd")
spy_execute = mocker.spy(dev, "execute")
with QubitParamShiftTape() as tape:
qml.RX(a, wires=0)
qml.var(qml.PauliZ(0))
res = tape.execute(dev)
expected = 1 - np.cos(a) ** 2
assert np.allclose(res, expected, atol=tol, rtol=0)
spy_execute.call_args_list = []
# circuit jacobians
gradA = tape.jacobian(dev, method="analytic")
spy_analytic_var.assert_called()
spy_numeric.assert_not_called()
assert len(spy_execute.call_args_list) == 1 + 2 * 1
spy_execute.call_args_list = []
gradF = tape.jacobian(dev, method="numeric")
spy_numeric.assert_called()
assert len(spy_execute.call_args_list) == 2
expected = 2 * np.sin(a) * np.cos(a)
assert gradF == pytest.approx(expected, abs=tol)
assert gradA == pytest.approx(expected, abs=tol)
def test_non_involutory_variance(self, mocker, tol):
"""Tests a qubit Hermitian observable that is not involutory"""
dev = qml.device("default.qubit", wires=1)
A = np.array([[4, -1 + 6j], [-1 - 6j, 2]])
a = 0.54
spy_analytic_var = mocker.spy(QubitParamShiftTape, "parameter_shift_var")
spy_numeric = mocker.spy(QubitParamShiftTape, "numeric_pd")
spy_execute = mocker.spy(dev, "execute")
with QubitParamShiftTape() as tape:
qml.RX(a, wires=0)
qml.var(qml.Hermitian(A, 0))
tape.trainable_params = {0}
res = tape.execute(dev)
expected = (39 / 2) - 6 * np.sin(2 * a) + (35 / 2) * np.cos(2 * a)
assert np.allclose(res, expected, atol=tol, rtol=0)
spy_execute.call_args_list = []
# circuit jacobians
gradA = tape.jacobian(dev, method="analytic")
spy_analytic_var.assert_called()
spy_numeric.assert_not_called()
assert len(spy_execute.call_args_list) == 1 + 4 * 1
spy_execute.call_args_list = []
gradF = tape.jacobian(dev, method="numeric")
spy_numeric.assert_called()
assert len(spy_execute.call_args_list) == 2
expected = -35 * np.sin(2 * a) - 12 * np.cos(2 * a)
assert gradA == pytest.approx(expected, abs=tol)
assert gradF == pytest.approx(expected, abs=tol)
def test_involutory_and_noninvolutory_variance(self, mocker, tol):
"""Tests a qubit Hermitian observable that is not involutory alongside
a involutory observable."""
dev = qml.device("default.qubit", wires=2)
A = np.array([[4, -1 + 6j], [-1 - 6j, 2]])
a = 0.54
spy_analytic_var = mocker.spy(QubitParamShiftTape, "parameter_shift_var")
spy_numeric = mocker.spy(QubitParamShiftTape, "numeric_pd")
spy_execute = mocker.spy(dev, "execute")
with QubitParamShiftTape() as tape:
qml.RX(a, wires=0)
qml.RX(a, wires=1)
qml.var(qml.PauliZ(0))
qml.var(qml.Hermitian(A, 1))
tape.trainable_params = {0, 1}
res = tape.execute(dev)
expected = [1 - np.cos(a) ** 2, (39 / 2) - 6 * np.sin(2 * a) + (35 / 2) * np.cos(2 * a)]
assert np.allclose(res, expected, atol=tol, rtol=0)
spy_execute.call_args_list = []
# circuit jacobians
gradA = tape.jacobian(dev, method="analytic")
spy_analytic_var.assert_called()
spy_numeric.assert_not_called()
assert len(spy_execute.call_args_list) == 1 + 2 * 4
spy_execute.call_args_list = []
gradF = tape.jacobian(dev, method="numeric")
spy_numeric.assert_called()
assert len(spy_execute.call_args_list) == 1 + 2
expected = [2 * np.sin(a) * np.cos(a), -35 * np.sin(2 * a) - 12 * np.cos(2 * a)]
assert np.diag(gradA) == pytest.approx(expected, abs=tol)
assert np.diag(gradF) == pytest.approx(expected, abs=tol)
def test_expval_and_variance(self, tol):
"""Test that the qnode works for a combination of expectation
values and variances"""
dev = qml.device("default.qubit", wires=3)
a = 0.54
b = -0.423
c = 0.123
with QubitParamShiftTape() as tape:
qml.RX(a, wires=0)
qml.RY(b, wires=1)
qml.CNOT(wires=[1, 2])
qml.RX(c, wires=2)
qml.CNOT(wires=[0, 1])
qml.var(qml.PauliZ(0))
qml.expval(qml.PauliZ(1))
qml.var(qml.PauliZ(2))
res = tape.execute(dev)
expected = np.array(
[
np.sin(a) ** 2,
np.cos(a) * np.cos(b),
0.25 * (3 - 2 * np.cos(b) ** 2 * np.cos(2 * c) - np.cos(2 * b)),
]
)
assert np.allclose(res, expected, atol=tol, rtol=0)
        # circuit jacobians
gradA = tape.jacobian(dev, method="analytic")
gradF = tape.jacobian(dev, method="numeric")
expected = np.array(
[
[2 * np.cos(a) * np.sin(a), -np.cos(b) * np.sin(a), 0],
[
0,
-np.cos(a) * np.sin(b),
0.5 * (2 * np.cos(b) * np.cos(2 * c) * np.sin(b) + np.sin(2 * b)),
],
[0, 0, np.cos(b) ** 2 * np.sin(2 * c)],
]
).T
assert gradA == pytest.approx(expected, abs=tol)
assert gradF == pytest.approx(expected, abs=tol)
class TestHessian:
"""Tests for parameter Hessian method"""
@pytest.mark.parametrize("s1", [np.pi / 2, np.pi / 4, 2])
@pytest.mark.parametrize("s2", [np.pi / 2, np.pi / 4, 3])
@pytest.mark.parametrize("G", [qml.RX, qml.RY, qml.RZ, qml.PhaseShift])
def test_pauli_rotation_hessian(self, s1, s2, G, tol):
"""Tests that the automatic Hessian of Pauli rotations are correct."""
theta = np.array([0.234, 2.443])
dev = qml.device("default.qubit", wires=2)
with QubitParamShiftTape() as tape:
qml.QubitStateVector(np.array([1.0, -1.0, 1.0, -1.0]) / np.sqrt(4), wires=[0, 1])
G(theta[0], wires=[0])
G(theta[1], wires=[1])
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
tape.trainable_params = {1, 2}
autograd_val = tape.hessian(dev, s1=s1, s2=s2)
assert autograd_val.shape == (len(theta), len(theta))
shift = np.eye(len(theta))
manualgrad_val = np.zeros((len(theta), len(theta)))
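        # Manual second-order parameter-shift check: applying the shift rule twice gives
        #   d^2 f / (d theta_i d theta_j) = [f(+s1,+s2) - f(-s1,+s2) - f(+s1,-s2) + f(-s1,-s2)]
        #                                   / (4 * sin(s1) * sin(s2)),
        # which the nested loop below evaluates entry by entry.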
for i in range(len(theta)):
for j in range(len(theta)):
manualgrad_val[i, j] = (
tape.execute(dev, params=theta + s1 * shift[i] + s2 * shift[j])
- tape.execute(dev, params=theta - s1 * shift[i] + s2 * shift[j])
- tape.execute(dev, params=theta + s1 * shift[i] - s2 * shift[j])
+ tape.execute(dev, params=theta - s1 * shift[i] - s2 * shift[j])
) / (4 * np.sin(s1) * np.sin(s2))
assert np.allclose(autograd_val, manualgrad_val, atol=tol, rtol=0)
def test_vector_output(self, tol):
"""Tests that a vector valued output tape has a hessian with the proper result."""
dev = qml.device("default.qubit", wires=1)
x = np.array([1.0, 2.0])
with QubitParamShiftTape() as tape:
qml.RY(x[0], wires=0)
qml.RX(x[1], wires=0)
qml.probs(wires=[0])
hess = tape.hessian(dev)
        expected_hess = np.array(
[
[
[-0.5 * np.cos(x[0]) * np.cos(x[1]), 0.5 * np.cos(x[0]) * np.cos(x[1])],
[0.5 * np.sin(x[0]) * np.sin(x[1]), -0.5 * np.sin(x[0]) * np.sin(x[1])],
],
[
[0.5 * np.sin(x[0]) * np.sin(x[1]), -0.5 * np.sin(x[0]) * np.sin(x[1])],
[-0.5 * np.cos(x[0]) * np.cos(x[1]), 0.5 * np.cos(x[0]) * np.cos(x[1])],
],
]
)
assert np.allclose(hess, expected_hess, atol=tol, rtol=0)
def test_no_trainable_params_hessian(self):
"""Test that an empty Hessian is returned when there are no trainable
parameters."""
dev = qml.device("default.qubit", wires=1)
with QubitParamShiftTape() as tape:
qml.RX(0.224, wires=[0])
qml.expval(qml.PauliZ(0))
tape.trainable_params = {}
hessian = tape.hessian(dev)
assert hessian.shape == (0, 0)
@pytest.mark.parametrize("G", [qml.CRX, qml.CRY, qml.CRZ])
def test_controlled_rotation_error(self, G, tol):
"""Test that attempting to perform the parameter-shift rule on the controlled rotation gates
results in an error."""
dev = qml.device("default.qubit", wires=2)
b = 0.123
with QubitParamShiftTape() as tape:
qml.QubitStateVector(np.array([1.0, -1.0]) / np.sqrt(2), wires=0)
G(b, wires=[0, 1])
qml.expval(qml.PauliX(0))
tape.trainable_params = {1}
res = tape.execute(dev)
assert np.allclose(res, -np.cos(b / 2), atol=tol, rtol=0)
with pytest.raises(ValueError, match="not supported for the parameter-shift Hessian"):
tape.hessian(dev, method="analytic")
@pytest.mark.parametrize("G", [qml.CRX, qml.CRY, qml.CRZ])
def test_controlled_rotation_second_derivative(self, G, tol):
"""Test that the controlled rotation gates return the correct
second derivative if first decomposed."""
dev = qml.device("default.qubit", wires=2)
init_state = qml.numpy.array([1.0, -1.0], requires_grad=False) / np.sqrt(2)
@qml.qnode(dev)
def circuit(b):
qml.QubitStateVector(init_state, wires=0)
G(b, wires=[0, 1])
return qml.expval(qml.PauliX(0))
b = pnp.array(0.123, requires_grad=True)
res = circuit(b)
assert np.allclose(res, -np.cos(b / 2), atol=tol, rtol=0)
grad = qml.grad(qml.grad(circuit))(b)
expected = np.cos(b / 2) / 4
assert np.allclose(grad, expected, atol=tol, rtol=0)
def test_non_differentiable_controlled_rotation(self, tol):
"""Tests that a non-differentiable controlled operation does not affect
the Hessian computation."""
dev = qml.device("default.qubit", wires=2)
x = 0.6
with QubitParamShiftTape() as tape:
qml.RY(x, wires=0)
qml.CRY(np.pi / 2, wires=[0, 1])
qml.expval(qml.PauliX(0))
tape.trainable_params = {0}
hess = tape.hessian(dev)
expected_hess = np.array([-np.sin(x) / np.sqrt(2)])
assert np.allclose(hess, expected_hess, atol=tol, rtol=0)
|
the-stack_0_16213 | import os
import random
import argparse
import traceback
import numpy as np
import torch
from torch.optim import Adam
from torch.utils.data import DataLoader
from tokenizers import SentencePieceBPETokenizer
from transformers import GPT2Config, GPT2LMHeadModel
from tqdm import tqdm
from data import CustomDataset, dynamic_padding_collate_fn, load_dataset
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--epoch', type=int, default=-1)
parser.add_argument('--batch_size', type=int, default=-1)
parser.add_argument('--save', type=str, default='./checkpoint/')
parser.add_argument('--load', type=str, default='./checkpoint/kogpt2_subject_epoch.ckpt')
parser.add_argument('--train_dataset', type=str, default='./dataset/none_train.json', required=True)
parser.add_argument('--valid_dataset', type=str, default='./dataset/none_valid.json')
args = parser.parse_args()
if args.epoch == -1:
args.epoch = 10
if args.batch_size == -1:
args.batch_size = 1
seed = random.randint(0, 2147483647)
np.random.seed(seed)
torch.random.manual_seed(seed)
torch.cuda.manual_seed(seed)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"{device} is used for training")
subject = 'subject'
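    # A non-default --load path means training resumes from a checkpoint: read it,
    # recover the subject name from the file name (kogpt2_<subject>_<epoch>.ckpt),
    # then clear args.load so the `if not args.load` checks below restore the saved state.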
if args.load != './checkpoint/kogpt2_subject_epoch.ckpt':
checkpoint = torch.load(args.load, map_location=device)
subject = args.load.split('_')[1]
args.load = None
tokenizer = SentencePieceBPETokenizer.from_file(
vocab_filename="./tokenizer/tokenizers_vocab.json",
merges_filename="./tokenizer/tokenizers_merges.txt",
add_prefix_space=False
)
train_dataset = None
try:
pairs = load_dataset(args.train_dataset)
train_dataset = CustomDataset(pairs, tokenizer)
print("loading dataset succeeds")
except Exception as e:
print("loading dataset fails")
traceback.print_exc()
train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, collate_fn=dynamic_padding_collate_fn)
if args.valid_dataset == './dataset/none_valid.json':
valid_flag = False
else:
valid_flag = True
if valid_flag:
valid_dataset = None
try:
pairs = load_dataset(args.valid_dataset)
valid_dataset = CustomDataset(pairs, tokenizer)
print("loading valid dataset succeeds")
except Exception as e:
print("loading valid dataset fails")
traceback.print_exc()
        valid_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size, shuffle=True, collate_fn=dynamic_padding_collate_fn)
model = GPT2LMHeadModel.from_pretrained(pretrained_model_name_or_path="taeminlee/kogpt2")
if not args.load:
model.load_state_dict(checkpoint['model_state_dict'])
model.to(device)
model.eval()
loss = 0
epoch = 1
learning_rate = 3e-5
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min')
# criterion = torch.nn.CrossEntropyLoss()
if not args.load:
epoch = checkpoint['epoch']
# learning_rate = checkpoint['learning_rate']
# optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
# scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
print("KoGPT2 Training Starts")
for epoch in range(epoch, args.epoch + 1):
best_epoch = 0
best_loss = 10000
average_train_loss = (0.0, 0.0)
model.train()
for step, batch in tqdm(enumerate(train_dataloader), desc=f"[TRAIN] Epoch: {epoch}", total=len(train_dataloader)):
optimizer.zero_grad()
input_ids, attention_mask, labels = tuple(value.to(device) for value in batch)
outputs = model.forward(input_ids, attention_mask=attention_mask, labels=labels, return_dict=True)
loss = outputs.loss.item()
outputs.loss.backward()
optimizer.step()
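            # Exponentially decayed running sum of the loss and of the step count (decay 0.99);
            # their ratio is a smoothed average of the recent training loss.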
average_train_loss = (average_train_loss[0] * 0.99 + loss, average_train_loss[1] * 0.99 + 1)
if best_loss > average_train_loss[0] / average_train_loss[1]:
best_loss = average_train_loss[0] / average_train_loss[1]
best_epoch = epoch
if step % 10 == 0:
print("[Epoch {0}: {1}] Loss = {2:.5f} Average Train loss = {3:.5f}".format(epoch, step, loss, average_train_loss[0] / average_train_loss[1]))
# scheduler.step(average_loss[0] / average_loss[1])
print("[Epoch {0}] Best Epcoh {1} Best loss = {2:.5f}".format(epoch, best_epoch, best_loss))
if epoch % 2 == 0:
try:
if not os.path.exists(args.save):
os.mkdir(args.save)
torch.save({
'epoch': epoch,
# 'learning_rate': learning_rate,
'model_state_dict': model.state_dict(),
# 'optimizer_state_dict': optimizer.state_dict()
# 'scheduler_state_dict': scheduler.state_dict()
}, args.save + 'kogpt2_' + subject + '_' + str(epoch) + '.ckpt')
print("saving model succeeds")
except Exception as e:
traceback.print_exc()
print("saving model fails")
torch.save({
'epoch': epoch,
# 'learning_rate': learning_rate,
'model_state_dict': model.state_dict(),
# 'optimizer_state_dict': optimizer.state_dict()
# 'scheduler_state_dict': scheduler.state_dict()
}, args.save + 'kogpt2_' + subject + '_' + str(args.epoch + 1) + '.ckpt')
|
the-stack_0_16219 | #--------------------- Packages
import pandas as pd
import dash_table
#--------------------- Datatable
def datatable_asset(df):
"""Function to create a datatable which is used to return the tweets and sentiment."""
datatable = dash_table.DataTable(
id='typing_formatting_1',
data=df.to_dict('records'),
columns=[
{
'id': 'product_name',
'name': 'Auction',
'type': 'text'
},
{
'id': 'link',
'name': 'URL',
'type': 'text',
'presentation': 'markdown'
},
{
'id': 'user_feedback',
'name': 'Feedback',
'type': 'text'
},
{
'id': 'user_feedback_positive',
'name': 'Positive feedback (%)',
'type': 'text'
},
{
'id': 'price',
'name': 'Price ($)',
'type': 'numeric'
},
],
# Highlight Cells based on conditions - first, second, and third row
style_data_conditional=[
# Fix columnd widths
{'if': {'column_id': 'product_name'},
'width': '20%'},
{'if': {'column_id': 'link'},
'width': '20%'},
{'if': {'column_id': 'user_feedback'},
'width': '20%'},
{'if': {'column_id': 'user_feedback_positive'},
'width': '20%'},
{'if': {'column_id': 'price'},
'width': '20%'},
],
# Formatting the data/headers cells
style_cell={'backgroundColor': '#f7f7f7', 'font-family': 'helvetica',
'fontColor': '#000000', 'fontSize': 24,
'textAlign': 'center'
},
style_data={'border': '1px solid LightPink', 'font-size': 24,
'font-family': 'helvetica', 'whiteSpace': 'normal',
},
style_header={'border': '1px solid LightPink', 'font-size': 28,
'font-family': 'helvetica', 'textAlign': 'center',
'fontWeight': 'bold'
},
css=[{
'selector': '.dash-spreadsheet td div',
'rule': '''
line-height: 35px;
max-height: 35px; min-height: 35px; height: 35px;
display: block;
overflow-y: hidden;
''',
}, {'selector': 'table', 'rule': 'table-layout: fixed'}
],
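        # The CSS rule above clips every cell to a single 35px line, so expose the
        # full cell contents as a hover tooltip instead.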
tooltip_data=[{
column: {'value': str(value), 'type': 'markdown'}
for column, value in row.items()
}
            for row in df.to_dict('records')
],
tooltip_duration=None,
editable=True,
page_size=10,
filter_action="native",
sort_action="native",
sort_mode="multi",
column_selectable="single",
row_selectable="multi",
row_deletable=True,
selected_columns=[],
selected_rows=[],
page_action="native",
)
return datatable
|
the-stack_0_16221 | # -*- coding:utf-8 -*-
# ! ./usr/bin/env python
# __author__ = 'zzp'
import cv2
import json
import glob
import numpy as np
from os.path import join
from os import listdir
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--dir',type=str, default='./GOT_10k', help='your got_10k data dir')
args = parser.parse_args()
got10k_base_path = args.dir
sub_sets = sorted({'train_data', 'val_data'})
got10k = []
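# Output layout: one list per video_set folder, each entry a list of video dicts of the form
# {'base_path': ..., 'frame': [{'frame_sz': [w, h], 'img_path': ..., 'bbox': [x, y, w, h]}, ...]}.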
for sub_set in sub_sets:
sub_set_base_path = join(got10k_base_path, sub_set)
for video_set in sorted(listdir(sub_set_base_path)):
videos = sorted(listdir(join(sub_set_base_path, video_set)))
s = []
for vi, video in enumerate(videos):
print('subset: {}, video_set: {}, video id: {:04d} / {:04d}'.format(sub_set, video_set, vi, len(videos)))
v = dict()
v['base_path'] = join(sub_set, video_set, video)
v['frame'] = []
video_base_path = join(sub_set_base_path, video_set, video)
gts_path = join(video_base_path, 'groundtruth.txt')
# gts_file = open(gts_path, 'r')
# gts = gts_file.readlines()
gts = np.loadtxt(open(gts_path, "rb"), delimiter=',')
# get image size
im_path = join(video_base_path, '00000001.jpg')
im = cv2.imread(im_path)
size = im.shape # height, width
frame_sz = [size[1], size[0]] # width,height
# get all im name
jpgs = sorted(glob.glob(join(video_base_path, '*.jpg')))
f = dict()
for idx, img_path in enumerate(jpgs):
f['frame_sz'] = frame_sz
f['img_path'] = img_path.split('/')[-1]
gt = gts[idx]
bbox = [int(g) for g in gt] # (x,y,w,h)
f['bbox'] = bbox
v['frame'].append(f.copy())
s.append(v)
got10k.append(s)
print('save json (raw got10k info), please wait 1 min~')
json.dump(got10k, open('got10k.json', 'w'), indent=4, sort_keys=True)
print('got10k.json has been saved in ./')
|
the-stack_0_16222 | from functools import total_ordering
from typing import Dict, Union, Callable, Any
from cereal import log, car
import cereal.messaging as messaging
from common.realtime import DT_CTRL
from selfdrive.config import Conversions as CV
from selfdrive.locationd.calibrationd import MIN_SPEED_FILTER
AlertSize = log.ControlsState.AlertSize
AlertStatus = log.ControlsState.AlertStatus
VisualAlert = car.CarControl.HUDControl.VisualAlert
AudibleAlert = car.CarControl.HUDControl.AudibleAlert
EventName = car.CarEvent.EventName
# Alert priorities
class Priority:
LOWEST = 0
LOWER = 1
LOW = 2
MID = 3
HIGH = 4
HIGHEST = 5
# Event types
class ET:
ENABLE = 'enable'
PRE_ENABLE = 'preEnable'
NO_ENTRY = 'noEntry'
WARNING = 'warning'
USER_DISABLE = 'userDisable'
SOFT_DISABLE = 'softDisable'
IMMEDIATE_DISABLE = 'immediateDisable'
PERMANENT = 'permanent'
# get event name from enum
EVENT_NAME = {v: k for k, v in EventName.schema.enumerants.items()}
class Events:
def __init__(self):
self.events = []
self.static_events = []
self.events_prev = dict.fromkeys(EVENTS.keys(), 0)
@property
def names(self):
return self.events
def __len__(self):
return len(self.events)
def add(self, event_name, static=False):
if static:
self.static_events.append(event_name)
self.events.append(event_name)
def clear(self):
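        # Age the per-event counters: events still present this cycle are incremented,
        # all others reset to zero. create_alerts() compares these counters against
        # each alert's creation_delay before showing it.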
self.events_prev = {k: (v+1 if k in self.events else 0) for k, v in self.events_prev.items()}
self.events = self.static_events.copy()
def any(self, event_type):
for e in self.events:
if event_type in EVENTS.get(e, {}).keys():
return True
return False
def create_alerts(self, event_types, callback_args=None):
if callback_args is None:
callback_args = []
ret = []
for e in self.events:
types = EVENTS[e].keys()
for et in event_types:
if et in types:
alert = EVENTS[e][et]
if not isinstance(alert, Alert):
alert = alert(*callback_args)
if DT_CTRL * (self.events_prev[e] + 1) >= alert.creation_delay:
alert.alert_type = f"{EVENT_NAME[e]}/{et}"
ret.append(alert)
return ret
def add_from_msg(self, events):
for e in events:
self.events.append(e.name.raw)
def to_msg(self):
ret = []
for event_name in self.events:
event = car.CarEvent.new_message()
event.name = event_name
for event_type in EVENTS.get(event_name, {}).keys():
            setattr(event, event_type, True)
ret.append(event)
return ret
@total_ordering
class Alert:
def __init__(self,
alert_text_1: str,
alert_text_2: str,
alert_status,
alert_size,
alert_priority,
visual_alert,
audible_alert,
duration_sound: float,
duration_hud_alert: float,
duration_text: float,
alert_rate: float = 0.,
creation_delay: float = 0.):
self.alert_type = ""
self.alert_text_1 = alert_text_1
self.alert_text_2 = alert_text_2
self.alert_status = alert_status
self.alert_size = alert_size
self.alert_priority = alert_priority
self.visual_alert = visual_alert
self.audible_alert = audible_alert
self.duration_sound = duration_sound
self.duration_hud_alert = duration_hud_alert
self.duration_text = duration_text
self.start_time = 0.
self.alert_rate = alert_rate
self.creation_delay = creation_delay
# typecheck that enums are valid on startup
tst = car.CarControl.new_message()
tst.hudControl.visualAlert = self.visual_alert
def __str__(self) -> str:
return self.alert_text_1 + "/" + self.alert_text_2 + " " + str(self.alert_priority) + " " + str(
self.visual_alert) + " " + str(self.audible_alert)
def __gt__(self, alert2) -> bool:
return self.alert_priority > alert2.alert_priority
def __eq__(self, alert2) -> bool:
return self.alert_priority == alert2.alert_priority
class NoEntryAlert(Alert):
def __init__(self, alert_text_2, audible_alert=AudibleAlert.chimeError,
visual_alert=VisualAlert.none, duration_hud_alert=2.):
super().__init__("openpilot Unavailable", alert_text_2, AlertStatus.normal,
AlertSize.mid, Priority.LOW, visual_alert,
audible_alert, .4, duration_hud_alert, 3.)
class SoftDisableAlert(Alert):
def __init__(self, alert_text_2):
super().__init__("TAKE CONTROL IMMEDIATELY", alert_text_2,
AlertStatus.critical, AlertSize.full,
Priority.MID, VisualAlert.steerRequired,
AudibleAlert.chimeWarningRepeat, .1, 2., 2.),
class ImmediateDisableAlert(Alert):
def __init__(self, alert_text_2, alert_text_1="TAKE CONTROL IMMEDIATELY"):
super().__init__(alert_text_1, alert_text_2,
AlertStatus.critical, AlertSize.full,
Priority.HIGHEST, VisualAlert.steerRequired,
AudibleAlert.chimeWarningRepeat, 2.2, 3., 4.),
class EngagementAlert(Alert):
def __init__(self, audible_alert=True):
super().__init__("", "",
AlertStatus.normal, AlertSize.none,
Priority.MID, VisualAlert.none,
audible_alert, 2.0, 0., 0.),
# ********** alert callback functions **********
def below_steer_speed_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool) -> Alert:
speed = int(round(CP.minSteerSpeed * (CV.MS_TO_KPH if metric else CV.MS_TO_MPH)))
unit = "km/h" if metric else "mph"
return Alert(
"TAKE CONTROL",
"Steer Unavailable Below %d %s" % (speed, unit),
AlertStatus.userPrompt, AlertSize.mid,
Priority.MID, VisualAlert.steerRequired, AudibleAlert.none, 0., 0.4, .3)
def calibration_incomplete_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool) -> Alert:
speed = int(MIN_SPEED_FILTER * (CV.MS_TO_KPH if metric else CV.MS_TO_MPH))
unit = "km/h" if metric else "mph"
return Alert(
"Calibration in Progress: %d%%" % sm['liveCalibration'].calPerc,
"Drive Above %d %s" % (speed, unit),
AlertStatus.normal, AlertSize.mid,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, 0., 0., .2)
def no_gps_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool) -> Alert:
gps_integrated = sm['health'].hwType in [log.HealthData.HwType.uno, log.HealthData.HwType.dos]
return Alert(
"Poor GPS reception",
"If sky is visible, contact support" if gps_integrated else "Check GPS antenna placement",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2, creation_delay=300.)
def wrong_car_mode_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool) -> Alert:
text = "Cruise Mode Disabled"
if CP.carName == "honda":
text = "Main Switch Off"
return NoEntryAlert(text, duration_hud_alert=0.)
def auto_lane_change_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool) -> Alert:
alc_timer = sm['pathPlan'].autoLaneChangeTimer
return Alert(
"Auto Lane Change starts in (%d)" % alc_timer,
"Monitor Other Vehicles",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.steerRequired, AudibleAlert.none, 0., .1, .1, alert_rate=0.75)
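# EVENTS maps each EventName to the alerts raised for it, keyed by event type (ET.*).
# A value is either a static Alert or a callback taking (CarParams, SubMaster, metric)
# that builds the Alert at runtime (see Events.create_alerts).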
EVENTS: Dict[int, Dict[str, Union[Alert, Callable[[Any, messaging.SubMaster, bool], Alert]]]] = {
# ********** events with no alerts **********
# ********** events only containing alerts displayed in all states **********
EventName.debugAlert: {
ET.PERMANENT: Alert(
"DEBUG ALERT",
"",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .1, .1, .1),
},
EventName.startup: {
ET.PERMANENT: Alert(
"Be ready to take over at any time",
"Always keep hands on wheel and eyes on road",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., 15.),
},
EventName.startupMaster: {
ET.PERMANENT: Alert(
"WARNING: This branch is not tested",
"Always keep hands on wheel and eyes on road",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., 15.),
},
EventName.startupNoControl: {
ET.PERMANENT: Alert(
"Dashcam mode",
"Always keep hands on wheel and eyes on road",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., 15.),
},
EventName.startupNoCar: {
ET.PERMANENT: Alert(
"Dashcam mode for unsupported car",
"Always keep hands on wheel and eyes on road",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., 15.),
},
EventName.invalidGiraffeToyota: {
ET.PERMANENT: Alert(
"Unsupported Giraffe Configuration",
"Visit comma.ai/tg",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
},
EventName.whitePandaUnsupported: {
ET.PERMANENT: Alert(
"White Panda Is No Longer Supported",
"Upgrade to comma two or black panda",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
ET.NO_ENTRY: NoEntryAlert("White panda is no longer supported"),
},
EventName.invalidLkasSetting: {
ET.PERMANENT: Alert(
"Stock LKAS is turned on",
"Turn off stock LKAS to engage",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
},
EventName.communityFeatureDisallowed: {
# LOW priority to overcome Cruise Error
ET.PERMANENT: Alert(
"Community Feature Detected",
"Enable Community Features in Developer Settings",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
},
EventName.carUnrecognized: {
ET.PERMANENT: Alert(
"Dashcam Mode",
"Car Unrecognized",
AlertStatus.normal, AlertSize.mid,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
},
EventName.stockAeb: {
ET.PERMANENT: Alert(
"BRAKE!",
"Stock AEB: Risk of Collision",
AlertStatus.critical, AlertSize.full,
Priority.HIGHEST, VisualAlert.fcw, AudibleAlert.none, 1., 2., 2.),
},
EventName.stockFcw: {
ET.PERMANENT: Alert(
"BRAKE!",
"Stock FCW: Risk of Collision",
AlertStatus.critical, AlertSize.full,
Priority.HIGHEST, VisualAlert.fcw, AudibleAlert.none, 1., 2., 2.),
},
EventName.fcw: {
ET.PERMANENT: Alert(
"BRAKE!",
"Risk of Collision",
AlertStatus.critical, AlertSize.full,
Priority.HIGHEST, VisualAlert.fcw, AudibleAlert.chimeWarningRepeat, 1., 2., 2.),
},
EventName.ldw: {
ET.PERMANENT: Alert(
"TAKE CONTROL",
"Lane Departure Detected",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.chimePrompt, 1., 2., 3.),
},
# ********** events only containing alerts that display while engaged **********
EventName.gasPressed: {
ET.PRE_ENABLE: Alert(
"openpilot will not brake while gas pressed",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, .0, .0, .1, creation_delay=1.),
},
EventName.vehicleModelInvalid: {
ET.WARNING: Alert(
"Vehicle Parameter Identification Failed",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWEST, VisualAlert.steerRequired, AudibleAlert.none, .0, .0, .1),
},
EventName.steerTempUnavailableMute: {
ET.WARNING: Alert(
"TAKE CONTROL",
"Steering Temporarily Unavailable",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .2, .2, .2),
},
EventName.preDriverDistracted: {
ET.WARNING: Alert(
"KEEP EYES ON ROAD: Driver Distracted",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, .0, .1, .1, alert_rate=0.75),
},
EventName.promptDriverDistracted: {
ET.WARNING: Alert(
"KEEP EYES ON ROAD",
"Driver Appears Distracted",
AlertStatus.userPrompt, AlertSize.mid,
Priority.MID, VisualAlert.steerRequired, AudibleAlert.chimeWarning2Repeat, .1, .1, .1),
},
EventName.driverDistracted: {
ET.WARNING: Alert(
"DISENGAGE IMMEDIATELY",
"Driver Was Distracted",
AlertStatus.critical, AlertSize.full,
Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.chimeWarningRepeat, .1, .1, .1),
},
EventName.preDriverUnresponsive: {
ET.WARNING: Alert(
"TOUCH STEERING WHEEL: No Face Detected",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, .0, .1, .1, alert_rate=0.75),
},
EventName.promptDriverUnresponsive: {
ET.WARNING: Alert(
"TOUCH STEERING WHEEL",
"Driver Is Unresponsive",
AlertStatus.userPrompt, AlertSize.mid,
Priority.MID, VisualAlert.steerRequired, AudibleAlert.chimeWarning2Repeat, .1, .1, .1),
},
EventName.driverUnresponsive: {
ET.WARNING: Alert(
"DISENGAGE IMMEDIATELY",
"Driver Was Unresponsive",
AlertStatus.critical, AlertSize.full,
Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.chimeWarningRepeat, .1, .1, .1),
},
EventName.driverMonitorLowAcc: {
ET.WARNING: Alert(
"CHECK DRIVER FACE VISIBILITY",
"Driver Monitor Model Output Uncertain",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, .4, 0., 1.5),
},
EventName.manualRestart: {
ET.WARNING: Alert(
"TAKE CONTROL",
"Resume Driving Manually",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
},
EventName.resumeRequired: {
ET.WARNING: Alert(
"STOPPED",
"Press Resume to Move",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
},
EventName.belowSteerSpeed: {
ET.WARNING: below_steer_speed_alert,
},
EventName.preLaneChangeLeft: {
ET.WARNING: Alert(
"Steer Left to Start Lane Change",
"Monitor Other Vehicles",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, .0, .1, .1, alert_rate=0.75),
},
EventName.preLaneChangeRight: {
ET.WARNING: Alert(
"Steer Right to Start Lane Change",
"Monitor Other Vehicles",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, .0, .1, .1, alert_rate=0.75),
},
EventName.laneChangeBlocked: {
ET.WARNING: Alert(
"Car Detected in Blindspot",
"Monitor Other Vehicles",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, .0, .1, .1),
},
EventName.laneChange: {
ET.WARNING: Alert(
"Changing Lane",
"Monitor Other Vehicles",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, .0, .1, .1),
},
EventName.steerSaturated: {
ET.WARNING: Alert(
"TAKE CONTROL",
"Turn Exceeds Steering Limit",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.chimePrompt, 1., 2., 3.),
},
EventName.turningIndicatorOn: {
ET.WARNING: Alert(
"TAKE CONTROL",
"Steer Unavailable while Turning",
AlertStatus.userPrompt, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .0, .0, .2),
},
EventName.lkasButtonOff: {
ET.WARNING: Alert(
"lkasButtonOff",
"LKAS button off",
"",
AlertStatus.userPrompt, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 0., 0., .1),
},
EventName.autoLaneChange: {
ET.WARNING: auto_lane_change_alert,
},
# ********** events that affect controls state transitions **********
EventName.pcmEnable: {
ET.ENABLE: EngagementAlert(AudibleAlert.chimeEngage),
},
EventName.buttonEnable: {
ET.ENABLE: EngagementAlert(AudibleAlert.chimeEngage),
},
EventName.pcmDisable: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.chimeDisengage),
},
EventName.buttonCancel: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.chimeDisengage),
},
EventName.brakeHold: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.chimeDisengage),
ET.NO_ENTRY: NoEntryAlert("Brake Hold Active"),
},
EventName.parkBrake: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.chimeDisengage),
ET.NO_ENTRY: NoEntryAlert("Park Brake Engaged"),
},
EventName.pedalPressed: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.chimeDisengage),
ET.NO_ENTRY: NoEntryAlert("Pedal Pressed During Attempt",
visual_alert=VisualAlert.brakePressed),
},
EventName.wrongCarMode: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.chimeDisengage),
ET.NO_ENTRY: wrong_car_mode_alert,
},
EventName.wrongCruiseMode: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.chimeDisengage),
ET.NO_ENTRY: NoEntryAlert("Enable Adaptive Cruise"),
},
EventName.steerTempUnavailable: {
ET.WARNING: Alert(
"TAKE CONTROL",
"Steering Temporarily Unavailable",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.chimeWarning1, .4, 2., 3.),
ET.NO_ENTRY: NoEntryAlert("Steering Temporarily Unavailable",
duration_hud_alert=0.),
},
EventName.focusRecoverActive: {
ET.WARNING: Alert(
"TAKE CONTROL",
"Attempting Refocus: Camera Focus Invalid",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.chimeWarning1, .4, 2., 3., creation_delay=3.1),
},
EventName.outOfSpace: {
ET.NO_ENTRY: NoEntryAlert("Out of Storage Space",
duration_hud_alert=0.),
},
EventName.belowEngageSpeed: {
ET.NO_ENTRY: NoEntryAlert("Speed Too Low"),
},
EventName.neosUpdateRequired: {
ET.PERMANENT: Alert(
"NEOS Update Required",
"Please Wait for Update",
AlertStatus.normal, AlertSize.mid,
Priority.HIGHEST, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
ET.NO_ENTRY: NoEntryAlert("NEOS Update Required"),
},
EventName.sensorDataInvalid: {
ET.PERMANENT: Alert(
"No Data from Device Sensors",
"Reboot your Device",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2, creation_delay=1.),
ET.NO_ENTRY: NoEntryAlert("No Data from Device Sensors"),
},
EventName.noGps: {
ET.PERMANENT: no_gps_alert,
},
EventName.soundsUnavailable: {
ET.PERMANENT: Alert(
"Speaker not found",
"Reboot your Device",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
ET.NO_ENTRY: NoEntryAlert("Speaker not found"),
},
EventName.tooDistracted: {
ET.NO_ENTRY: NoEntryAlert("Distraction Level Too High"),
},
EventName.overheat: {
ET.SOFT_DISABLE: SoftDisableAlert("System Overheated"),
ET.NO_ENTRY: NoEntryAlert("System overheated"),
},
EventName.wrongGear: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.chimeDisengage),
ET.NO_ENTRY: NoEntryAlert("Gear not D"),
},
EventName.calibrationInvalid: {
ET.PERMANENT: Alert(
"Calibration Invalid",
"Reposition Device and Recalibrate",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
ET.SOFT_DISABLE: SoftDisableAlert("Calibration Invalid: Reposition Device & Recalibrate"),
ET.NO_ENTRY: NoEntryAlert("Calibration Invalid: Reposition Device & Recalibrate"),
},
EventName.calibrationIncomplete: {
ET.SOFT_DISABLE: SoftDisableAlert("Calibration in Progress"),
ET.PERMANENT: calibration_incomplete_alert,
ET.NO_ENTRY: NoEntryAlert("Calibration in Progress"),
},
EventName.doorOpen: {
ET.SOFT_DISABLE: SoftDisableAlert("Door Open"),
ET.NO_ENTRY: NoEntryAlert("Door open"),
},
EventName.seatbeltNotLatched: {
ET.SOFT_DISABLE: SoftDisableAlert("Seatbelt Unlatched"),
ET.NO_ENTRY: NoEntryAlert("Seatbelt unlatched"),
},
EventName.espDisabled: {
ET.SOFT_DISABLE: SoftDisableAlert("ESP Off"),
ET.NO_ENTRY: NoEntryAlert("ESP Off"),
},
EventName.lowBattery: {
ET.SOFT_DISABLE: SoftDisableAlert("Low Battery"),
ET.NO_ENTRY: NoEntryAlert("Low Battery"),
},
EventName.commIssue: {
ET.SOFT_DISABLE: SoftDisableAlert("Communication Issue between Processes"),
ET.NO_ENTRY: NoEntryAlert("Communication Issue between Processes",
audible_alert=AudibleAlert.chimeDisengage),
},
EventName.radarCommIssue: {
ET.SOFT_DISABLE: SoftDisableAlert("Radar Communication Issue"),
ET.NO_ENTRY: NoEntryAlert("Radar Communication Issue",
audible_alert=AudibleAlert.chimeDisengage),
},
EventName.radarCanError: {
ET.SOFT_DISABLE: SoftDisableAlert("Radar Error: Restart the Car"),
ET.NO_ENTRY: NoEntryAlert("Radar Error: Restart the Car"),
},
EventName.radarFault: {
ET.SOFT_DISABLE: SoftDisableAlert("Radar Error: Restart the Car"),
ET.NO_ENTRY : NoEntryAlert("Radar Error: Restart the Car"),
},
EventName.modeldLagging: {
ET.SOFT_DISABLE: SoftDisableAlert("Driving model lagging"),
ET.NO_ENTRY : NoEntryAlert("Driving model lagging"),
},
EventName.posenetInvalid: {
ET.SOFT_DISABLE: SoftDisableAlert("Vision Model Output Uncertain"),
ET.NO_ENTRY: NoEntryAlert("Vision Model Output Uncertain"),
},
EventName.deviceFalling: {
ET.SOFT_DISABLE: SoftDisableAlert("Device Fell Off Mount"),
ET.NO_ENTRY: NoEntryAlert("Device Fell Off Mount"),
},
EventName.lowMemory: {
ET.SOFT_DISABLE: SoftDisableAlert("Low Memory: Reboot Your Device"),
ET.PERMANENT: Alert(
"RAM Critically Low",
"Reboot your Device",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
ET.NO_ENTRY : NoEntryAlert("Low Memory: Reboot Your Device",
audible_alert=AudibleAlert.chimeDisengage),
},
EventName.controlsFailed: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Controls Failed"),
ET.NO_ENTRY: NoEntryAlert("Controls Failed"),
},
EventName.controlsMismatch: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Controls Mismatch"),
},
EventName.canError: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("CAN Error: Check Connections"),
ET.PERMANENT: Alert(
"CAN Error: Check Connections",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 0., 0., .2, creation_delay=1.),
ET.NO_ENTRY: NoEntryAlert("CAN Error: Check Connections"),
},
EventName.steerUnavailable: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("LKAS Fault: Restart the Car"),
ET.PERMANENT: Alert(
"LKAS Fault: Restart the car to engage",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
ET.NO_ENTRY: NoEntryAlert("LKAS Fault: Restart the Car"),
},
EventName.brakeUnavailable: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Cruise Fault: Restart the Car"),
ET.PERMANENT: Alert(
"Cruise Fault: Restart the car to engage",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
ET.NO_ENTRY: NoEntryAlert("Cruise Fault: Restart the Car"),
},
EventName.gasUnavailable: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Gas Fault: Restart the Car"),
ET.NO_ENTRY: NoEntryAlert("Gas Error: Restart the Car"),
},
EventName.reverseGear: {
ET.PERMANENT: Alert(
"Reverse\nGear",
"",
AlertStatus.normal, AlertSize.full,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, 0., 0., .2, creation_delay=0.5),
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Reverse Gear"),
ET.NO_ENTRY: NoEntryAlert("Reverse Gear"),
},
EventName.cruiseDisabled: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Cruise Is Off"),
},
EventName.plannerError: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Planner Solution Error"),
ET.NO_ENTRY: NoEntryAlert("Planner Solution Error"),
},
EventName.relayMalfunction: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Harness Malfunction"),
ET.PERMANENT: Alert(
"Harness Malfunction",
"Please Check Hardware",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
ET.NO_ENTRY: NoEntryAlert("Harness Malfunction"),
},
EventName.noTarget: {
ET.IMMEDIATE_DISABLE: Alert(
"openpilot Canceled",
"No close lead car",
AlertStatus.normal, AlertSize.mid,
Priority.HIGH, VisualAlert.none, AudibleAlert.chimeDisengage, .4, 2., 3.),
ET.NO_ENTRY : NoEntryAlert("No Close Lead Car"),
},
EventName.speedTooLow: {
ET.IMMEDIATE_DISABLE: Alert(
"openpilot Canceled",
"Speed too low",
AlertStatus.normal, AlertSize.mid,
Priority.HIGH, VisualAlert.none, AudibleAlert.chimeDisengage, .4, 2., 3.),
},
EventName.speedTooHigh: {
ET.WARNING: Alert(
"Speed Too High",
"Slow down to resume operation",
AlertStatus.normal, AlertSize.mid,
Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.chimeWarning2Repeat, 2.2, 3., 4.),
ET.NO_ENTRY: Alert(
"Speed Too High",
"Slow down to engage",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.chimeError, .4, 2., 3.),
},
EventName.internetConnectivityNeeded: {
ET.PERMANENT: Alert(
"Please connect to Internet",
"An Update Check Is Required to Engage",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
ET.NO_ENTRY: NoEntryAlert("Please Connect to Internet",
audible_alert=AudibleAlert.chimeDisengage),
},
EventName.lowSpeedLockout: {
ET.PERMANENT: Alert(
"Cruise Fault: Restart the car to engage",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
ET.NO_ENTRY: NoEntryAlert("Cruise Fault: Restart the Car"),
},
}
|
the-stack_0_16223 | """This module defines custom management commands for the app admin."""
import asyncio
from asgiref.sync import sync_to_async
from typing import Dict, Optional, Union, List, Tuple
from decimal import Decimal
from django.core.management.base import BaseCommand
from django.db.models import Q
from stellar_sdk.exceptions import NotFoundError
from stellar_sdk.transaction import Transaction as HorizonTransaction
from stellar_sdk.transaction_envelope import TransactionEnvelope
from stellar_sdk.utils import from_xdr_amount
from stellar_sdk.xdr import (
PaymentResult,
PathPaymentStrictSendResult,
PathPaymentStrictReceiveResult,
OperationResult,
TransactionResult,
)
from stellar_sdk.operation import (
Operation,
Payment,
PathPaymentStrictReceive,
PathPaymentStrictSend,
)
from stellar_sdk.server_async import ServerAsync
from stellar_sdk.client.aiohttp_client import AiohttpClient
from polaris import settings
from polaris.models import Asset, Transaction
from polaris.utils import getLogger, maybe_make_callback_async
from polaris.integrations import registered_custody_integration as rci
logger = getLogger(__name__)
PaymentOpResult = Union[
PaymentResult, PathPaymentStrictSendResult, PathPaymentStrictReceiveResult
]
PaymentOp = Union[Payment, PathPaymentStrictReceive, PathPaymentStrictSend]
class Command(BaseCommand):
"""
Streams transactions to the :attr:`~polaris.models.Asset.distribution_account`
of each :class:`~polaris.models.Asset` in the DB.
Note that this command assumes Stellar payments are made to one distribution
account address per asset. Some third party custody service providers may not
use this scheme, in which case the custody integration class should provide
an alternative command for detecting incoming Stellar payments.
For every response from the server, attempts to find a matching transaction in
the database and updates the transaction's status to ``pending_anchor`` or
``pending_receiver`` depending on the protocol.
Then, the :mod:`~polaris.management.commands.execute_outgoing_transactions` process
will query for transactions in those statuses and provide the anchor an integration
function for executing the payment or withdrawal.
**Optional arguments:**
-h, --help show this help message and exit
"""
def handle(self, *_args, **_options): # pragma: no cover
try:
asyncio.run(self.watch_transactions())
except Exception as e:
# This is very likely a bug, so re-raise the error and crash.
# Heroku will restart the process unless it is repeatedly crashing,
# in which case restarting isn't of much use.
logger.exception("watch_transactions() threw an unexpected exception")
raise e
async def watch_transactions(self): # pragma: no cover
assets = await sync_to_async(list)(Asset.objects.all())
await asyncio.gather(
*[
self._for_account(rci.get_distribution_account(asset=asset))
for asset in assets
]
)
async def _for_account(self, account: str):
"""
Stream transactions for the server Stellar address.
"""
async with ServerAsync(settings.HORIZON_URI, client=AiohttpClient()) as server:
try:
# Ensure the distribution account actually exists
await server.load_account(account)
except NotFoundError:
# This exception will crash the process, but the anchor needs
# to provide valid accounts to watch.
raise RuntimeError(
"Stellar distribution account does not exist in horizon"
)
last_completed_transaction = await sync_to_async(
Transaction.objects.filter(
Q(kind=Transaction.KIND.withdrawal) | Q(kind=Transaction.KIND.send),
receiving_anchor_account=account,
status=Transaction.STATUS.completed,
)
.order_by("-completed_at")
.first
)()
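            # Resume the stream from the paging token of the last completed withdrawal/send
            # for this account, or from the start of its history ("0") if there is none.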
cursor = "0"
if last_completed_transaction and last_completed_transaction.paging_token:
cursor = last_completed_transaction.paging_token
logger.info(
f"starting transaction stream for {account} with cursor {cursor}"
)
endpoint = server.transactions().for_account(account).cursor(cursor)
async for response in endpoint.stream():
await self.process_response(response, account)
@classmethod
async def process_response(cls, response, account):
# We should not match valid pending transactions with ones that were
# unsuccessful on the stellar network. If they were unsuccessful, the
# client is also aware of the failure and will likely attempt to
# resubmit it, in which case we should match the resubmitted transaction
if not response.get("successful"):
return
try:
_ = response["id"]
envelope_xdr = response["envelope_xdr"]
memo = response["memo"]
result_xdr = response["result_xdr"]
except KeyError:
return
# Query filters for SEP6 and 24
withdraw_filters = Q(
status=Transaction.STATUS.pending_user_transfer_start,
kind__in=[
Transaction.KIND.withdrawal,
getattr(Transaction.KIND, "withdrawal-exchange"),
],
)
# Query filters for SEP31
send_filters = Q(
status=Transaction.STATUS.pending_sender,
kind=Transaction.KIND.send,
)
transactions = await sync_to_async(list)(
Transaction.objects.filter(
withdraw_filters | send_filters,
memo=memo,
receiving_anchor_account=account,
)
.select_related("asset")
.all()
)
if not transactions:
logger.info(f"No match found for stellar transaction {response['id']}")
return
elif len(transactions) == 1:
transaction = transactions[0]
else:
# in the prior implementation of watch_transactions, the first transaction
# to have the same memo is matched, so we'll do the same in the refactored
# version.
logger.error(f"multiple Transaction objects returned for memo: {memo}")
transaction = transactions[0]
logger.info(
f"Matched transaction object {transaction.id} for stellar transaction {response['id']}"
)
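        # Decode the result and envelope XDRs so the individual payment (or path payment)
        # operation that credited the distribution account can be located and its
        # delivered amount read.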
op_results = TransactionResult.from_xdr(result_xdr).result.results
horizon_tx = TransactionEnvelope.from_xdr(
envelope_xdr,
network_passphrase=settings.STELLAR_NETWORK_PASSPHRASE,
).transaction
payment_data = await cls._find_matching_payment_data(
response, horizon_tx, op_results, transaction
)
if not payment_data:
logger.warning(f"Transaction matching memo {memo} has no payment operation")
return
# Transaction.amount_in is overwritten with the actual amount sent in the stellar
# transaction. This allows anchors to validate the actual amount sent in
# execute_outgoing_transactions() and handle invalid amounts appropriately.
transaction.amount_in = round(
Decimal(payment_data["amount"]),
transaction.asset.significant_decimals,
)
# The stellar transaction has been matched with an existing record in the DB.
# Now the anchor needs to initiate the off-chain transfer of the asset.
if transaction.protocol == Transaction.PROTOCOL.sep31:
# SEP-31 uses 'pending_receiver' status
transaction.status = Transaction.STATUS.pending_receiver
await sync_to_async(transaction.save)()
else:
# SEP-6 and 24 uses 'pending_anchor' status
transaction.status = Transaction.STATUS.pending_anchor
await sync_to_async(transaction.save)()
await maybe_make_callback_async(transaction)
return None
@classmethod
async def _find_matching_payment_data(
cls,
response: Dict,
horizon_tx: HorizonTransaction,
result_ops: List[OperationResult],
transaction: Transaction,
) -> Optional[Dict]:
matching_payment_data = None
ops = horizon_tx.operations
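        # Operations and their results are index-aligned, so walk them in parallel and
        # skip anything that is not a payment-type operation.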
for idx, op_result in enumerate(result_ops):
op, op_result = cls._cast_operation_and_result(ops[idx], op_result)
if not op_result: # not a payment op
continue
maybe_payment_data = cls._check_for_payment_match(
op, op_result, transaction.asset, transaction
)
if maybe_payment_data:
if ops[idx].source:
source = ops[idx].source.account_muxed or ops[idx].source.account_id
else:
source = (
horizon_tx.source.account_muxed or horizon_tx.source.account_id
)
await cls._update_transaction_info(
transaction, response["id"], response["paging_token"], source
)
matching_payment_data = maybe_payment_data
break
return matching_payment_data
@classmethod
async def _update_transaction_info(
cls, transaction: Transaction, stellar_txid: str, paging_token: str, source: str
):
transaction.stellar_transaction_id = stellar_txid
transaction.from_address = source
transaction.paging_token = paging_token
await sync_to_async(transaction.save)()
@classmethod
def _check_for_payment_match(
cls,
operation: PaymentOp,
op_result: PaymentOpResult,
want_asset: Asset,
transaction: Transaction,
) -> Optional[Dict]:
payment_data = cls._get_payment_values(operation, op_result)
if (
payment_data["destination"] == transaction.receiving_anchor_account
and payment_data["code"] == want_asset.code
and payment_data["issuer"] == want_asset.issuer
):
return payment_data
else:
return None
@classmethod
def _cast_operation_and_result(
cls, operation: Operation, op_result: OperationResult
) -> Tuple[Optional[PaymentOp], Optional[PaymentOpResult]]:
op_xdr_obj = operation.to_xdr_object()
if isinstance(operation, Payment):
return (
Payment.from_xdr_object(op_xdr_obj),
op_result.tr.payment_result,
)
elif isinstance(operation, PathPaymentStrictSend):
return (
PathPaymentStrictSend.from_xdr_object(op_xdr_obj),
op_result.tr.path_payment_strict_send_result,
)
elif isinstance(operation, PathPaymentStrictReceive):
return (
PathPaymentStrictReceive.from_xdr_object(op_xdr_obj),
op_result.tr.path_payment_strict_receive_result,
)
else:
return None, None
@classmethod
def _get_payment_values(
cls, operation: PaymentOp, op_result: PaymentOpResult
) -> Dict:
values = {
"destination": operation.destination.account_id,
"amount": None,
"code": None,
"issuer": None,
}
if isinstance(operation, Payment):
values["amount"] = str(operation.amount)
values["code"] = operation.asset.code
values["issuer"] = operation.asset.issuer
elif isinstance(operation, PathPaymentStrictSend):
# since the dest amount is not specified in a strict-send op,
# we need to get the dest amount from the operation's result
#
# this method of fetching amounts gives the "raw" amount, so
# we need to divide by Operation._ONE: 10000000
# (Stellar uses 7 decimals places of precision)
values["amount"] = from_xdr_amount(op_result.success.last.amount.int64)
values["code"] = operation.dest_asset.code
values["issuer"] = operation.dest_asset.issuer
elif isinstance(operation, PathPaymentStrictReceive):
values["amount"] = str(operation.dest_amount)
values["code"] = operation.dest_asset.code
values["issuer"] = operation.dest_asset.issuer
else:
raise ValueError("Unexpected operation, expected payment or path payment")
return values
|