max_stars_repo_path (string, length 4-245) | max_stars_repo_name (string, length 7-115) | max_stars_count (int64, 101-368k) | id (string, length 2-8) | content (string, length 6-1.03M)
---|---|---|---|---|
emerge.py | kargaranamir/emerge | 142 | 12709362 |
"""
Simple wrapper to start emerge as a standalone tool.
"""
# Authors: <NAME> <<EMAIL>>
# License: MIT
from emerge.appear import Emerge
def run():
emerge = Emerge()
emerge.start()
if __name__ == "__main__":
run()
|
tests/transformers/convert_doc_test.py | elifesciences/sciencebeam | 272 | 12709385 |
import logging
from configparser import ConfigParser
from pathlib import Path
from unittest.mock import patch, MagicMock
import pytest
from sciencebeam.utils.mime_type_constants import MimeTypes
from sciencebeam.transformers import convert_doc as convert_doc_module
from sciencebeam.transformers.convert_doc import (
DEFAULT_DOC_CONVERT_PROCESS_TIMEOUT,
DEFAULT_DOC_CONVERT_MAX_UPTIME,
DOC_CONVERT_SECTION_NAME,
AppConfigOptions,
EnvironmentVariables,
_get_default_config,
_convert_doc_to,
doc_to_pdf,
doc_to_docx
)
LOGGER = logging.getLogger(__name__)
DOC_CONTENT_1 = b'doc content 1'
PDF_CONTENT_1 = b'pdf content 1'
DOCX_CONTENT_1 = b'docx content 1'
@pytest.fixture(name='get_doc_converter_mock', autouse=True)
def _get_doc_converter_mock():
with patch.object(convert_doc_module, '_get_doc_converter') as m:
yield m
@pytest.fixture(name='pdf_path')
def _pdf_path(temp_dir: Path):
return temp_dir.joinpath('temp.pdf')
@pytest.fixture(name='doc_converter_mock', autouse=True)
def _doc_converter_mock(get_doc_converter_mock: MagicMock, pdf_path: Path):
doc_converter_mock = get_doc_converter_mock.return_value
doc_converter_mock.convert.return_value = str(pdf_path)
return doc_converter_mock
@pytest.fixture(name='TemporaryDirectory_mock', autouse=True)
def _mock_temp_directory(tmpdir):
with patch.object(convert_doc_module, 'TemporaryDirectory') as m:
m.return_value.__enter__.return_value = str(tmpdir)
yield m
@pytest.fixture(name='get_app_config_mock')
def _get_app_config_mock():
with patch.object(convert_doc_module, 'get_app_config') as m:
m.return_value = ConfigParser()
yield m
@pytest.fixture(name='app_config_mock')
def _app_config_mock(get_app_config_mock: MagicMock) -> ConfigParser:
return get_app_config_mock.return_value
class TestGetDefaultConfig:
@patch('os.environ', {})
def test_should_load_config_from_app_config(self, app_config_mock: ConfigParser):
app_config_mock.read_dict({
DOC_CONVERT_SECTION_NAME: {
AppConfigOptions.PROCESS_TIMEOUT: '123',
AppConfigOptions.MAX_UPTIME: '101',
AppConfigOptions.STOP_LISTENER_ON_ERROR: 'true',
AppConfigOptions.ENABLE_DEBUG: 'true'
}
})
config = _get_default_config()
LOGGER.debug('config: %s', config)
assert config.get('process_timeout') == 123
assert config.get('max_uptime') == 101
assert config.get('stop_listener_on_error') is True
assert config.get('enable_debug') is True
@patch('os.environ', {
EnvironmentVariables.DOC_CONVERT_PROCESS_TIMEOUT: '123',
EnvironmentVariables.DOC_CONVERT_MAX_UPTIME: '101',
EnvironmentVariables.DOC_CONVERT_ENABLE_DEBUG: 'true'
})
def test_should_load_config_from_env(self, app_config_mock: ConfigParser):
app_config_mock.read_dict({
DOC_CONVERT_SECTION_NAME: {
AppConfigOptions.PROCESS_TIMEOUT: '1',
AppConfigOptions.MAX_UPTIME: '1',
AppConfigOptions.STOP_LISTENER_ON_ERROR: 'true',
AppConfigOptions.ENABLE_DEBUG: 'false'
}
})
config = _get_default_config()
LOGGER.debug('config: %s', config)
assert config.get('process_timeout') == 123
assert config.get('max_uptime') == 101
assert config.get('enable_debug') is True
@patch('os.environ', {})
def test_should_use_defaults(self, app_config_mock: ConfigParser):
app_config_mock.read_dict({
DOC_CONVERT_SECTION_NAME: {
AppConfigOptions.STOP_LISTENER_ON_ERROR: 'true',
AppConfigOptions.ENABLE_DEBUG: 'true'
}
})
config = _get_default_config()
LOGGER.debug('config: %s', config)
assert config.get('process_timeout') == DEFAULT_DOC_CONVERT_PROCESS_TIMEOUT
assert config.get('max_uptime') == DEFAULT_DOC_CONVERT_MAX_UPTIME
class TestConvertDocTo:
def test_should_return_pdf(self, pdf_path: Path):
pdf_path.write_bytes(PDF_CONTENT_1)
assert _convert_doc_to(
DOC_CONTENT_1, MimeTypes.DOC, 'pdf'
) == PDF_CONTENT_1
def test_should_call_convert_with_doc(
self, temp_dir: Path, pdf_path: Path, doc_converter_mock: MagicMock):
pdf_path.write_bytes(PDF_CONTENT_1)
_convert_doc_to(DOC_CONTENT_1, MimeTypes.DOC, 'pdf')
doc_converter_mock.convert.assert_called_with(
str(temp_dir.joinpath('temp.doc')),
output_type='pdf'
)
def test_should_call_check_output_with_docx(
self, temp_dir: Path, pdf_path: Path, doc_converter_mock: MagicMock):
pdf_path.write_bytes(PDF_CONTENT_1)
_convert_doc_to(DOC_CONTENT_1, MimeTypes.DOCX, 'pdf')
doc_converter_mock.convert.assert_called_with(
str(temp_dir.joinpath('temp.docx')),
output_type='pdf'
)
def test_should_call_check_output_with_dotx(
self, temp_dir: Path, pdf_path: Path, doc_converter_mock: MagicMock):
pdf_path.write_bytes(PDF_CONTENT_1)
_convert_doc_to(DOC_CONTENT_1, MimeTypes.DOTX, 'pdf')
doc_converter_mock.convert.assert_called_with(
str(temp_dir.joinpath('temp.dotx')),
output_type='pdf'
)
def test_should_call_check_output_with_rtf(
self, temp_dir: Path, pdf_path: Path, doc_converter_mock: MagicMock):
pdf_path.write_bytes(PDF_CONTENT_1)
_convert_doc_to(DOC_CONTENT_1, MimeTypes.RTF, 'pdf')
doc_converter_mock.convert.assert_called_with(
str(temp_dir.joinpath('temp.rtf')),
output_type='pdf'
)
class TestDocToPdf:
def test_should_return_pdf(self, pdf_path: Path):
pdf_path.write_bytes(PDF_CONTENT_1)
assert doc_to_pdf(DOC_CONTENT_1, MimeTypes.DOC) == PDF_CONTENT_1
class TestDocToDocx:
def test_should_return_docx(self, temp_dir: Path, doc_converter_mock: MagicMock):
docx_path = temp_dir.joinpath('temp.docx')
doc_converter_mock.convert.return_value = str(docx_path)
docx_path.write_bytes(DOCX_CONTENT_1)
assert doc_to_docx(DOC_CONTENT_1, MimeTypes.DOC) == DOCX_CONTENT_1
|
guild/tests/samples/projects/autocomplete/echo.py | dwolfschlaeger/guildai | 694 | 12709393 |
msg = None
if msg:
print(msg)
|
dojo/unittests/tools/test_eslint_parser.py | axelpavageau/django-DefectDojo | 1,772 | 12709407 |
from django.test import TestCase
from dojo.tools.eslint.parser import ESLintParser
from dojo.models import Test
class TestESLintParser(TestCase):
def test_parse_file_has_two_findings(self):
testfile = open("dojo/unittests/scans/eslint/scan.json")
parser = ESLintParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(2, len(findings))
def test_parse_empty_file(self):
testfile = open("dojo/unittests/scans/eslint/empty.json")
parser = ESLintParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(0, len(findings))
def test_parse_file_with_no_finding(self):
testfile = open("dojo/unittests/scans/eslint/no_finding.json")
parser = ESLintParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(0, len(findings))
|
androguard/core/bytecodes/axml/types.py | amimo/androguard | 4,084 | 12709418 | # Type definition for (type, data) tuples representing a value
# See http://androidxref.com/9.0.0_r3/xref/frameworks/base/libs/androidfw/include/androidfw/ResourceTypes.h#262
# The 'data' is either 0 or 1, specifying this resource is either
# undefined or empty, respectively.
TYPE_NULL = 0x00
# The 'data' holds a ResTable_ref, a reference to another resource
# table entry.
TYPE_REFERENCE = 0x01
# The 'data' holds an attribute resource identifier.
TYPE_ATTRIBUTE = 0x02
# The 'data' holds an index into the containing resource table's
# global value string pool.
TYPE_STRING = 0x03
# The 'data' holds a single-precision floating point number.
TYPE_FLOAT = 0x04
# The 'data' holds a complex number encoding a dimension value
# such as "100in".
TYPE_DIMENSION = 0x05
# The 'data' holds a complex number encoding a fraction of a
# container.
TYPE_FRACTION = 0x06
# The 'data' holds a dynamic ResTable_ref, which needs to be
# resolved before it can be used like a TYPE_REFERENCE.
TYPE_DYNAMIC_REFERENCE = 0x07
# The 'data' holds an attribute resource identifier, which needs to be resolved
# before it can be used like a TYPE_ATTRIBUTE.
TYPE_DYNAMIC_ATTRIBUTE = 0x08
# Beginning of integer flavors...
TYPE_FIRST_INT = 0x10
# The 'data' is a raw integer value of the form n..n.
TYPE_INT_DEC = 0x10
# The 'data' is a raw integer value of the form 0xn..n.
TYPE_INT_HEX = 0x11
# The 'data' is either 0 or 1, for input "false" or "true" respectively.
TYPE_INT_BOOLEAN = 0x12
# Beginning of color integer flavors...
TYPE_FIRST_COLOR_INT = 0x1c
# The 'data' is a raw integer value of the form #aarrggbb.
TYPE_INT_COLOR_ARGB8 = 0x1c
# The 'data' is a raw integer value of the form #rrggbb.
TYPE_INT_COLOR_RGB8 = 0x1d
# The 'data' is a raw integer value of the form #argb.
TYPE_INT_COLOR_ARGB4 = 0x1e
# The 'data' is a raw integer value of the form #rgb.
TYPE_INT_COLOR_RGB4 = 0x1f
# ...end of integer flavors.
TYPE_LAST_COLOR_INT = 0x1f
# ...end of integer flavors.
TYPE_LAST_INT = 0x1f
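# A minimal helper sketch built on the ranges above; the function name is an
# illustrative assumption, not part of the original module. Integer flavors
# occupy the contiguous range [TYPE_FIRST_INT, TYPE_LAST_INT].
def _is_integer_type(value_type):
    return TYPE_FIRST_INT <= value_type <= TYPE_LAST_INT

if __name__ == "__main__":
    assert _is_integer_type(TYPE_INT_BOOLEAN)   # 0x12 falls inside the range
    assert not _is_integer_type(TYPE_STRING)    # 0x03 does not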
|
simba/dpk_script/annotator.py | justinshenk/simba | 172 | 12709423 |
import warnings
warnings.filterwarnings('ignore',category=FutureWarning)
from deepposekit import Annotator
import cv2
import numpy as np
import warnings
from configparser import ConfigParser
import os
warnings.filterwarnings('ignore')
def dpkAnnotator(dpkini,annotationfile):
config = ConfigParser()
configFile = str(dpkini)
config.read(configFile)
project_path = config.get('general DPK settings', 'project_folder')
annotationsPath = annotationfile
bodyPartsListPath = os.path.join(project_path, 'skeleton.csv')
app = Annotator(datapath=annotationsPath, dataset='images', skeleton=bodyPartsListPath, shuffle_colors=False, text_scale=1)
im = np.zeros((300, 600, 3))
cv2.putText(im, 'Instructions', (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255, 255, 255), 2)
cv2.putText(im, '+- = rescale image by +/- 10%', (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0, 255, 0), 2)
cv2.putText(im, 'left mouse button = move active keypoint to cursor location', (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0, 255, 0), 2)
cv2.putText(im, 'WASD = move active keypoint 1px or 10px', (10, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0, 255, 0), 2)
cv2.putText(im, 'JL = next or previous image', (10, 100), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0, 255, 0), 2)
cv2.putText(im, '<> = jump 10 images forward or backward', (10, 120), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0, 255, 0), 2)
cv2.putText(im, 'I,K or tab, shift+tab = switch active keypoint', (10, 140), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0, 255, 0), 2)
cv2.putText(im, 'R = mark image as unannotated ("reset")', (10, 160), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0, 255, 0), 2)
cv2.putText(im, 'F = mark image as annotated ("finished")', (10, 180), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0, 255, 0), 2)
cv2.putText(im, 'esc or Q = quit', (10, 200), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0, 255, 0), 2)
cv2.putText(im, 'Tap tab to begin', (10, 240), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
cv2.imshow('Instructions', im)
k = cv2.waitKey(0)
while (1):
cv2.imshow('Instructions', im)
k = cv2.waitKey(0)
app.run()
if k == 27: # Esc key to stop
            print('Annotations saved in: ' + str(annotationfile))
break |
examples/devices/xor-multidevice.py | ruyimarone/dynet | 3,307 | 12709469 |
# Usage:
# python xor-multidevice.py --dynet-devices CPU,GPU:0,GPU:1
# or python xor-multidevice.py --dynet-gpus 2
import sys
import dynet as dy
#xsent = True
xsent = False
HIDDEN_SIZE = 8
ITERATIONS = 2000
m = dy.Model()
trainer = dy.SimpleSGDTrainer(m)
pW1 = m.add_parameters((HIDDEN_SIZE, 2), device="GPU:1")
pb1 = m.add_parameters(HIDDEN_SIZE, device="GPU:1")
pW2 = m.add_parameters((HIDDEN_SIZE, HIDDEN_SIZE), device="GPU:0")
pb2 = m.add_parameters(HIDDEN_SIZE, device="GPU:0")
pV = m.add_parameters((1, HIDDEN_SIZE), device="CPU")
pa = m.add_parameters(1, device="CPU")
if len(sys.argv) == 2:
m.populate_from_textfile(sys.argv[1])
dy.renew_cg()
W1, b1, W2, b2, V, a = dy.parameter(pW1, pb1, pW2, pb2, pV, pa)
x = dy.vecInput(2, "GPU:1")
y = dy.scalarInput(0, "CPU")
h1 = dy.tanh((W1*x) + b1)
h1_gpu0 = dy.to_device(h1, "GPU:0")
h2 = dy.tanh((W2*h1_gpu0) + b2)
h2_cpu = dy.to_device(h2, "CPU")
if xsent:
y_pred = dy.logistic((V*h2_cpu) + a)
loss = dy.binary_log_loss(y_pred, y)
T = 1
F = 0
else:
y_pred = (V*h2_cpu) + a
loss = dy.squared_distance(y_pred, y)
T = 1
F = -1
for iter in range(ITERATIONS):
mloss = 0.0
for mi in range(4):
x1 = mi % 2
x2 = (mi // 2) % 2
x.set([T if x1 else F, T if x2 else F])
y.set(T if x1 != x2 else F)
mloss += loss.scalar_value()
loss.backward()
trainer.update()
mloss /= 4.
print("loss: %0.9f" % mloss)
x.set([F,T])
z = -(-y_pred)
print(z.scalar_value())
m.save("xor.pymodel")
dy.renew_cg()
W1, b1, W2, b2, V, a = dy.parameter(pW1, pb1, pW2, pb2, pV, pa)
x = dy.vecInput(2, "GPU:1")
y = dy.scalarInput(0, "CPU")
h1 = dy.tanh((W1*x) + b1)
h1_gpu0 = dy.to_device(h1, "GPU:0")
h2 = dy.tanh((W2*h1_gpu0) + b2)
h2_cpu = dy.to_device(h2, "CPU")
if xsent:
y_pred = dy.logistic((V*h2_cpu) + a)
else:
y_pred = (V*h2_cpu) + a
x.set([T,F])
print("TF",y_pred.scalar_value())
x.set([F,F])
print("FF",y_pred.scalar_value())
x.set([T,T])
print("TT",y_pred.scalar_value())
x.set([F,T])
print("FT",y_pred.scalar_value())
|
databuilder/databuilder/models/query/base.py | defendercrypt/amundsen | 2,072 | 12709494 | # Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
from typing import Iterator
from databuilder.models.graph_serializable import GraphSerializable
class QueryBase(GraphSerializable):
@staticmethod
def _normalize(sql: str) -> str:
"""
Normalizes a SQL query or SQL expression.
No checks are made to ensure that the input is valid SQL.
        This is not a full normalization. The following operations are performed:
        - Any run of whitespace characters outside of a quoted region is replaced by a single ' ' character.
- Characters outside of quoted regions are made lower case.
- If present, a trailing ';' is removed from the query.
Note:
Making characters outside quoted regions lower case does not in general result in an equivalent SQL statement.
        For example, with MySQL the case sensitivity of table names is operating system dependent.
In practice, modern systems rarely rely on case sensitivity, and since making the non-quoted regions of the
query lowercase is very helpful in identifying queries, we go ahead and do so.
Also, this method fails to identify expressions such as `1 + 2` and `1+2`.
There are likely too many special cases in this area to make much progress without doing a proper parse.
"""
text = sql.strip()
it = iter(text)
sb = []
for c in it:
if c.isspace():
c = QueryBase._process_whitespace(it)
sb.append(' ')
sb.append(c.lower())
if c in ('`', '"', "'"):
for d in QueryBase._process_quoted(it, c):
sb.append(d)
if sb[-1] == ';':
sb.pop()
return ''.join(sb)
@staticmethod
def _process_quoted(it: Iterator[str], quote: str) -> Iterator[str]:
"""
Yields characters up to and including the first occurrence of the (non-escaped) character `quote`.
Allows `quote` to be escaped with '\\'.
"""
p = ''
for c in it:
yield c
if c == quote and p != '\\':
break
p = c
@staticmethod
def _process_whitespace(it: Iterator[str]) -> str:
"""
Returns the first non-whitespace character encountered.
        This should never return `None` since the query text is stripped before being processed.
That is, if the current character is a whitespace character, then there remains at least one non-whitespace
character in the stream.
"""
for c in it:
if not c.isspace():
return c
raise ValueError("Input string was not stripped!")
|
recipes/Python/578359_Media_File_Renamer/recipe-578359.py | tdiprima/code | 2,023 | 12709500 | import os
import sys
# NOTE
# ====
# Renaming should happen in groups based on extention.
# All files should first be renamed with a unique ID.
################################################################################
ERR = False
ALL = ''.join(map(chr, xrange(256)))
NUM = '0123456789'
LET = ALL.translate(ALL, NUM)
EXT = 'avi', 'bmp', 'gif', 'jpg', 'wmv'
################################################################################
class Filename:
def __init__(self, filename):
self.filename = filename.lower()
split = self.filename.rsplit('.', 1)
self.name = split[0]
self.ext = split[1] if len(split) == 2 else ''
self.let = self.name.translate(ALL, NUM)
self.num = self.name.translate(ALL, LET)
def __eq__(self, other):
return bool(self.num) and other == int(self.num)
################################################################################
def main():
try:
arguments = sys.argv[1:]
assert arguments
for path in arguments:
assert os.path.isdir(path)
for path in arguments:
engine(path)
except:
sys.stdout.write('Usage: %s <directory>' % os.path.basename(sys.argv[0]))
def engine(path):
global ERR
for root, dirs, files in os.walk(path):
# gather all relevant names
files = filter(lambda name: name.num and name.ext in EXT, map(Filename, files))
# find all taken number names
taken = []
for name in files[:]:
if name.name == name.num:
files.remove(name)
taken.append(name)
# put all names in order
files.sort(compare)
taken.sort(compare)
# rename all non-number names
count = 0
for name in files:
while count in taken:
taken.remove(count)
count += 1
name.new = str(count)
count += 1
# condense all numerical names
for name in taken:
if name.num != str(count):
name.new = str(count)
files.append(name)
count += 1
# rename files needing new names
for name in files:
old = os.path.join(root, name.filename)
try:
os.rename(old, os.path.join(root, name.new + '.' + name.ext))
except:
sys.stderr.write('%sError: %s' % (ERR and '\n' or '', old))
ERR = True
def compare(x, y):
integer = cmp(x.let, y.let)
return integer if integer else cmp(int(x.num), int(y.num))
################################################################################
if __name__ == '__main__':
main()
|
torchaudio/models/deepspeech.py | popcornell/audio | 1,718 | 12709505 |
import torch
__all__ = ["DeepSpeech"]
class FullyConnected(torch.nn.Module):
"""
Args:
n_feature: Number of input features
n_hidden: Internal hidden unit size.
"""
def __init__(self, n_feature: int, n_hidden: int, dropout: float, relu_max_clip: int = 20) -> None:
super(FullyConnected, self).__init__()
self.fc = torch.nn.Linear(n_feature, n_hidden, bias=True)
self.relu_max_clip = relu_max_clip
self.dropout = dropout
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.fc(x)
x = torch.nn.functional.relu(x)
x = torch.nn.functional.hardtanh(x, 0, self.relu_max_clip)
if self.dropout:
x = torch.nn.functional.dropout(x, self.dropout, self.training)
return x
class DeepSpeech(torch.nn.Module):
"""
DeepSpeech model architecture from *Deep Speech: Scaling up end-to-end speech recognition*
[:footcite:`hannun2014deep`].
Args:
n_feature: Number of input features
n_hidden: Internal hidden unit size.
n_class: Number of output classes
"""
def __init__(
self,
n_feature: int,
n_hidden: int = 2048,
n_class: int = 40,
dropout: float = 0.0,
) -> None:
super(DeepSpeech, self).__init__()
self.n_hidden = n_hidden
self.fc1 = FullyConnected(n_feature, n_hidden, dropout)
self.fc2 = FullyConnected(n_hidden, n_hidden, dropout)
self.fc3 = FullyConnected(n_hidden, n_hidden, dropout)
self.bi_rnn = torch.nn.RNN(n_hidden, n_hidden, num_layers=1, nonlinearity="relu", bidirectional=True)
self.fc4 = FullyConnected(n_hidden, n_hidden, dropout)
self.out = torch.nn.Linear(n_hidden, n_class)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
Args:
x (torch.Tensor): Tensor of dimension (batch, channel, time, feature).
Returns:
Tensor: Predictor tensor of dimension (batch, time, class).
"""
# N x C x T x F
x = self.fc1(x)
# N x C x T x H
x = self.fc2(x)
# N x C x T x H
x = self.fc3(x)
# N x C x T x H
x = x.squeeze(1)
# N x T x H
x = x.transpose(0, 1)
# T x N x H
x, _ = self.bi_rnn(x)
# The fifth (non-recurrent) layer takes both the forward and backward units as inputs
x = x[:, :, : self.n_hidden] + x[:, :, self.n_hidden :]
# T x N x H
x = self.fc4(x)
# T x N x H
x = self.out(x)
# T x N x n_class
x = x.permute(1, 0, 2)
# N x T x n_class
x = torch.nn.functional.log_softmax(x, dim=2)
# N x T x n_class
return x
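# A minimal smoke-test sketch; the shapes are illustrative assumptions, not
# values from the original module. A batch of 4 one-channel inputs with 100
# time steps and 64 features yields per-frame log-probabilities over the 40
# default classes.
if __name__ == "__main__":
    model = DeepSpeech(n_feature=64)
    dummy = torch.rand(4, 1, 100, 64)
    out = model(dummy)
    print(out.shape)  # torch.Size([4, 100, 40])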
|
luna/gateware/platform/lambdaconcept.py | modwizcode/luna | 609 | 12709516 | #
# This file is part of LUNA.
#
# Copyright (c) 2020 <NAME> <<EMAIL>>
# SPDX-License-Identifier: BSD-3-Clause
""" LambdaConcept board platform definitions.
This is a non-core platform. To use it, you'll need to set your LUNA_PLATFORM variable:
> export LUNA_PLATFORM="luna.gateware.platform.lambdaconcept:USB2SnifferPlatform"
or
> export LUNA_PLATFORM="luna.gateware.platform.lambdaconcept:ECPIX5PlatformRev02"
"""
import os
import subprocess
from amaranth import Elaboratable, ClockDomain, Module, ResetSignal
from amaranth.build import Resource, Subsignal, Pins, PinsN, Attrs, Clock, DiffPairs, Connector
from amaranth.vendor.xilinx_7series import Xilinx7SeriesPlatform
from amaranth.vendor.lattice_ecp5 import LatticeECP5Platform
from .core import LUNAPlatform
from ..architecture.car import PHYResetController
def ULPIResource(name, data_sites, clk_site, dir_site, nxt_site, stp_site, reset_site, extras=(), attrs=None):
""" Generates a set of resources for a ULPI-connected USB PHY. """
attrs = Attrs() if attrs is None else attrs
return Resource(name, 0,
Subsignal("data", Pins(data_sites, dir="io")),
Subsignal("clk", Pins(clk_site, dir="i" ), Clock(60e6)),
Subsignal("dir", Pins(dir_site, dir="i" )),
Subsignal("nxt", Pins(nxt_site, dir="i" )),
Subsignal("stp", Pins(stp_site, dir="o" )),
Subsignal("rst", Pins(reset_site, dir="o" )),
attrs
)
class StubClockDomainGenerator(Elaboratable):
""" Stub clock domain generator; stands in for the typical LUNA one.
    This generator creates clock domains, but currently does not configure them.
"""
def __init__(self, *, clock_frequencies=None, clock_signal_name=None):
pass
def elaborate(self, platform):
m = Module()
# Create our domains; but don't do anything else for them, for now.
m.domains.usb = ClockDomain()
m.domains.fast = ClockDomain()
# Handle USB PHY resets.
m.submodules.usb_reset = controller = PHYResetController()
m.d.comb += [
ResetSignal("usb") .eq(controller.phy_reset)
]
return m
class USB2SnifferPlatform(Xilinx7SeriesPlatform, LUNAPlatform):
""" Board description for OpenVizsla USB analyzer. """
name = "LambdaConcept USB2Sniffer"
device = "xc7a35t"
package = "fgg484"
speed = "1"
default_clk = "clk100"
# Provide the type that'll be used to create our clock domains.
clock_domain_generator = StubClockDomainGenerator
# We only have a single PHY; so use it directly.
default_usb_connection = "target_phy"
#
# I/O resources.
#
resources = [
Resource("clk100", 0, Pins("J19"), Attrs(IOStandard="LVCMOS33")),
Resource("led", 0, PinsN("W1"), Attrs(IOStandard="LVCMOS33")),
Resource("led", 1, PinsN("Y2"), Attrs(IOStandard="LVCMOS33")),
Resource("rgb_led", 0,
Subsignal("r", PinsN("W2")),
Subsignal("g", PinsN("Y1")),
Subsignal("b", PinsN("W1")),
Attrs(IOStandard="LVCMOS33"),
),
Resource("rgb_led", 1,
Subsignal("r", PinsN("AA1")),
Subsignal("g", PinsN("AB1")),
Subsignal("b", PinsN("Y2")),
Attrs(IOStandard="LVCMOS33"),
),
Resource("serial", 0,
Subsignal("tx", Pins("U21")), # FPGA_GPIO0
Subsignal("rx", Pins("T21")), # FPGA_GPIO1
Attrs(IOStandard="LVCMOS33"),
),
Resource("ddram", 0,
Subsignal("a", Pins(
"M2 M5 M3 M1 L6 P1 N3 N2"
"M6 R1 L5 N5 N4 P2 P6"),
Attrs(IOStandard="SSTL15")),
Subsignal("ba", Pins("L3 K6 L4"), Attrs(IOStandard="SSTL15")),
Subsignal("ras_n", Pins("J4"), Attrs(IOStandard="SSTL15")),
Subsignal("cas_n", Pins("K3"), Attrs(IOStandard="SSTL15")),
Subsignal("we_n", Pins("L1"), Attrs(IOStandard="SSTL15")),
Subsignal("dm", Pins("G3 F1"), Attrs(IOStandard="SSTL15")),
Subsignal("dq", Pins(
"G2 H4 H5 J1 K1 H3 H2 J5"
"E3 B2 F3 D2 C2 A1 E2 B1"),
Attrs(IOStandard="SSTL15", IN_TERM="UNTUNED_SPLIT_50")),
Subsignal("dqs_p", Pins("K2 E1"), Attrs(IOStandard="DIFF_SSTL15")),
Subsignal("dqs_n", Pins("J2 D1"), Attrs(IOStandard="DIFF_SSTL15")),
Subsignal("clk_p", Pins("P5"), Attrs(IOStandard="DIFF_SSTL15")),
Subsignal("clk_n", Pins("P4"), Attrs(IOStandard="DIFF_SSTL15")),
Subsignal("cke", Pins("J6"), Attrs(IOStandard="SSTL15")),
Subsignal("odt", Pins("K4"), Attrs(IOStandard="SSTL15")),
Subsignal("reset_n", Pins("G1"), Attrs(IOStandard="SSTL15")),
Attrs(SLEW="FAST"),
),
Resource("flash", 0,
Subsignal("cs_n", Pins("T19")),
Subsignal("mosi", Pins("P22")),
Subsignal("miso", Pins("R22")),
Subsignal("vpp", Pins("P21")),
Subsignal("hold", Pins("R21")),
Attrs(IOStandard="LVCMOS33")
),
Resource("usb_fifo_clock", 0, Pins("D17"), Attrs(IOStandard="LVCMOS33")),
Resource("usb_fifo", 0,
Subsignal("rst", Pins("K22")),
Subsignal("data", Pins("A16 F14 A15 F13 A14 E14 A13 E13 B13 C15 C13 C14 B16 E17 B15 F16"
"A20 E18 B20 F18 D19 D21 E19 E21 A21 B21 A19 A18 F20 F19 B18 B17")),
Subsignal("be", Pins("K16 L16 G20 H20")),
Subsignal("rxf_n", Pins("M13")),
Subsignal("txe_n", Pins("L13")),
Subsignal("rd_n", Pins("K19")),
Subsignal("wr_n", Pins("M15")),
Subsignal("oe_n", Pins("L21")),
Subsignal("siwua", Pins("M16")),
Attrs(IOStandard="LVCMOS33", SLEW="FAST")
),
Resource("ulpi_sw", 0,
Subsignal("s", Pins("Y8", dir="o")),
Subsignal("oe", PinsN("Y9", dir="o")),
Attrs(IOStandard="LVCMOS33"),
),
# Host PHY -- connects directly to the host port.
ULPIResource("target_phy",
data_sites="AB18 AA18 AA19 AB20 AA20 AB21 AA21 AB22",
clk_site="W19",
dir_site="W21", stp_site="Y22", nxt_site="W22", reset_site="V20",
attrs=Attrs(IOStandard="LVCMOS33", SLEW="FAST")
),
# Target PHY -- connects via a switch to the target port.
ULPIResource("sideband_phy",
data_sites="AB2 AA3 AB3 Y4 AA4 AB5 AA5 AB6",
clk_site="V4",
dir_site="AB7", stp_site="AA6", nxt_site="AB8", reset_site="AA8",
attrs=Attrs(IOStandard="LVCMOS33", SLEW="FAST")
)
]
connectors = []
def toolchain_program(self, products, name):
xc3sprog = os.environ.get("XC3SPROG", "xc3sprog")
with products.extract("{}.bit".format(name)) as bitstream_file:
subprocess.check_call([xc3sprog, "-c", "ft4232h", bitstream_file])
class ECPIX5PlatformRev02(LatticeECP5Platform, LUNAPlatform):
name = "ECPIX-5 R02"
device = "LFE5UM5G-85F"
package = "BG554"
speed = "8"
default_clk = "clk100"
default_rst = "rst"
# Provide the type that'll be used to create our clock domains.
clock_domain_generator = StubClockDomainGenerator
# We only have a single PHY; so use it directly.
default_usb_connection = "ulpi"
resources = [
Resource("rst", 0, PinsN("AB1", dir="i"), Attrs(IO_TYPE="LVCMOS33")),
Resource("clk100", 0, Pins("K23", dir="i"), Clock(100e6), Attrs(IO_TYPE="LVCMOS33")),
# LEDs
Resource("rgb_led", 0,
Subsignal("r", Pins("U21")),
Subsignal("g", Pins("W21")),
Subsignal("b", Pins("T24")),
Attrs(IO_TYPE="LVCMOS33"),
),
Resource("rgb_led", 1,
Subsignal("r", Pins("T23")),
Subsignal("g", Pins("R21")),
Subsignal("b", Pins("T22")),
Attrs(IO_TYPE="LVCMOS33"),
),
Resource("rgb_led", 2,
Subsignal("r", Pins("P21")),
Subsignal("g", Pins("R23")),
Subsignal("b", Pins("P22")),
Attrs(IO_TYPE="LVCMOS33"),
),
Resource("rgb_led", 3,
Subsignal("r", Pins("K21")),
Subsignal("g", Pins("K24")),
Subsignal("b", Pins("M21")),
Attrs(IO_TYPE="LVCMOS33"),
),
Resource("uart", 0,
Subsignal("rx", Pins("R26", dir="i")),
Subsignal("tx", Pins("R24", dir="o")),
Attrs(IO_TYPE="LVCMOS33", PULLMODE="UP")
),
Resource("eth_rgmii", 0,
Subsignal("rst", PinsN("C13", dir="o")),
Subsignal("mdio", Pins("A13", dir="io")),
Subsignal("mdc", Pins("C11", dir="o")),
Subsignal("tx_clk", Pins("A12", dir="o")),
Subsignal("tx_ctrl", Pins("C9", dir="o")),
Subsignal("tx_data", Pins("D8 C8 B8 A8", dir="o")),
Subsignal("rx_clk", Pins("E11", dir="i")),
Subsignal("rx_ctrl", Pins("A11", dir="i")),
Subsignal("rx_data", Pins("B11 A10 B10 A9", dir="i")),
Attrs(IO_TYPE="LVCMOS33")
),
Resource("eth_int", 0, PinsN("B13", dir="i"), Attrs(IO_TYPE="LVCMOS33")),
Resource("ddr3", 0,
Subsignal("clk", DiffPairs("H3", "J3", dir="o"), Attrs(IO_TYPE="SSTL135D_I")),
Subsignal("clk_en", Pins("P1", dir="o")),
Subsignal("we", PinsN("R3", dir="o")),
Subsignal("ras", PinsN("T3", dir="o")),
Subsignal("cas", PinsN("P2", dir="o")),
Subsignal("a", Pins("T5 M3 L3 V6 K2 W6 K3 L1 H2 L2 N1 J1 M1 K1", dir="o")),
Subsignal("ba", Pins("U6 N3 N4", dir="o")),
Subsignal("dqs", DiffPairs("V4 V1", "U5 U2", dir="io"), Attrs(IO_TYPE="SSTL135D_I")),
Subsignal("dq", Pins("T4 W4 R4 W5 R6 P6 P5 P4 R1 W3 T2 V3 U3 W1 T1 W2", dir="io")),
Subsignal("dm", Pins("J4 H5", dir="o")),
Subsignal("odt", Pins("L2", dir="o")),
Attrs(IO_TYPE="SSTL135_I")
),
Resource("hdmi", 0,
Subsignal("rst", PinsN("N6", dir="o")),
Subsignal("scl", Pins("C17", dir="io")),
Subsignal("sda", Pins("E17", dir="io")),
Subsignal("pclk", Pins("C1", dir="o")),
Subsignal("vsync", Pins("A4", dir="o")),
Subsignal("hsync", Pins("B4", dir="o")),
Subsignal("de", Pins("A3", dir="o")),
Subsignal("d",
Subsignal("b", Pins("AD25 AC26 AB24 AB25 B3 C3 D3 B1 C2 D2 D1 E3", dir="o")),
Subsignal("g", Pins("AA23 AA22 AA24 AA25 E1 F2 F1 D17 D16 E16 J6 H6", dir="o")),
Subsignal("r", Pins("AD26 AE25 AF25 AE26 E10 D11 D10 C10 D9 E8 H5 J4", dir="o")),
),
Subsignal("mclk", Pins("E19", dir="o")),
Subsignal("sck", Pins("D6", dir="o")),
Subsignal("ws", Pins("C6", dir="o")),
Subsignal("i2s", Pins("A6 B6 A5 C5", dir="o")),
Subsignal("int", PinsN("C4", dir="i")),
Attrs(IO_TYPE="LVTTL33")
),
Resource("sata", 0,
Subsignal("tx", DiffPairs("AD16", "AD17", dir="o")),
Subsignal("rx", DiffPairs("AF15", "AF16", dir="i")),
Attrs(IO_TYPE="LVDS")
),
ULPIResource("ulpi",
data_sites="M26 L25 L26 K25 K26 J23 P25 H25",
clk_site="H24",
dir_site="F22", stp_site="H23", nxt_site="F23", reset_site="E23",
attrs=Attrs(IO_TYPE="LVCMOS33")
),
Resource("usbc_cfg", 0,
Subsignal("scl", Pins("D24", dir="io")),
Subsignal("sda", Pins("C24", dir="io")),
Subsignal("dir", Pins("B23", dir="i")),
Subsignal("id", Pins("D23", dir="i")),
Subsignal("int", PinsN("B24", dir="i")),
Attrs(IO_TYPE="LVCMOS33")
),
Resource("usbc_mux", 0,
Subsignal("en", Pins("C23", dir="oe")),
Subsignal("amsel", Pins("B26", dir="oe")),
Subsignal("pol", Pins("D26", dir="o")),
#Subsignal("lna", DiffPairs( "AF9", "AF10", dir="i"), Attrs(IO_TYPE="LVCMOS18D")),
#Subsignal("lnb", DiffPairs("AD10", "AD11", dir="o"), Attrs(IO_TYPE="LVCMOS18D")),
#Subsignal("lnc", DiffPairs( "AD7", "AD8", dir="o"), Attrs(IO_TYPE="LVCMOS18D")),
#Subsignal("lnd", DiffPairs( "AF6", "AF7", dir="i"), Attrs(IO_TYPE="LVCMOS18D")),
Attrs(IO_TYPE="LVCMOS33")
),
# Compatibility aliases.
Resource("led", 0, Pins("W21", dir="o"), Attrs(IO_TYPE="LVCMOS33")),
Resource("led", 1, Pins("R21", dir="o"), Attrs(IO_TYPE="LVCMOS33")),
Resource("led", 2, Pins("R23", dir="o"), Attrs(IO_TYPE="LVCMOS33")),
Resource("led", 3, Pins("K24", dir="o"), Attrs(IO_TYPE="LVCMOS33")),
Resource("user_io", 0, Pins("T25")),
Resource("user_io", 1, Pins("U25")),
Resource("user_io", 2, Pins("U24")),
Resource("user_io", 3, Pins("V24")),
]
connectors = [
Connector("pmod", 0, "T25 U25 U24 V24 - - T26 U26 V26 W26 - -"),
Connector("pmod", 1, "U23 V23 U22 V21 - - W25 W24 W23 W22 - -"),
Connector("pmod", 2, "J24 H22 E21 D18 - - K22 J21 H21 D22 - -"),
Connector("pmod", 3, " E4 F4 E6 H4 - - F3 D4 D5 F5 - -"),
Connector("pmod", 4, "E26 D25 F26 F25 - - A25 A24 C26 C25 - -"),
Connector("pmod", 5, "D19 C21 B21 C22 - - D21 A21 A22 A23 - -"),
Connector("pmod", 6, "C16 B17 C18 B19 - - A17 A18 A19 C19 - -"),
Connector("pmod", 7, "D14 B14 E14 B16 - - C14 A14 A15 A16 - -"),
]
@property
def file_templates(self):
return {
**super().file_templates,
"{{name}}-openocd.cfg": r"""
interface ftdi
ftdi_vid_pid 0x0403 0x6010
ftdi_channel 0
ftdi_layout_init 0xfff8 0xfffb
reset_config none
adapter_khz 25000
jtag newtap ecp5 tap -irlen 8 -expected-id 0x81113043
"""
}
def toolchain_program(self, products, name):
openocd = os.environ.get("OPENOCD", "openocd")
with products.extract("{}-openocd.cfg".format(name), "{}.svf".format(name)) \
as (config_filename, vector_filename):
subprocess.check_call([openocd,
"-f", config_filename,
"-c", "transport select jtag; init; svf -quiet {}; exit".format(vector_filename)
])
|
mx_mg/models/__init__.py | CheminfoPKU/molecule_generator | 127 | 12709525 | from .networks import *
from .functions import * |
python/phonenumbers/shortdata/region_NE.py | rodgar-nvkz/python-phonenumbers | 2,424 | 12709533 |
"""Auto-generated file, do not edit by hand. NE metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_NE = PhoneMetadata(id='NE', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='[1-3578]\\d(?:\\d(?:\\d{3})?)?', possible_length=(2, 3, 6)),
toll_free=PhoneNumberDesc(national_number_pattern='1(?:18|[578])|723\\d{3}', example_number='15', possible_length=(2, 3, 6)),
emergency=PhoneNumberDesc(national_number_pattern='1(?:18|[578])|723141', example_number='15', possible_length=(2, 3, 6)),
short_code=PhoneNumberDesc(national_number_pattern='1(?:0[01]|1[128]|2[034]|3[013]|[46]0|55?|[78])|222|333|555|723141|888', example_number='15', possible_length=(2, 3, 6)),
carrier_specific=PhoneNumberDesc(national_number_pattern='1(?:0[01]|1[12]|2[034]|3[013]|[46]0|55)|222|333|555|888', example_number='100', possible_length=(3,)),
short_data=True)
|
chapter15/cache_aside/cache_aside.py | JoeanAmiee/Mastering-Python-Design-Patterns-Second-Edition | 278 | 12709568 |
import sys
import sqlite3
import csv
cache_key_prefix = "quote"
class QuoteCache:
def __init__(self, filename=""):
self.filename = filename
def get(self, key):
with open(self.filename) as csv_file:
items = csv.reader(csv_file, delimiter=';')
for item in items:
if item[0] == key.split('.')[1]:
return item[1]
def set(self, key, quote):
existing = []
with open(self.filename) as csv_file:
items = csv.reader(csv_file, delimiter=';')
existing = [cache_key_prefix + "." + item[0] for item in items]
if key in existing:
print("This is weird. The key already exists.")
else:
# save the new data
with open(self.filename, "a", newline="") as csv_file:
writer = csv.DictWriter(csv_file,
fieldnames=['id', 'text'],
delimiter=";")
#print(f"Adding '{q[1]}' to cache")
writer.writerow({'id': key.split('.')[1], 'text': quote})
cache = QuoteCache('data/quotes_cache.csv')
def get_quote(quote_id):
# Return the item from cache if found in it. If not found in cache, read from data store.
# Put the read item in cache and return it.
quote = cache.get(f"quote.{quote_id}")
out = ""
if quote is None:
try:
db = sqlite3.connect('data/quotes.sqlite3')
cursor = db.cursor()
cursor.execute(f"SELECT text FROM quotes WHERE id = {quote_id}")
for row in cursor:
quote = row[0]
print(f"Got '{quote}' FROM DB")
except Exception as e:
print(e)
finally:
# Close the db connection
db.close()
# and add it to the cache
key = f"{cache_key_prefix}.{quote_id}"
cache.set(key, quote)
if quote:
out = f"{quote} (FROM CACHE, with key 'quote.{quote_id}')"
return out
if __name__ == "__main__":
args = sys.argv
if args[1] == 'fetch':
while True:
quote_id = input('Enter the ID of the quote: ')
q = get_quote(quote_id)
if q:
print(q)
|
rethinkdb/datadog_checks/rethinkdb/config.py | vbarbaresi/integrations-core | 663 | 12709571 | # (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from typing import List, Optional
from datadog_checks.base import ConfigurationError
from .types import Instance
class Config(object):
"""
Hold instance configuration for a RethinkDB check.
Encapsulates the validation of an `instance` dictionary while improving type information.
"""
def __init__(self, instance=None):
# type: (Instance) -> None
if instance is None:
instance = {}
host = instance.get('host', 'localhost')
port = instance.get('port', 28015)
user = instance.get('username')
password = instance.get('password')
tls_ca_cert = instance.get('tls_ca_cert')
tags = instance.get('tags', [])
if not isinstance(host, str):
raise ConfigurationError('host {!r} must be a string (got {!r})'.format(host, type(host)))
try:
port = int(port)
except (ValueError, TypeError):
raise ConfigurationError('port {!r} must be convertible to an integer (got {!r})'.format(port, type(port)))
if port < 0:
raise ConfigurationError('port must be positive (got {!r})'.format(port))
if not isinstance(tags, list):
raise ConfigurationError('tags {!r} must be a list (got {!r})'.format(tags, type(tags)))
self.host = host # type: str
self.port = port # type: int
self.user = user # type: Optional[str]
self.password = password # type: Optional[str]
self.tls_ca_cert = tls_ca_cert # type: Optional[str]
self.tags = tags # type: List[str]
self.service_check_tags = ('host:{}'.format(self.host), 'port:{}'.format(self.port)) + tuple(self.tags)
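# A minimal usage sketch; the host, port and tag values below are illustrative
# assumptions, not defaults of this integration.
if __name__ == '__main__':
    config = Config({'host': 'db.example.com', 'port': 28015, 'tags': ['env:dev']})
    print(config.service_check_tags)  # ('host:db.example.com', 'port:28015', 'env:dev')
    # A non-integer port such as {'port': 'abc'} raises ConfigurationError.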
|
tools/gemini/test-data/util/shrink_simple_tab.py | ic4f/tools-iuc | 142 | 12709596 |
from __future__ import print_function
import argparse
from functools import partial
def keep_line(line, pos_cols, region):
fields = line.rstrip().split(b'\t')
if fields[pos_cols[0]] == region[0]: # same chromosome
if (
region[1] < int(fields[pos_cols[1]]) < region[2]
) or (
region[1] < int(fields[pos_cols[2]]) < region[2]
):
return True
def main(infile, ofile, num_header_lines):
print(infile, '->', ofile)
with open(infile, 'rb') as i:
with open(ofile, 'wb') as o:
# copy header lines
for c in range(num_header_lines):
o.write(next(i))
for line in i:
if keep_line(line):
o.write(line)
if __name__ == '__main__':
p = argparse.ArgumentParser()
p.add_argument('infile')
p.add_argument(
'-r', '--region',
required=True,
help='the region of the input file to rewrite'
)
p.add_argument(
'-o', '--ofile',
required=True,
help="the name of the output file"
)
p.add_argument(
'-c', '--cols',
nargs=3, type=int, required=True,
help="the columns of the input file specifying chrom, start and stop, "
"respectively"
)
p.add_argument(
'-n', '--num-header-lines',
type=int, default=0,
help='the number of header lines present in the input; These will '
'always be copied over to the new file.'
)
args = vars(p.parse_args())
chrom, reg = args['region'].split(':')
region = [chrom.encode()] + [int(x) for x in reg.split('-')]
keep_line = partial(keep_line, pos_cols=args['cols'], region=region)
main(args['infile'], args['ofile'], args['num_header_lines'])
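    # Example invocation (hypothetical file names), keeping one header line and
    # the rows of chr1 between positions 1000 and 2000, with chrom/start/stop
    # read from columns 0, 1 and 2:
    #
    #   python shrink_simple_tab.py in.tab -r chr1:1000-2000 -o out.tab -c 0 1 2 -n 1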
|
jixianjiancha/models.py | zx273983653/vulscan | 582 | 12709628 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
import sys
reload(sys)
sys.setdefaultencoding('utf8')
# Create your models here.
class BaseCheck(models.Model):
    vid=models.IntegerField(primary_key=True) # primary key
    ip=models.CharField(max_length=255,null=True,blank=True) # scanned IP
    time=models.CharField(max_length=255,null=True,blank=True) # scan time
    checkpoint=models.CharField(max_length=255,null=True,blank=True) # check item
    level=models.CharField(max_length=255,null=True,blank=True) # vulnerability level
    suggestion=models.CharField(max_length=255,null=True,blank=True) # remediation suggestion
    describe=models.CharField(max_length=255,null=True,blank=True) # vulnerability description
class Process_save(models.Model):
    vid=models.IntegerField(primary_key=True) # primary key
    ip=models.CharField(max_length=255,null=True,blank=True) # scanned IP
    time=models.CharField(max_length=255,null=True,blank=True) # scan time
    describe=models.TextField() # process description
    checkpoint=models.CharField(max_length=255,null=True,blank=True) # check item
    level=models.CharField(max_length=255,null=True,blank=True) # vulnerability level
    suggestion=models.CharField(max_length=255,null=True,blank=True) # remediation suggestion
class Scan_number(models.Model):
    vid=models.IntegerField(primary_key=True) # primary key
    ip=models.CharField(max_length=255,null=True,blank=True) # scanned IP
    time=models.CharField(max_length=255,null=True,blank=True) # scan time
|
disentanglement_lib/methods/shared/optimizers_test.py | travers-rhodes/disentanglement_lib | 1,280 | 12709642 | # coding=utf-8
# Copyright 2018 The DisentanglementLib Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for optimizer.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from disentanglement_lib.methods.shared import optimizers
from six.moves import range
import tensorflow.compat.v1 as tf
import gin.tf.external_configurables # pylint: disable=unused-import
import gin.tf
def _make_vae_optimizer_configs():
"""Yield different vae_optimizer test configurations.
Yields:
A tuple containing a list of gin bindings, and the expected learning rate
after 10 steps.
"""
# Constant learning rate specified in the optimizer.
bindings = [
"vae_optimizer.optimizer_fn = @GradientDescentOptimizer",
"GradientDescentOptimizer.learning_rate = 0.1",
]
yield (bindings, 0.1)
# Constant learning rate specified in vae_optimizer.
bindings = [
"vae_optimizer.optimizer_fn = @GradientDescentOptimizer",
"vae_optimizer.learning_rate = 0.1",
]
yield (bindings, 0.1)
# Piecewise constant learning rate.
bindings = [
"vae_optimizer.optimizer_fn = @GradientDescentOptimizer",
"vae_optimizer.learning_rate = @piecewise_constant",
"piecewise_constant.boundaries = (3, 5)",
"piecewise_constant.values = (0.2, 0.1, 0.01)",
]
yield (bindings, 0.01)
# Exponential decay learning rate.
bindings = [
"vae_optimizer.optimizer_fn = @GradientDescentOptimizer",
"vae_optimizer.learning_rate = @exponential_decay",
"exponential_decay.learning_rate = 0.1",
"exponential_decay.decay_steps = 1",
"exponential_decay.decay_rate = 0.9",
]
yield (bindings, 0.03486784401)
class OptimizerTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(list(_make_vae_optimizer_configs()))
def test_vae_optimizer(self, gin_bindings, expected_learning_rate):
gin.parse_config_files_and_bindings([], gin_bindings)
with self.test_session():
x = tf.Variable(0.0)
y = tf.pow(x + 2.0, 2.0)
global_step = tf.train.get_or_create_global_step()
optimizer = optimizers.make_vae_optimizer()
train_op = optimizer.minimize(loss=y, global_step=global_step)
tf.global_variables_initializer().run()
for it in range(10):
self.evaluate(train_op)
self.assertEqual(it + 1, self.evaluate(global_step))
current_learning_rate = self.evaluate(optimizer._learning_rate_tensor)
self.assertAlmostEqual(expected_learning_rate, current_learning_rate)
gin.clear_config()
if __name__ == "__main__":
tf.test.main()
|
sanic_jwt/base.py | jekel/sanic-jwt | 226 | 12709662 |
class BaseDerivative:
def __init__(self, config, instance, *args, **kwargs):
self.config = config
self.instance = instance
|
robustnessgym/report/report.py | jessevig/robustness-gym | 399 | 12709670 | from __future__ import annotations
import itertools
from functools import partial
from typing import Dict, List
import dill
import numpy as np
import pandas as pd
import plotly.figure_factory as ff
import plotly.graph_objects as go
from plotly.graph_objs import Figure
from plotly.subplots import make_subplots
class ReportColumn:
"""A single column in the Robustness Report."""
def __init__(self, title: str):
self.title = title
def set_title(self, title: str):
self.title = title
class ScoreColumn(ReportColumn):
"""A column for numeric scores in the Robustness Report, displayed as a bar
chart."""
def __init__(
self, title: str, min_val: float, max_val: float, is_0_to_1: bool = False
):
super(ScoreColumn, self).__init__(title)
self.min_val = min_val
self.max_val = max_val
self.is_0_to_1 = is_0_to_1
def set_min(self, min_val: float):
self.min_val = min_val
def set_max(self, max_val: float):
self.max_val = max_val
class ClassDistributionColumn(ReportColumn):
"""A column for discrete class distributions in the Robustness Report,
displayed as a heatmap."""
def __init__(self, title: str, class_codes: List[str]):
super(ClassDistributionColumn, self).__init__(title)
self.class_codes = class_codes
def set_class_codes(self, class_codes: List[str]):
self.class_codes = class_codes
class NumericColumn(ReportColumn):
"""A column for numeric data in the Robustness Report, displayed as the raw
value."""
def __init__(self, title: str):
super(NumericColumn, self).__init__(title)
class Report:
"""Class for Robustness Gym Report."""
def __init__(
self,
data: pd.DataFrame,
columns: List[ReportColumn],
model_name: str = None,
dataset_name: str = None,
**kwargs,
):
"""
Args:
data: Pandas dataframe in the following format:
column 1: category name
column 2: slice name
columns 3-N: data corresponding to passed columns parameter
columns: ReportColumn objects specifying format of columns 3-N in data
model_name (optional): model name to show in report
dataset_name (optional): dataset name to show in report
            **kwargs: any additional config parameters
"""
# Make a copy of data since may be modified by methods below
self.data = data.copy()
self.columns = columns
self.model_name = model_name
self.dataset_name = dataset_name
self.config = {
"color_scheme": ["#ec7734", "#3499ec", "#ec34c1", "#9cec34"],
"score_color_complement": "#F3F4F7",
"text_fill_color": "#F3F4F7",
"text_border_color": "#BEC4CE",
"distribution_color_scale": [[0.0, "#FBF5F2"], [1.0, "#EC7734"]],
"col_spacing": 0.035,
"row_height": 24,
"category_padding": 24,
"header_padding": 80,
"score_col_width": 0.6,
"class_dist_col_width": 0.35,
"numeric_col_width": 0.25,
"layout_width": 960,
"font_size_dist": 12,
"font_size_data": 13,
"font_size_heading": 14,
"font_size_category": 14,
}
self.update_config(**kwargs)
def sort(
self, category_order: Dict[str, int] = None, slice_order: Dict[str, int] = None
):
"""Sort rows in report by category / slice alphabetically, or using
specified order.
Args:
category_order (optional): map from category name to sorting rank. If None,
sort categories alphabetically.
slice_order (optional): map from slice name to sorting rank. If None, sort
slices alphabetically (within a category).
"""
if category_order is None:
category_order = {}
if slice_order is None:
slice_order = {}
for col_name in ["sort-order-category", "sort-order-slice"]:
if col_name in self.data:
raise ValueError(f"Column name '{col_name}' is reserved")
self.data["sort-order-category"] = self.data[0].map(
lambda x: (category_order.get(x, 2 ** 10000), x)
)
self.data["sort-order-slice"] = self.data[1].map(
lambda x: (slice_order.get(x, 2 ** 10000), x)
)
self.data = self.data.sort_values(
by=["sort-order-category", "sort-order-slice"]
).drop(["sort-order-category", "sort-order-slice"], axis="columns")
self.data.reset_index(inplace=True, drop=True)
def filter(self, categories: List[str] = None, slices: List[str] = None):
"""Filter report to specific categories AND slices
Args:
categories (optional): list of category names to filter by
slices (optional):list of slice names to filter by
"""
if categories is not None:
# self.data = self.data.loc(self.data[0].isin(categories))
self.data = self.data[self.data[0].isin(categories)]
if slices is not None:
self.data = self.data[self.data[1].isin(slices)]
self.data.reset_index(inplace=True, drop=True)
def rename(self, category_map: Dict[str, str], slice_map: Dict[str, str]):
"""Rename categories, slices
Args:
category_map (optional): map from old to new category name
slice_map (optional): map from old to new slice name
"""
if category_map is not None:
self.data[0] = self.data[0].map(lambda x: category_map.get(x, x))
if slice_map is not None:
self.data[1] = self.data[1].map(lambda x: slice_map.get(x, x))
def set_class_codes(self, class_cds: List[str]):
"""Set single-letter class codes used for class distribution
columns."""
for col in self.columns:
if isinstance(col, ClassDistributionColumn):
col.set_class_codes(class_cds)
def set_model_name(self, model_name):
"""Set model name displayed on report."""
self.model_name = model_name
def set_dataset_name(self, dataset_name):
"""Set dataset name displayed on report."""
self.dataset_name = dataset_name
def set_range(self, col_title: str, min_val: float = None, max_val: float = None):
"""Set min and max values for score columns
Args:
col_title: title of column to update
min_val: minimum value
max_val: maximum value
"""
for col in self.columns:
if isinstance(col, ScoreColumn) and col.title == col_title:
if min_val is not None:
col.min_val = min_val
if max_val is not None:
col.max_val = max_val
def update_config(self, **kwargs):
for k, v in kwargs.items():
if k not in self.config:
raise ValueError(f"Invalid config param: '{k}'")
self.config[k] = v
def round(self):
# Round everything
self.data = self.data.round(3)
self.data.class_dist = self.data.class_dist.apply(partial(np.round, decimals=3))
self.data.pred_dist = self.data.pred_dist.apply(partial(np.round, decimals=3))
@classmethod
def load(cls, path: str) -> Report:
obj = dill.load(open(path, "rb"))
assert isinstance(obj, Report), (
f"dill loaded an instance of {type(obj)}, " f"must load {cls.__name__}."
)
return obj
def save(self, path: str):
return dill.dump(self, open(path, "wb"))
def figure(self, show_title=False) -> Figure:
# Verify that rows are grouped by category
row_categories = self.data[0].tolist()
save_cat_groups = set() # Previous category groupings already encountered
prev_cat = None
# Loop through each row and see if a category is encountered outside of first
# identified group for that category
for cat in row_categories:
if cat != prev_cat: # category changes
if cat in save_cat_groups: # if new category previously encountered
raise ValueError("Rows must be grouped by category.")
prev_cat = cat
save_cat_groups.add(cat)
categories = []
category_sizes = [] # Num rows in each category
for category, group in itertools.groupby(self.data[0]): # column 0 is category
categories.append(category)
category_sizes.append(len(list(group)))
n_rows = sum(category_sizes)
height = (
n_rows * self.config["row_height"]
+ len(categories) * self.config["category_padding"]
+ self.config["header_padding"]
)
col_widths = []
for col in self.columns:
if isinstance(col, ScoreColumn):
col_width = self.config["score_col_width"]
elif isinstance(col, ClassDistributionColumn):
col_width = self.config["class_dist_col_width"]
else:
col_width = self.config["numeric_col_width"]
col_widths.append(col_width)
fig = make_subplots(
rows=len(categories),
row_titles=categories,
cols=len(self.columns),
shared_yaxes=True,
subplot_titles=[col.title for col in self.columns],
horizontal_spacing=self.config["col_spacing"],
vertical_spacing=self.config["category_padding"] / height,
row_width=list(reversed(category_sizes)),
column_width=col_widths,
)
hms = []
coords = []
category_ndx = 1
# Group data by category
for category, category_data in self.data.groupby(0, sort=False):
score_col_ndx = 0
slice_names = category_data[1]
slice_names = [s + " " * 3 for s in slice_names]
for col_ndx, col in enumerate(self.columns):
df_col_ndx = col_ndx + 2
# Dataframe has two leading columns with category, slice
fig_col_ndx = col_ndx + 1 # figure columns are 1-indexed
x = category_data[df_col_ndx].tolist()
if isinstance(col, ScoreColumn):
if col.is_0_to_1:
x = [100 * x_i for x_i in x]
col_max = col.max_val
if col.is_0_to_1:
col_max = 100 * col.max_val
fig.add_trace(
go.Bar(
x=x,
y=slice_names,
orientation="h",
marker=dict(color=self.get_color(score_col_ndx)),
showlegend=False,
text=[f"{x_i:.1f}" for x_i in x],
textposition="inside",
width=0.95,
textfont=dict(color="white"),
),
row=category_ndx,
col=fig_col_ndx,
)
# Add marker for gray fill
fig.add_trace(
go.Bar(
x=[col_max - x_i for x_i in x],
y=slice_names,
orientation="h",
marker=dict(color=self.config["score_color_complement"]),
showlegend=False,
width=0.9,
),
row=category_ndx,
col=fig_col_ndx,
)
score_col_ndx += 1
elif isinstance(col, ClassDistributionColumn):
annotation_text = [
[f"{int(round(z * 100)):d}" for z in rw] for rw in x
]
hm = ff.create_annotated_heatmap(
x,
x=col.class_codes,
xgap=1,
ygap=1,
annotation_text=annotation_text,
colorscale=self.config["distribution_color_scale"],
zmin=0,
zmax=1,
)
hms.append(hm)
# Save annotation data for special code related to heatmaps at end
coords.append(len(self.columns) * (category_ndx - 1) + fig_col_ndx)
fig.add_trace(
hm.data[0],
row=category_ndx,
col=fig_col_ndx,
)
elif isinstance(col, NumericColumn):
# Repurpose bar chart as text field.
fig.add_trace(
go.Bar(
x=[1] * len(x),
y=slice_names,
orientation="h",
marker=dict(
color=self.config["text_fill_color"],
line=dict(
width=0, color=self.config["text_border_color"]
),
),
showlegend=False,
text=[human_format(x_i) for x_i in x],
textposition="inside",
insidetextanchor="middle",
width=0.9,
),
row=category_ndx,
col=fig_col_ndx,
)
else:
raise ValueError("Invalid col type")
category_ndx += 1
for category_ndx in range(1, len(categories) + 1):
if category_ndx == len(categories):
show_x_axis = True
else:
show_x_axis = False
for col_ndx, col in enumerate(self.columns):
fig_col_ndx = col_ndx + 1 # plotly cols are 1-indexed
fig.update_yaxes(autorange="reversed", automargin=True)
if isinstance(col, ScoreColumn):
if col.is_0_to_1:
col_min, col_max = 100 * col.min_val, 100 * col.max_val
else:
col_min, col_max = col.min_val, col.max_val
fig.update_xaxes(
range=[col_min, col_max],
row=category_ndx,
col=fig_col_ndx,
tickvals=[col_min, col_max],
showticklabels=show_x_axis,
)
elif isinstance(col, ClassDistributionColumn):
fig.update_xaxes(
row=category_ndx, col=fig_col_ndx, showticklabels=show_x_axis
)
elif isinstance(col, NumericColumn):
fig.update_xaxes(
range=[0, 1],
row=category_ndx,
col=fig_col_ndx,
showticklabels=False,
)
fig.update_layout(
height=height,
width=self.config["layout_width"],
barmode="stack",
plot_bgcolor="rgba(0, 0, 0, 0)",
paper_bgcolor="rgba(0, 0, 0, 0)",
font=dict(size=self.config["font_size_data"]),
yaxis={"autorange": "reversed"},
margin=go.layout.Margin(
r=0, b=0, t=20 # right margin # bottom margin # top margin
),
)
# Use low-level plotly interface to update padding / font size
for a in fig["layout"]["annotations"]:
# If label for group
if a["text"] in categories:
a["x"] = 0.99 # Add padding
a["font"] = dict(size=self.config["font_size_category"])
else:
a["font"] = dict(
size=self.config["font_size_heading"]
) # Adjust font size for non-category labels
# Due to a quirk in plotly, need to do some special low-level coding
# Code from https://community.plotly.com/t/how-to-create-annotated-heatmaps
# -in-subplots/36686/25
newfont = [
go.layout.Annotation(font_size=self.config["font_size_heading"])
] * len(fig.layout.annotations)
fig_annots = [newfont] + [hm.layout.annotations for hm in hms]
for col_ndx in range(1, len(fig_annots)):
for k in range(len(fig_annots[col_ndx])):
coord = coords[col_ndx - 1]
fig_annots[col_ndx][k]["xref"] = f"x{coord}"
fig_annots[col_ndx][k]["yref"] = f"y{coord}"
fig_annots[col_ndx][k]["font_size"] = self.config["font_size_dist"]
def recursive_extend(mylist, nr):
# mylist is a list of lists
result = []
if nr == 1:
result.extend(mylist[nr - 1])
else:
result.extend(mylist[nr - 1])
result.extend(recursive_extend(mylist, nr - 1))
return result
new_annotations = recursive_extend(fig_annots[::-1], len(fig_annots))
fig.update_layout(annotations=new_annotations)
if show_title:
title = {
"text": f"{self.dataset_name or ''} {self.model_name or ''} "
f"Robustness Report",
"x": 0.5,
"xanchor": "center",
}
else:
title = None
fig.update_layout(
title=title,
margin=go.layout.Margin(
r=0, b=0, t=80 # right margin # bottom margin # top margin
),
)
return fig
def get_color(self, col_ndx):
return self.config["color_scheme"][col_ndx % len(self.config["color_scheme"])]
def human_format(num):
num = float("{:.3g}".format(num))
magnitude = 0
while abs(num) >= 1000:
magnitude += 1
num /= 1000.0
return "{}{}".format(
"{:f}".format(num).rstrip("0").rstrip("."), ["", "K", "M", "B", "T"][magnitude]
)
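# A small sanity sketch for human_format; the inputs are illustrative values
# traced against the logic above.
if __name__ == "__main__":
    assert human_format(950) == "950"
    assert human_format(1234) == "1.23K"
    assert human_format(2100000) == "2.1M"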
|
logparser/MoLFI/__init__.py | CUHK-CSE/logalizer | 859 | 12709672 |
from .MoLFI import * |
dataviva/apps/wizard/sessions.py | joelvisroman/dataviva-site | 126 | 12709719 |
# -*- coding: utf-8 -*-
class Question:
def __init__(self, title, selectors, redirect):
self.title = title
self.selectors = selectors
self.redirect = redirect
@property
def serialize(self):
return {
"title": self.title,
"selectors": self.selectors,
"redirect": self.redirect,
}
class Session:
def __init__(self, session_title, title, questions):
self.session_title = session_title
self.title = title
self.questions = questions
development_agents_questions = [
Question('Qual a rede de produtos da Localidade Y?',
selectors=["Location"],
redirect="/location/%s"),
Question('Quais os produtos mais próximos da estrutura produtiva da Localidade Y?',
selectors=["Location"],
redirect="/location/%s"),
Question('Quais os produtos de maior complexidade exportados por uma Localidade Y?',
selectors=["Location"],
redirect="/location/%s"),
Question('Quais os produtos de maior complexidade importados por uma Localidade Y?',
selectors=["Location"],
redirect="/location/%s"),
Question('Qual a rede de atividades da Localidade Y?',
selectors=["Location"],
redirect="/location/%s"),
Question('Quais as atividades mais próximas da estrutura produtiva da Localidade Y?',
selectors=["Location"],
redirect="/location/%s"),
Question('Quais localidades concentram o emprego na Atividade X?',
selectors=["Industry"],
redirect="/industry/%s"),
]
student_questions = [
Question('Quais os cursos de nível superior oferecidos na Localidade Y?',
selectors=["Location"],
redirect="/location/%s"),
Question('Quais os cursos de nível técnico oferecidos na Localidade Y?',
selectors=["Location"],
redirect="/location/%s"),
Question('Qual o salário médio da Ocupação Z na Localidade Y?',
selectors=["Occupation", "Location"],
redirect="/occupation/%s?bra_id=%s"),
Question('Em quais localidades paga-se o maior salário médio da Ocupação Z?',
selectors=["Occupation"],
redirect="/occupation/%s"),
Question('Em quais localidades cresce o número de empregados da Ocupação Z?',
selectors=["Occupation"],
redirect="/occupation/%s"),
Question('Quais os principais produtos exportados pela Localidade Y?',
selectors=["Location"],
redirect="/location/%s"),
Question('Quais as principais atividades econômicas de uma Localidade Y?',
selectors=["Location"],
redirect="/location/%s"),
]
entrepreneur_questions = [
Question("Qual o número de estabelecimentos na Atividade X, na Localidade Y?",
selectors=["Industry", "Location"],
redirect="/industry/%s?bra_id=%s"),
Question("Qual o salário médio da Atividade X, na Localidade Y?",
selectors=["Industry", "Location"],
redirect="/industry/%s?bra_id=%s"),
Question("Qual o salário médio da Ocupação Z, na Atividade X, na Localidade Y?",
selectors=["Occupation", "Industry", "Location"],
redirect="/occupation/%s?cnae_id=%s?bra_id=%s"),
Question("Quais os principais parceiros comerciais de um Produto P na Localidade Y?",
selectors=["Product", "Location"],
redirect="/product/%s?bra_id=%s"),
Question("Quais localidades concentram o emprego na Atividade X?",
selectors=["Industry"],
redirect="/industry/%s"),
Question("Quais as localidades que mais importam o Produto P?",
selectors=["Product"],
redirect="/product/%s"),
Question("Quais as localidades que mais exportam o Produto P?",
selectors=["Product"],
redirect="/product/%s"),
Question("Quais os produtos mais próximos da estrutura produtiva da Localidade Y?",
selectors=["Location"],
redirect="/location/%s"),
Question("Quais os cursos de nível superior oferecidos na Localidade Y?",
selectors=["Location"],
redirect="/location/%s"),
Question("Quais os cursos de nível técnico oferecidos na Localidade Y?",
selectors=["Location"],
redirect="/location/%s"),
]
entrepreneur_session = Session(
session_title="Empreendedores",
title="Identifique o perfil econômico e as oportunidades de negócios de uma região",
questions=entrepreneur_questions
)
development_agents_session = Session(
session_title="Agentes de Desenvolvimento",
title="Avalie a criação de políticas de desenvolvimento de acordo com a localidade",
questions=development_agents_questions
)
student_session = Session(
session_title="Estudantes e Profissionais",
title="Descubra informações sobre empregos disponíveis, renda por ocupação e cursos",
questions=student_questions
)
SESSIONS = {
'entrepreneur': entrepreneur_session,
'development_agents': development_agents_session,
'student': student_session,
}
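# Hypothetical usage sketch (not part of the original module): each question
# serializes to a plain dict for the wizard front end, e.g.
# >>> SESSIONS['student'].questions[0].serialize
# {'title': 'Quais os cursos de nível superior oferecidos na Localidade Y?',
#  'selectors': ['Location'], 'redirect': '/location/%s'}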
|
fooltrader/proxy/__init__.py | beaquant/fooltrader | 1,103 | 12709737 |
# -*- coding: utf-8 -*-
import os
import pandas as pd
from fooltrader import settings
# Get the archived proxy list
def get_proxy_dir():
return os.path.join(settings.FOOLTRADER_STORE_PATH, "proxy")
def get_proxy_path(protocol='http'):
return os.path.join(get_proxy_dir(), "{}_proxy.csv".format(protocol))
def get_checked_proxy_dir(part_name=None):
if part_name:
return os.path.join(get_proxy_dir(), 'checked', 'tmp')
else:
return os.path.join(get_proxy_dir(), 'checked')
def get_checked_proxy_path(protocol='http', part_name=None):
if not os.path.exists(get_checked_proxy_dir(part_name)):
os.makedirs(get_checked_proxy_dir(part_name))
if part_name:
return os.path.join(get_checked_proxy_dir(part_name), "{}_{}_proxy.csv".format(protocol, part_name))
else:
return os.path.join(get_checked_proxy_dir(), "{}_proxy.csv".format(protocol))
def get_sorted_proxy_dir(domain):
return os.path.join(get_proxy_dir(), domain)
def get_sorted_proxy_path(domain, protocol='http', part_name=None):
if not os.path.exists(get_sorted_proxy_dir(domain)):
os.makedirs(get_sorted_proxy_dir(domain))
if part_name:
return os.path.join(get_sorted_proxy_dir(domain), "tmp", "{}_{}_proxy.csv".format(protocol, part_name))
else:
return os.path.join(get_sorted_proxy_dir(domain), "{}_proxy.csv".format(protocol))
def get_checked_proxy(domain=None, protocol='http'):
if domain and os.path.exists(get_sorted_proxy_path(domain, protocol=protocol)):
return pd.read_csv(get_sorted_proxy_path(domain, protocol))
if os.path.exists(get_checked_proxy_path(protocol)):
return pd.read_csv(get_checked_proxy_path(protocol))
def get_proxy(protocol='http'):
if os.path.exists(get_proxy_path(protocol)):
return pd.read_csv(get_proxy_path(protocol))
else:
return pd.DataFrame()
def save_proxy(proxies, protocol='http'):
proxy_df = get_proxy(protocol)
proxy_df = proxy_df.append(proxies)
    proxy_df = proxy_df.drop_duplicates(subset=['url'], keep='last')  # assign back: drop_duplicates is not in-place
proxy_df.to_csv(get_proxy_path(protocol), index=False)
if not os.path.exists(get_proxy_dir()):
os.makedirs(get_proxy_dir())
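# Hypothetical usage sketch (the URL is a placeholder, not from this module):
# persist one proxy record, then read the de-duplicated list back.
# >>> save_proxy(pd.DataFrame([{'url': 'http://1.2.3.4:8080'}]), protocol='http')
# >>> get_proxy('http')['url'].tolist()
# ['http://1.2.3.4:8080']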
|
uber_problems/problem_1.py | loftwah/Daily-Coding-Problem | 129 | 12709745 |
""" This problem was asked by Uber.
Given an array of integers, return a new array such that each element at index i
of the new array is the product of all the numbers in the original array except
the one at i.For example, if our input was [1, 2, 3, 4, 5], the expected output
would be [120, 60, 40, 30, 24]. If our input was [3, 2, 1], the expected output
would be [2, 3, 6]. Follow-up: what if you can't use division? """
# function to get the product of elements inside an array.
def get_array_product(array):
product = 1
for i in array:
product *= i
return product
# this function does the main job of the problem but with division.
def get_new_array_division(array):
# array holding the final result
new_array = []
# getting the product of the array elements.
product = get_array_product(array)
for i in array:
# for each number divide the product by the current element.
new_array.append(product / i)
# return the array
return new_array
# this function avoids the division operator by multiplying with the reciprocal (i ** -1).
def get_new_array(array):
# array holding the final result
new_array = []
# getting the product of the array elements.
product = get_array_product(array)
for i in array:
# for each number product the product by the current element.
new_array.append(product * (i ** -1))
# return the array
return new_array
# This function solves the problem with no division at all, using nested loops (O(n^2)).
def get_new_array_hard(array):
# array holding the final result
new_array = []
for i in range(len(array)):
p = 1
for j in range(len(array)):
if j != i:
p *= array[j]
new_array.append(p)
# return the array
return new_array
# test the functions.
# print(get_new_array_division([1, 2, 3, 4, 5]))
# print(get_new_array_division([3, 2, 1]))
# print(get_new_array([1, 2, 3, 4, 5]))
# print(get_new_array([3, 2, 1]))
print(get_new_array_hard([1, 2, 3, 4, 5]))
print(get_new_array_hard([3, 2, 1]))
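# A further sketch for the follow-up (this helper is an addition, not part of
# the original solutions): build prefix and suffix products so that no
# division is needed, in O(n) time.
def get_new_array_prefix_suffix(array):
    n = len(array)
    result = [1] * n
    # left-to-right pass: result[i] holds the product of everything left of i
    for i in range(1, n):
        result[i] = result[i - 1] * array[i - 1]
    # right-to-left pass: fold in the product of everything right of i
    right = 1
    for i in range(n - 1, -1, -1):
        result[i] *= right
        right *= array[i]
    return result
print(get_new_array_prefix_suffix([1, 2, 3, 4, 5]))  # [120, 60, 40, 30, 24]
print(get_new_array_prefix_suffix([3, 2, 1]))        # [2, 3, 6]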
|
lanefinder.py | Gautam-J/NFS_v1 | 139 | 12709773 | import cv2
import time
import numpy as np
from grabscreen import grab_screen
from directkeys import PressKey, ReleaseKey
from directkeys import W, A, D
from countdown import CountDown
'''
Most of the code in this script was taken from Sentdex's Python plays GTA-V
'''
def roi(img, vertices):
mask = np.zeros_like(img)
cv2.fillPoly(mask, vertices, 255)
masked = cv2.bitwise_and(img, mask)
return masked
def straight():
print('straight')
PressKey(W)
ReleaseKey(A)
ReleaseKey(D)
def left():
print('left')
PressKey(W)
PressKey(A)
time.sleep(0.05)
ReleaseKey(A)
def right():
print('right')
PressKey(W)
PressKey(D)
time.sleep(0.05)
ReleaseKey(D)
def auto_canny(image, sigma=0.33):
'''
Reference: https://www.pyimagesearch.com/
'''
v = np.median(image)
# apply automatic Canny edge detection using the computed median
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
edged = cv2.Canny(image, lower, upper)
# return the edged image
return edged
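# Hypothetical standalone check for auto_canny (the image path is a placeholder):
# edges = auto_canny(cv2.imread('road.jpg', cv2.IMREAD_GRAYSCALE))
# cv2.imwrite('edges.png', edges)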
def draw_lanes(img, lines, color=[0, 255, 255], thickness=3):
# if this fails, go with some default line
try:
# finds the maximum y value for a lane marker
# (since we cannot assume the horizon will always be at the same point.)
ys = []
for i in lines:
for ii in i:
ys += [ii[1], ii[3]]
min_y = min(ys)
max_y = 150
new_lines = []
line_dict = {}
for idx, i in enumerate(lines):
for xyxy in i:
# These four lines:
# modified from http://stackoverflow.com/questions/21565994/method-to-return-the-equation-of-a-straight-line-given-two-points
# Used to calculate the definition of a line, given two sets of coords.
x_coords = (xyxy[0], xyxy[2])
y_coords = (xyxy[1], xyxy[3])
A = np.vstack([x_coords, np.ones(len(x_coords))]).T
                m, b = np.linalg.lstsq(A, y_coords, rcond=None)[0]  # rcond=None avoids the NumPy FutureWarning
# Calculating our new, and improved, xs
x1 = (min_y - b) / m
x2 = (max_y - b) / m
line_dict[idx] = [m, b, [int(x1), min_y, int(x2), max_y]]
new_lines.append([int(x1), min_y, int(x2), max_y])
final_lanes = {}
for idx in line_dict:
final_lanes_copy = final_lanes.copy()
m = line_dict[idx][0]
b = line_dict[idx][1]
line = line_dict[idx][2]
if len(final_lanes) == 0:
final_lanes[m] = [[m, b, line]]
else:
found_copy = False
for other_ms in final_lanes_copy:
if not found_copy:
if abs(other_ms * 1.2) > abs(m) > abs(other_ms * 0.8):
if abs(final_lanes_copy[other_ms][0][1] * 1.2) > abs(b) > abs(final_lanes_copy[other_ms][0][1] * 0.8):
final_lanes[other_ms].append([m, b, line])
found_copy = True
break
else:
final_lanes[m] = [[m, b, line]]
line_counter = {}
for lanes in final_lanes:
line_counter[lanes] = len(final_lanes[lanes])
top_lanes = sorted(line_counter.items(), key=lambda item: item[1])[::-1][:2]
lane1_id = top_lanes[0][0]
lane2_id = top_lanes[1][0]
def average_lane(lane_data):
x1s = []
y1s = []
x2s = []
y2s = []
for data in lane_data:
x1s.append(data[2][0])
y1s.append(data[2][1])
x2s.append(data[2][2])
y2s.append(data[2][3])
return int(np.mean(x1s)), int(np.mean(y1s)), int(np.mean(x2s)), int(np.mean(y2s))
l1_x1, l1_y1, l1_x2, l1_y2 = average_lane(final_lanes[lane1_id])
l2_x1, l2_y1, l2_x2, l2_y2 = average_lane(final_lanes[lane2_id])
return [l1_x1, l1_y1, l1_x2, l1_y2], [l2_x1, l2_y1, l2_x2, l2_y2], lane1_id, lane2_id
except Exception:
pass
def LaneFinder(image):
org_image = image
# convert to grayscale
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# gaussian blur
image = cv2.GaussianBlur(image, (3, 3), 0)
# edge detection
image = auto_canny(image)
# Masking Region of Interest
vertices = np.array([[0, 201], [0, 50], [381, 50], [381, 201]], np.int32)
image = roi(image, [vertices])
# probabilistic hough transform
lines = cv2.HoughLinesP(image, rho=1, theta=(np.pi / 180),
threshold=5, minLineLength=20, maxLineGap=5)
m1 = 0
m2 = 0
# drawing lines
try:
l1, l2, m1, m2 = draw_lanes(org_image, lines)
cv2.line(org_image, (l1[0], l1[1]), (l1[2], l1[3]), [0, 255, 0], 3)
cv2.line(org_image, (l2[0], l2[1]), (l2[2], l2[3]), [0, 255, 0], 3)
except Exception:
pass
try:
for coords in lines:
coords = coords[0]
try:
cv2.line(image, (coords[0], coords[1]), (coords[2], coords[3]), [255, 0, 0], 3)
except Exception:
pass
except Exception:
pass
return image, org_image, m1, m2
if __name__ == '__main__':
CountDown(5)
while True:
screen = grab_screen(region=(270, 250, 650, 450))
new_screen, original_image, m1, m2 = LaneFinder(screen)
# cv2.imshow('window', new_screen)
# cv2.imshow('window2', cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB))
if m1 < 0 and m2 < 0:
right()
elif m1 > 0 and m2 > 0:
left()
else:
straight()
if cv2.waitKey(25) == ord('q'):
cv2.destroyAllWindows()
break
|
tests/integrations/test_reviewer_views.py | theSage21/junction | 192 | 12709795 |
# -*- coding: utf-8 -*-
import pytest
from django.core.urlresolvers import reverse
from .. import factories as f
from . import helpers
pytestmark = pytest.mark.django_db
class TestReviewerViews:
def test_reviewer_private_comment(
self, settings, login, conferences, create_proposal
):
client = login[0]
conference = conferences["future"]
proposal = create_proposal
kwargs = {"conference_slug": conference.slug, "proposal_slug": proposal.slug}
url = reverse("proposal-comment-create", kwargs=kwargs)
data = {"comment": "Test", "private": True}
response = client.post(url, data)
assert response.status_code == 302
assert response.url.endswith("#js-reviewers")
def test_reviewer_only_private_comment(
self, settings, login, conferences, create_proposal
):
client = login[0]
conference = conferences["future"]
proposal = create_proposal
kwargs = {"conference_slug": conference.slug, "proposal_slug": proposal.slug}
url = reverse("proposal-comment-create", kwargs=kwargs)
data = {"comment": "Test", "reviewer": True}
response = client.post(url, data)
assert response.status_code == 302
assert response.url.endswith("#js-only-reviewers")
def test_get_review_proposal_form(
self, settings, login, conferences, create_reviewer, create_proposal
):
client = login[0]
conference = conferences["future"]
proposal = create_proposal
kwargs = {"conference_slug": conference.slug, "slug": proposal.slug}
url = reverse("proposal-review", kwargs=kwargs)
response = client.get(url)
context = response.context
assert response.status_code == 200
assert context["proposal"] == proposal
helpers.assert_template_used(response, "proposals/review.html")
def test_post_review_proposal(
self, settings, login, conferences, create_reviewer, create_proposal
):
client = login[0]
conference = conferences["future"]
proposal = create_proposal
kwargs = {"conference_slug": conference.slug, "slug": proposal.slug}
url = reverse("proposal-review", kwargs=kwargs)
response = client.post(url, {"review_status": 3})
assert response.status_code == 302
def test_review_proposal_by_non_reviewer(
self, settings, client, conferences, create_proposal
):
username, password = "<PASSWORD>", "<PASSWORD>"
f.create_user(password=password, username=username)
conference = conferences["future"]
client.login(username=username, password=password)
proposal = create_proposal
kwargs = {"conference_slug": conference.slug, "slug": proposal.slug}
url = reverse("proposal-review", kwargs=kwargs)
response = client.get(url)
assert response.status_code == 403
def test_proposal_reviewer_vote_by_non_reviewer(
self, settings, client, conferences, create_proposal
):
username, password = "<PASSWORD>", "<PASSWORD>"
f.create_user(password=password, username=username)
conference = conferences["future"]
client.login(username=username, password=password)
proposal = create_proposal
kwargs = {"conference_slug": conference.slug, "proposal_slug": proposal.slug}
url = reverse("proposal-reviewer-vote", kwargs=kwargs)
response = client.post(url)
assert response.status_code == 403
def test_get_proposal_reviewer_vote(
self, settings, login, conferences, create_proposal, create_reviewer
):
client = login[0]
conference = conferences["future"]
proposal = create_proposal
kwargs = {"conference_slug": conference.slug, "proposal_slug": proposal.slug}
url = reverse("proposal-reviewer-vote", kwargs=kwargs)
response = client.get(url)
context = response.context
assert response.status_code == 200
assert context["proposal"] == proposal
assert context["vote"] is None
helpers.assert_template_used(response, "proposals/vote.html")
def test_post_proposal_reviewer_vote(
self, settings, login, conferences, create_proposal, create_reviewer
):
client = login[0]
conference = conferences["future"]
proposal = create_proposal
kwargs = {"conference_slug": conference.slug, "proposal_slug": proposal.slug}
url = reverse("proposal-reviewer-vote", kwargs=kwargs)
data = {"vote_value": 1, "comment": "Must Have"}
response = client.post(url, data)
assert response.status_code == 302
assert response.url.endswith("review/") is True
def test_update_proposal_reviewer_vote(
self, settings, login, conferences, create_proposal, create_reviewer
):
client = login[0]
conference = conferences["future"]
proposal = create_proposal
kwargs = {"conference_slug": conference.slug, "proposal_slug": proposal.slug}
url = reverse("proposal-reviewer-vote", kwargs=kwargs)
data = {"vote_value": 1, "comment": "Must Have"}
client.post(url, data)
update_data = {"vote_value": 2, "comment": "Must Have"}
response = client.post(url, update_data)
assert response.status_code == 302
assert response.url.endswith("review/") is True
def test_get_proposal_reviewer_vote_after_create(
self, settings, login, conferences, create_proposal, create_reviewer
):
client = login[0]
conference = conferences["future"]
proposal = create_proposal
kwargs = {"conference_slug": conference.slug, "proposal_slug": proposal.slug}
url = reverse("proposal-reviewer-vote", kwargs=kwargs)
comment, vote_value = "Must Have", 1
data = {"vote_value": vote_value, "comment": comment}
client.post(url, data)
response = client.get(url)
context = response.context
assert response.status_code == 200
assert context["form"].initial["vote_value"] == vote_value
assert context["form"].initial["comment"] == comment
def test_post_review_proposal_vote_with_invalid_data(
self, settings, login, conferences, create_proposal, create_reviewer
):
client = login[0]
conference = conferences["future"]
proposal = create_proposal
kwargs = {"conference_slug": conference.slug, "proposal_slug": proposal.slug}
url = reverse("proposal-reviewer-vote", kwargs=kwargs)
data = {"vote_value": 12}
response = client.post(url, data)
assert response.status_code == 200
assert "vote_value" in response.context["form_errors"]
def test_get_proposal_votes_dashboard(self, login, conferences, create_superuser):
client = login[0]
conference = conferences["future"]
kwargs = {"conference_slug": conference.slug}
url = reverse("export-reviewer-votes", kwargs=kwargs)
response = client.get(url)
assert response.status_code == 200
def test_public_comment(settings, login, conferences, create_proposal):
client = login[0]
conference = conferences["future"]
proposal = create_proposal
username, password = "<PASSWORD>", "<PASSWORD>"
f.create_user(password=password, username=username)
client.login(username=username, password=password)
kwargs = {"conference_slug": conference.slug, "proposal_slug": proposal.slug}
url = reverse("proposal-comment-create", kwargs=kwargs)
data = {"comment": "Test"}
response = client.post(url, data)
assert response.status_code == 302
assert response.url.endswith("#js-comments")
|
test/IECore/CompoundVectorParameterTest.py | bradleyhenke/cortex | 386 | 12709831 | ##########################################################################
#
# Copyright (c) 2008-2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import imath
import IECore
class TestCompoundVectorParameter( unittest.TestCase ) :
def testConstruction( self ) :
c = IECore.CompoundVectorParameter( 'a', 'dest' )
# test valid parameters
c.addParameter( IECore.IntVectorParameter( 'a', '', IECore.IntVectorData( [ 1, 2 ] ) ) )
c.addParameter( IECore.BoolVectorParameter( 'b', '', IECore.BoolVectorData( [ False, False ] ) ) )
c.addParameters( [ IECore.V2fVectorParameter( 'c', '', IECore.V2fVectorData( [ imath.V2f(), imath.V2f() ] ) ),
IECore.StringVectorParameter( 'd', '', IECore.StringVectorData( [ 'one', 'two' ] ) ) ] )
self.assertEqual( len(c.keys()), 4 )
def addInvalid():
c.addParameter( IECore.StringParameter( 'xx', '', 'value' ) )
# test invalid parameters
self.assertRaises( TypeError, addInvalid )
def testValidation( self ):
c = IECore.CompoundVectorParameter( 'a', 'dest' )
c.addParameter( IECore.IntVectorParameter( 'a', '', IECore.IntVectorData( [ 1, 2 ] ) ) )
c.addParameter( IECore.BoolVectorParameter( 'b', '', IECore.BoolVectorData( [ False, False ] ) ) )
c.validate()
c.addParameter( IECore.IntVectorParameter( 'c', '', IECore.IntVectorData( [ 1, 2,3 ] ) ) )
with self.assertRaises( Exception ) as e :
c.validate()
self.assertTrue(
( 'Parameter "c" has wrong size ( expected 2 but found 3 )' in str( e.exception ) ) or
( 'Parameter "a" has wrong size ( expected 3 but found 2 )' in str( e.exception ) ) or
( 'Parameter "b" has wrong size ( expected 3 but found 2 )' in str( e.exception ) )
)
if __name__ == "__main__":
unittest.main()
|
experimental/distribution.py | keshav47/cnn-facial-landmark | 630 | 12709866 |
"""Draw the histgram of the pose distributions
Run it like this:
`python3 -m experimental.distribution.py`
Do not forget to set the dataset file path.
"""
import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from dataset import get_parsed_dataset
from experimental.pose_estimator import PoseEstimator
if __name__ == "__main__":
ds = get_parsed_dataset("data/helen.record", 1, False)
# Counters
n_faces = 0
pitches = []
yaws = []
rolls = []
for image, marks in ds:
# image = (image.numpy()[0]*255).astype(np.uint8)
height, width = image.shape[1:3]
pose_estimator = PoseEstimator(img_size=(height, width))
marks = np.reshape(marks, (-1, 2))*width
pose = pose_estimator.solve_pose_by_68_points(marks)
# Solve the pitch, yaw and roll angels.
r_mat, _ = cv2.Rodrigues(pose[0])
p_mat = np.hstack((r_mat, np.array([[0], [0], [0]])))
_, _, _, _, _, _, u_angle = cv2.decomposeProjectionMatrix(p_mat)
pitch, yaw, roll = u_angle.flatten()
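        # Note: cv2.decomposeProjectionMatrix returns the Euler angles in degrees,
        # which is why roll is compared against 180 below.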
        # The roll axis appears to be flipped by 180 degrees for unknown reasons.
        # Manually bypass this issue.
if roll > 0:
roll = 180-roll
elif roll < 0:
roll = -(180 + roll)
pitches.append(pitch)
yaws.append(yaw)
rolls.append(roll)
n_faces += 1
# print("pitch: {:.2f}, yaw: {:.2f}, roll: {:.2f}".format(
# pitch, yaw, roll))
# for mark in marks:
# cv2.circle(image, tuple(mark), 1, (0, 255, 0), 1)
# cv2.imshow("image", image)
# if cv2.waitKey() == 27:
# break
fig, ax = plt.subplots(3, 1)
ax[0].hist(pitches, 40, (-60, 60), density=True)
ax[1].hist(yaws, 40, (-60, 60), density=True)
ax[2].hist(rolls, 40, (-60, 60), density=True)
plt.show()
print(n_faces)
|
rlbench/tasks/turn_tap.py | vonHartz/RLBench | 619 | 12709885 |
from typing import List
from pyrep.objects.dummy import Dummy
from pyrep.objects.joint import Joint
from rlbench.backend.task import Task
from rlbench.backend.conditions import JointCondition
OPTIONS = ['left', 'right']
class TurnTap(Task):
def init_task(self) -> None:
self.left_start = Dummy('waypoint0')
self.left_end = Dummy('waypoint1')
self.right_start = Dummy('waypoint5')
self.right_end = Dummy('waypoint6')
self.left_joint = Joint('left_joint')
self.right_joint = Joint('right_joint')
def init_episode(self, index: int) -> List[str]:
option = OPTIONS[index]
if option == 'right':
self.left_start.set_position(self.right_start.get_position())
self.left_start.set_orientation(self.right_start.get_orientation())
self.left_end.set_position(self.right_end.get_position())
self.left_end.set_orientation(self.right_end.get_orientation())
self.register_success_conditions(
[JointCondition(self.right_joint, 1.57)])
else:
self.register_success_conditions(
[JointCondition(self.left_joint, 1.57)])
return ['turn %s tap' % option,
'rotate the %s tap' % option,
'grasp the %s tap and turn it' % option]
def variation_count(self) -> int:
return 2
|
backends/c-rocm/schedule/standard/algo_format.py | guoshzhao/antares | 132 | 12709956 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from tvm import te
def schedule_branch(attrs, output, prefix):
cfg, s = attrs.auto_config, attrs.scheduler
th_vals = [attrs.get_extent(x) for x in output.op.axis]
# Normal Schedule Plan
blocks = [te.thread_axis('blockIdx.x'), te.thread_axis('blockIdx.y'), te.thread_axis('blockIdx.z')]
threads = [te.thread_axis('threadIdx.x'), te.thread_axis('threadIdx.y'), te.thread_axis('threadIdx.z')]
th_idx = []
for i in range(len(th_vals)):
if th_vals[i] > 1 or (i + 1 == len(th_vals) and len(th_idx) == 0):
th_idx.append(i)
else:
s[output].bind(output.op.axis[i], te.thread_axis('vthread'))
high_vaxis, low_vaxis = [], []
for i in range(len(th_idx)):
ax_name = f'{prefix}D{th_idx[i]}'
ax_obj = output.op.axis[th_idx[i]]
if i < len(blocks):
sizes = cfg.define_split(ax_name, attrs.get_extent(ax_obj), num_outputs=4)
ax1, ax2, ax3, ax4 = cfg.apply_split(s, output, ax_obj, sizes)
s[output].bind(ax1, blocks[i])
s[output].bind(ax3, threads[i])
else:
sizes = cfg.define_split(ax_name, attrs.get_extent(ax_obj), num_outputs=2)
ax2, ax4 = cfg.apply_split(s, output, ax_obj, sizes)
s[output].bind(ax2, te.thread_axis('vthread'))
s[output].bind(ax4, te.thread_axis('vthread'))
high_vaxis.append(ax2)
low_vaxis.append(ax4)
ord_name = f"{prefix}O"
permut = cfg.define_reorder(ord_name, len(high_vaxis), "all")
plan_order = []
for i in permut:
plan_order.append(low_vaxis[i])
plan_order.append(high_vaxis[i])
s[output].reorder(*plan_order)
# unroll
unroll_step = cfg.define_knob(f"{prefix}S", [1, 4, 16, 64, 512])
unroll_explicit = cfg.define_knob(f"{prefix}R", [False, True])
kernel_scope = plan_order[0]
s[output].pragma(kernel_scope, 'auto_unroll_max_step', unroll_step)
s[output].pragma(kernel_scope, 'unroll_explicit', unroll_explicit)
|
Decision Tree/DT_Classify/AnFany_Show_Tree.py | Jojoxiao/Machine-Learning-for-Beginner-by-Python3 | 397 | 12709959 | # -*- coding:utf-8 -*-
# &Author AnFany
# Adaptively optimized decision-tree plotting program
# Drawing the decision diagram involves five main parts:
# 1. Decide what each node displays (internal nodes show the node name, class ratio, split feature and the node's result; leaf nodes show no split feature)
# 2. Decide each node's position (evenly spaced vertically; horizontally spaced evenly by the number of nodes on that level)
# 3. Decide the connecting lines between nodes
# 4. Display the content on each connecting line (the split rule and the split value)
# 5. Show internal nodes and leaf nodes in different colors, and provide a legend
# Draw the tree from every node's data set, result and rule, plus the pruned node relations that represent the tree
from pylab import mpl
mpl.rcParams['font.sans-serif'] = ['FangSong']  # display Chinese characters
mpl.rcParams['axes.unicode_minus'] = False  # display the minus sign correctly
import matplotlib.pyplot as plt
# Import the information needed to draw the tree
import AnFany_DT_Classify as tree
# Field names of the data set
ziduan = ['Age', 'workclass', 'fnlwgt', 'education', 'education-num', 'marital-status', 'occupation', 'relationship', 'race', 'sex',\
'capital-gain', 'capital-loss', 'hours-per-week', 'native-country']
'''Preparation'''
# All nodes to display
def allnodes(guanxi):
allnode = list(guanxi.keys())
for jj in guanxi:
for hhh in guanxi[jj]:
if hhh not in allnode:
allnode.append(hhh)
    # Output in sorted order so parents are drawn before children; the nodes then cover the arrows, which looks better
return sorted(allnode)
# All leaf nodes to display
def leafnodes(guanxi):
allnode = list(guanxi.keys())
leafnode = []
for jj in guanxi:
for hhh in guanxi[jj]:
if hhh not in allnode:
leafnode.append(hhh)
return leafnode
# All internal nodes to display
def noye_node(guanxi):
return list(guanxi.keys())
'''Part 1: display content'''
# Output the ratio between the classes of each node's data set
def output(shujuji, guanxi):
    # dictionary keyed by node
leibie = {}
for jjj in allnodes(guanxi):
leibie[jjj] = []
cu = list(shujuji[jjj][:, -1])
gu = sorted(list(set(list(shujuji[jjj][:, -1]))))
for du in gu:
            leibie[jjj].append([du, cu.count(du)])  # each class and its count
return leibie
# Draw the tree from the node data sets, node results and node rules
# Build the content shown inside each node
def dingyistr(shujuji, reeult, guize, guanxi, zian):
    # rule dictionary
    guizezidian = {}
    # class dictionary
    leibii = output(shujuji, guanxi)
    # string dictionary
    strdict = {}
    # internal nodes
    nonode = noye_node(guanxi)
    # Traverse every node to display and build the string content it shows
for jjj in allnodes(guanxi):
        # add the node name
        strdict[jjj] = '节点:%s \n' % jjj  # the contents are split across lines
        # Leaf nodes need no split feature, only the ratio of each class
if jjj not in nonode:
hu = '占比:'
for fu in leibii[jjj]:
hu += '%d:' % fu[1]
strdict[jjj] += '%s \n' % hu[:-1]
        # Internal nodes additionally need the split feature and the rule
else:
hu = '占比:'
for fu in leibii[jjj]:
hu += '%d:' % fu[1]
strdict[jjj] += '%s \n' % hu[:-1]
            # add the split feature
strdict[jjj] += '特征:%s \n' % zian[guize['%s' % (jjj + 'r')][-1][0]]
            # add the rule
sign = 0
try:
guize['%s' % (jjj + 'r')][-1][1] + 1
sign = 1
except TypeError:
pass
if sign == 0:
guizezidian[jjj + 'l'] = '值为:\n %s' % guize['%s' % (jjj + 'r')][-1][1]
guizezidian[jjj + 'r'] = '值不为:\n %s' % guize['%s' % (jjj + 'r')][-1][1]
else:
guizezidian[jjj + 'l'] = '值不大于:\n %s' % guize['%s' % (jjj + 'r')][-1][1]
guizezidian[jjj + 'r'] = '值大于:\n %s' % guize['%s' % (jjj + 'r')][-1][1]
        # add the result for every displayed node
strdict[jjj] += '结果:%s ' % reeult[jjj]
    return strdict, guizezidian  # return the node-content dictionary and the edge-label dictionary
'''Part 2: node positions'''
# Determine the canvas size from the longest node name
def huabu(guanxi):
    # all the nodes
suoyounodes = allnodes(guanxi)
    # Length of the longest node-name string, which is also the depth of the tree
changdu = max(len(i) for i in suoyounodes)
    # return the depth and the canvas size
return changdu + 1, 2**max(6, changdu)
# The horizontal position depends on how many nodes share this level and on this node's order among them
def getorder(exnode, guanxi):
fu = []
for jj in allnodes(guanxi):
if len(jj) == len(exnode):
fu.append(jj)
    # sort
sfu = sorted(fu)
    return len(sfu) + 1, sfu.index(exnode) + 1  # +1 to count the gaps; +1 because the index starts at 0
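# For example (a sketch): with sibling nodes '1l' and '1r' on one level,
# getorder('1l', guanxi) returns (3, 1): three horizontal gaps, first slot.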
# Define each node's x and y coordinates based on the canvas size
def jiedian_location(guanxi):
    # tree depth, canvas size
shushen, huahuabu = huabu(guanxi)
    # dictionary of every node's coordinates
loca = {}
    # First group the node names by length into a dictionary
changdu = {}
for jj in allnodes(guanxi):
try:
changdu[len(jj)].append(jj)
except KeyError:
changdu[len(jj)] = [jj]
    # Determine the positions of the nodes to display
for fi in allnodes(guanxi):
if fi not in loca:
            for gu in changdu[len(fi)]:  # nodes on the same level (same name length) are computed together
number = getorder(gu, guanxi)
loca[gu] = [huahuabu / number[0] * number[1], huahuabu - (huahuabu / shushen) * len(gu)]
return loca
'''Part 3: preparation done, start drawing'''
# start drawing
def draw_tree(shujuji, result, guize, guanxi, zian=ziduan):
    # string content
strziu = dingyistr(shujuji, result, guize, guanxi, zian)
    # node positions
weihzi = jiedian_location(guanxi)
noyye = noye_node(guanxi)
    # canvas setup
    huab = huabu(guanxi)[1] + 2  # leave margins on every side
fig, ax = plt.subplots(figsize=(huab, huab))
    # start plotting
for jj in allnodes(guanxi):
print(jj)
        # draw the content that every node displays
        # internal node
if jj in noyye:
ax.text(weihzi[jj][0], weihzi[jj][1], strziu[0][jj], size=13, rotation=0.,
ha="center", va="center",
bbox=dict(boxstyle="round",
ec=(0.6, 0.2, 0.6),
fc=(0.3, 0.6, 0.3),
)
)
        # leaf node
else:
ax.text(weihzi[jj][0], weihzi[jj][1], strziu[0][jj], size=13, rotation=0.,
ha="center", va="center",
bbox=dict(boxstyle="round",
ec=(0.2, 0.5, 0.2),
fc=(0.5, 0.2, 0.5),
)
)
        # Arrows and the left/right split rules are drawn only for internal nodes
if jj in noyye:
            # add the left and right arrows
ax.annotate(' ', xy=(weihzi[jj + 'r'][0], weihzi[jj + 'r'][1]), xytext=(weihzi[jj][0], weihzi[jj][1]), ha="center", va="center",
arrowprops=dict(facecolor='darkred', shrink=0.128))
ax.annotate(' ', xy=(weihzi[jj + 'l'][0], weihzi[jj + 'l'][1]), xytext=(weihzi[jj][0], weihzi[jj][1]),
ha="center", va="center", arrowprops=dict(facecolor='darkred', shrink=0.128))
            # add the left and right rules
ax.text((weihzi[jj + 'l'][0] + weihzi[jj][0]) / 2, \
(weihzi[jj + 'l'][1] + weihzi[jj][1]) / 2 - 0.2, strziu[1][jj + 'l'], fontsize=12, color='red', weight='bold')
ax.text((weihzi[jj + 'r'][0] + weihzi[jj][0]) / 2, \
(weihzi[jj + 'r'][1] + weihzi[jj][1]) / 2 - 0.2, strziu[1][jj + 'r'], fontsize=12, color='red', weight='bold')
ax.set(xlim=(0, huab), ylim=(0, huab))
plt.show()
# Check how accuracy changes with different tree depths
if __name__ == '__main__':
    # get the tree information
    decision_tree = tree.DT()
    # the fully grown tree
    decision_tree.grow_tree()
    # the set of trees produced by pruning
    gu = decision_tree.prue_tree()
    # the best tree selected by cross-validation
    cc = decision_tree.jiaocha_tree(gu[0])
    print(cc[0])
    # data sets
    shuju = decision_tree.node_shujuji
    # results
    jieguo = decision_tree.jieguo_tree()
    # rules
rule = decision_tree.node_rule
draw_tree(shuju, jieguo, rule, cc[0])
|
kentsay/0001/add_num2img.py | saurabh896/python-1 | 3,976 | 12709969 |
"""
Question:
Problem 0000: add a red number to the top-right corner of your QQ (or Weibo) avatar, similar to WeChat's unread-message count badge.
"""
import sys
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
def add_number2img(image, number):
font = ImageFont.truetype("/Library/Fonts/Chalkduster.ttf", 28)
draw = ImageDraw.Draw(image)
draw.text((200,0), str(number),(255, 255, 255), font=font)
draw = ImageDraw.Draw(image)
image.save("mask_with_num.png")
image.show()
origin = Image.open("mask.png")
add_number2img(origin, sys.argv[1]) |
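# Hypothetical invocation (requires a mask.png next to the script and the
# font path configured above): python add_num2img.py 5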
page_parser/beautifulsoup/test_403.py | 2581676612/python | 112 | 12709992 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 17-7-25 3:17 PM
# @Author : Tom.Lee
# @CopyRight : 2016-2017 OpenBridge by yihecloud
# @File : test_403.py
# @Product : PyCharm
import bs4
t403 = """
<html>
<head>
<title>403 Forbidden</title>
</head>
<body>
<h1>403 Forbidden</h1>
资源 bc6d81de-97af-4ebd-b01a-b23a6567bea2 is protected and cannot be deleted.<br /><br />
</body>
</html>
"""
soup = bs4.BeautifulSoup(t403, "html.parser", from_encoding='utf-8')
title = soup.find('title')
body = soup.find('body')
title_text = title.getText()
body_text = body.getText().replace(title_text, '').replace('\n', '')
print {title_text.split(' ')[-1]: {'message': body_text, 'code': 1}}
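# Expected output (a sketch): {'Forbidden': {'message': '资源 bc6d81de-97af-4ebd-b01a-b23a6567bea2 is protected and cannot be deleted.', 'code': 1}}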
|
test/run/t73.py | timmartin/skulpt | 2,671 | 12709999 |
xyzy = [100,101,102,103,104,105,106,107]
del xyzy
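# del removes the name binding, so the print below should raise a NameError
# (this test appears to exercise exactly that behaviour in skulpt).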
print xyzy
|
CondTools/Ecal/python/EcalTrivialAlignment_cfi.py | ckamtsikis/cmssw | 852 | 12710000 |
import FWCore.ParameterSet.Config as cms
EcalTrivialConditionRetriever = cms.ESSource("EcalTrivialConditionRetriever",
producedEcalClusterLocalContCorrParameters = cms.untracked.bool(True),
producedEcalClusterCrackCorrParameters = cms.untracked.bool(True),
producedEcalClusterEnergyUncertaintyParameters = cms.untracked.bool(True),
producedEcalClusterEnergyCorrectionParameters = cms.untracked.bool(True),
producedEcalClusterEnergyCorrectionObjectSpecificParameters = cms.untracked.bool(True),
getEEAlignmentFromFile = cms.untracked.bool(True),
EEAlignmentFile = cms.untracked.string('CalibCalorimetry/EcalTrivialCondModules/data/EEAlignment_2015.txt'),
getESAlignmentFromFile = cms.untracked.bool(True),
ESAlignmentFile = cms.untracked.string('CalibCalorimetry/EcalTrivialCondModules/data/ESAlignment_2015.txt'),
getEBAlignmentFromFile = cms.untracked.bool(True),
EBAlignmentFile = cms.untracked.string('CalibCalorimetry/EcalTrivialCondModules/data/EBAlignment_2015.txt')
)
|
examples/tutorials/django/blog/views.py | psy-repos-rust/vagga | 1,974 | 12710008 |
from django.views import generic
from .models import Article
class ArticleList(generic.ListView):
model = Article
paginate_by = 10
class ArticleDetail(generic.DetailView):
model = Article
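# Hypothetical URL wiring for these views (paths and names are assumptions,
# not part of the original file):
# from django.urls import path
# urlpatterns = [
#     path('', ArticleList.as_view(), name='article-list'),
#     path('<int:pk>/', ArticleDetail.as_view(), name='article-detail'),
# ]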
|
boto3_type_annotations_with_docs/boto3_type_annotations/resource_groups/client.py | cowboygneox/boto3_type_annotations | 119 | 12710009 |
from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from botocore.paginate import Paginator
from botocore.waiter import Waiter
from typing import Union
from typing import List
class Client(BaseClient):
def can_paginate(self, operation_name: str = None):
"""
Check if an operation can be paginated.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you\'d normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator(\"create_foo\")``.
:return: ``True`` if the operation can be paginated,
``False`` otherwise.
"""
pass
def create_group(self, Name: str, ResourceQuery: Dict, Description: str = None, Tags: Dict = None) -> Dict:
"""
Creates a group with a specified name, description, and resource query.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/resource-groups-2017-11-27/CreateGroup>`_
**Request Syntax**
::
response = client.create_group(
Name='string',
Description='string',
ResourceQuery={
'Type': 'TAG_FILTERS_1_0'|'CLOUDFORMATION_STACK_1_0',
'Query': 'string'
},
Tags={
'string': 'string'
}
)
**Response Syntax**
::
{
'Group': {
'GroupArn': 'string',
'Name': 'string',
'Description': 'string'
},
'ResourceQuery': {
'Type': 'TAG_FILTERS_1_0'|'CLOUDFORMATION_STACK_1_0',
'Query': 'string'
},
'Tags': {
'string': 'string'
}
}
**Response Structure**
- *(dict) --*
- **Group** *(dict) --*
A full description of the resource group after it is created.
- **GroupArn** *(string) --*
The ARN of a resource group.
- **Name** *(string) --*
The name of a resource group.
- **Description** *(string) --*
The description of the resource group.
- **ResourceQuery** *(dict) --*
The resource query associated with the group.
- **Type** *(string) --*
The type of the query. The valid values in this release are ``TAG_FILTERS_1_0`` and ``CLOUDFORMATION_STACK_1_0`` .
* ``TAG_FILTERS_1_0:`` * A JSON syntax that lets you specify a collection of simple tag filters for resource types and tags, as supported by the AWS Tagging API `GetResources <https://docs.aws.amazon.com/resourcegroupstagging/latest/APIReference/API_GetResources.html>`__ operation. If you specify more than one tag key, only resources that match all tag keys, and at least one value of each specified tag key, are returned in your query. If you specify more than one value for a tag key, a resource matches the filter if it has a tag key value that matches *any* of the specified values.
For example, consider the following sample query for resources that have two tags, ``Stage`` and ``Version`` , with two values each. (``[{"Key":"Stage","Values":["Test","Deploy"]},{"Key":"Version","Values":["1","2"]}]`` ) The results of this query might include the following.
* An EC2 instance that has the following two tags: ``{"Key":"Stage","Value":"Deploy"}`` , and ``{"Key":"Version","Value":"2"}``
* An S3 bucket that has the following two tags: {"Key":"Stage","Value":"Test"}, and {"Key":"Version","Value":"1"}
The query would not return the following results, however. The following EC2 instance does not have all tag keys specified in the filter, so it is rejected. The RDS database has all of the tag keys, but no values that match at least one of the specified tag key values in the filter.
* An EC2 instance that has only the following tag: ``{"Key":"Stage","Value":"Deploy"}`` .
* An RDS database that has the following two tags: ``{"Key":"Stage","Value":"Archived"}`` , and ``{"Key":"Version","Value":"4"}``
* ``CLOUDFORMATION_STACK_1_0:`` * A JSON syntax that lets you specify a CloudFormation stack ARN.
- **Query** *(string) --*
The query that defines a group or a search.
- **Tags** *(dict) --*
The tags associated with the group.
- *(string) --*
- *(string) --*
:type Name: string
:param Name: **[REQUIRED]**
The name of the group, which is the identifier of the group in other operations. A resource group name cannot be updated after it is created. A resource group name can have a maximum of 128 characters, including letters, numbers, hyphens, dots, and underscores. The name cannot start with ``AWS`` or ``aws`` ; these are reserved. A resource group name must be unique within your account.
:type Description: string
:param Description:
The description of the resource group. Descriptions can have a maximum of 511 characters, including letters, numbers, hyphens, underscores, punctuation, and spaces.
:type ResourceQuery: dict
:param ResourceQuery: **[REQUIRED]**
The resource query that determines which AWS resources are members of this group.
- **Type** *(string) --* **[REQUIRED]**
The type of the query. The valid values in this release are ``TAG_FILTERS_1_0`` and ``CLOUDFORMATION_STACK_1_0`` .
* ``TAG_FILTERS_1_0:`` * A JSON syntax that lets you specify a collection of simple tag filters for resource types and tags, as supported by the AWS Tagging API `GetResources <https://docs.aws.amazon.com/resourcegroupstagging/latest/APIReference/API_GetResources.html>`__ operation. If you specify more than one tag key, only resources that match all tag keys, and at least one value of each specified tag key, are returned in your query. If you specify more than one value for a tag key, a resource matches the filter if it has a tag key value that matches *any* of the specified values.
For example, consider the following sample query for resources that have two tags, ``Stage`` and ``Version`` , with two values each. (``[{\"Key\":\"Stage\",\"Values\":[\"Test\",\"Deploy\"]},{\"Key\":\"Version\",\"Values\":[\"1\",\"2\"]}]`` ) The results of this query might include the following.
* An EC2 instance that has the following two tags: ``{\"Key\":\"Stage\",\"Value\":\"Deploy\"}`` , and ``{\"Key\":\"Version\",\"Value\":\"2\"}``
* An S3 bucket that has the following two tags: {\"Key\":\"Stage\",\"Value\":\"Test\"}, and {\"Key\":\"Version\",\"Value\":\"1\"}
The query would not return the following results, however. The following EC2 instance does not have all tag keys specified in the filter, so it is rejected. The RDS database has all of the tag keys, but no values that match at least one of the specified tag key values in the filter.
* An EC2 instance that has only the following tag: ``{\"Key\":\"Stage\",\"Value\":\"Deploy\"}`` .
* An RDS database that has the following two tags: ``{\"Key\":\"Stage\",\"Value\":\"Archived\"}`` , and ``{\"Key\":\"Version\",\"Value\":\"4\"}``
* ``CLOUDFORMATION_STACK_1_0:`` * A JSON syntax that lets you specify a CloudFormation stack ARN.
- **Query** *(string) --* **[REQUIRED]**
The query that defines a group or a search.
:type Tags: dict
:param Tags:
The tags to add to the group. A tag is a string-to-string map of key-value pairs. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.
- *(string) --*
- *(string) --*
:rtype: dict
:returns:
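        **Example (hypothetical values; a sketch only, not taken from the service docs)**
        ::
            import boto3
            client = boto3.client('resource-groups')
            response = client.create_group(
                Name='my-ec2-group',
                ResourceQuery={
                    'Type': 'TAG_FILTERS_1_0',
                    'Query': '{"ResourceTypeFilters":["AWS::EC2::Instance"],"TagFilters":[{"Key":"Stage","Values":["Test"]}]}'
                }
            )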
"""
pass
def delete_group(self, GroupName: str) -> Dict:
"""
Deletes a specified resource group. Deleting a resource group does not delete resources that are members of the group; it only deletes the group structure.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/resource-groups-2017-11-27/DeleteGroup>`_
**Request Syntax**
::
response = client.delete_group(
GroupName='string'
)
**Response Syntax**
::
{
'Group': {
'GroupArn': 'string',
'Name': 'string',
'Description': 'string'
}
}
**Response Structure**
- *(dict) --*
- **Group** *(dict) --*
A full description of the deleted resource group.
- **GroupArn** *(string) --*
The ARN of a resource group.
- **Name** *(string) --*
The name of a resource group.
- **Description** *(string) --*
The description of the resource group.
:type GroupName: string
:param GroupName: **[REQUIRED]**
The name of the resource group to delete.
:rtype: dict
:returns:
"""
pass
def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
"""
Generate a presigned url given a client, its method, and arguments
:type ClientMethod: string
:param ClientMethod: The client method to presign for
:type Params: dict
:param Params: The parameters normally passed to
``ClientMethod``.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned url is valid
for. By default it expires in an hour (3600 seconds)
:type HttpMethod: string
:param HttpMethod: The http method to use on the generated url. By
default, the http method is whatever is used in the method\'s model.
:returns: The presigned url
"""
pass
def get_group(self, GroupName: str) -> Dict:
"""
Returns information about a specified resource group.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/resource-groups-2017-11-27/GetGroup>`_
**Request Syntax**
::
response = client.get_group(
GroupName='string'
)
**Response Syntax**
::
{
'Group': {
'GroupArn': 'string',
'Name': 'string',
'Description': 'string'
}
}
**Response Structure**
- *(dict) --*
- **Group** *(dict) --*
A full description of the resource group.
- **GroupArn** *(string) --*
The ARN of a resource group.
- **Name** *(string) --*
The name of a resource group.
- **Description** *(string) --*
The description of the resource group.
:type GroupName: string
:param GroupName: **[REQUIRED]**
The name of the resource group.
:rtype: dict
:returns:
"""
pass
def get_group_query(self, GroupName: str) -> Dict:
"""
Returns the resource query associated with the specified resource group.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/resource-groups-2017-11-27/GetGroupQuery>`_
**Request Syntax**
::
response = client.get_group_query(
GroupName='string'
)
**Response Syntax**
::
{
'GroupQuery': {
'GroupName': 'string',
'ResourceQuery': {
'Type': 'TAG_FILTERS_1_0'|'CLOUDFORMATION_STACK_1_0',
'Query': 'string'
}
}
}
**Response Structure**
- *(dict) --*
- **GroupQuery** *(dict) --*
The resource query associated with the specified group.
- **GroupName** *(string) --*
The name of a resource group that is associated with a specific resource query.
- **ResourceQuery** *(dict) --*
The resource query which determines which AWS resources are members of the associated resource group.
- **Type** *(string) --*
The type of the query. The valid values in this release are ``TAG_FILTERS_1_0`` and ``CLOUDFORMATION_STACK_1_0`` .
* ``TAG_FILTERS_1_0:`` * A JSON syntax that lets you specify a collection of simple tag filters for resource types and tags, as supported by the AWS Tagging API `GetResources <https://docs.aws.amazon.com/resourcegroupstagging/latest/APIReference/API_GetResources.html>`__ operation. If you specify more than one tag key, only resources that match all tag keys, and at least one value of each specified tag key, are returned in your query. If you specify more than one value for a tag key, a resource matches the filter if it has a tag key value that matches *any* of the specified values.
For example, consider the following sample query for resources that have two tags, ``Stage`` and ``Version`` , with two values each. (``[{"Key":"Stage","Values":["Test","Deploy"]},{"Key":"Version","Values":["1","2"]}]`` ) The results of this query might include the following.
* An EC2 instance that has the following two tags: ``{"Key":"Stage","Value":"Deploy"}`` , and ``{"Key":"Version","Value":"2"}``
* An S3 bucket that has the following two tags: {"Key":"Stage","Value":"Test"}, and {"Key":"Version","Value":"1"}
The query would not return the following results, however. The following EC2 instance does not have all tag keys specified in the filter, so it is rejected. The RDS database has all of the tag keys, but no values that match at least one of the specified tag key values in the filter.
* An EC2 instance that has only the following tag: ``{"Key":"Stage","Value":"Deploy"}`` .
* An RDS database that has the following two tags: ``{"Key":"Stage","Value":"Archived"}`` , and ``{"Key":"Version","Value":"4"}``
* ``CLOUDFORMATION_STACK_1_0:`` * A JSON syntax that lets you specify a CloudFormation stack ARN.
- **Query** *(string) --*
The query that defines a group or a search.
:type GroupName: string
:param GroupName: **[REQUIRED]**
The name of the resource group.
:rtype: dict
:returns:
"""
pass
def get_paginator(self, operation_name: str = None) -> Paginator:
"""
Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you\'d normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator(\"create_foo\")``.
:raise OperationNotPageableError: Raised if the operation is not
pageable. You can use the ``client.can_paginate`` method to
check if an operation is pageable.
:rtype: L{botocore.paginate.Paginator}
:return: A paginator object.
"""
pass
def get_tags(self, Arn: str) -> Dict:
"""
Returns a list of tags that are associated with a resource group, specified by an ARN.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/resource-groups-2017-11-27/GetTags>`_
**Request Syntax**
::
response = client.get_tags(
Arn='string'
)
**Response Syntax**
::
{
'Arn': 'string',
'Tags': {
'string': 'string'
}
}
**Response Structure**
- *(dict) --*
- **Arn** *(string) --*
The ARN of the tagged resource group.
- **Tags** *(dict) --*
The tags associated with the specified resource group.
- *(string) --*
- *(string) --*
:type Arn: string
:param Arn: **[REQUIRED]**
The ARN of the resource group for which you want a list of tags. The resource must exist within the account you are using.
:rtype: dict
:returns:
"""
pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
"""
Returns an object that can wait for some condition.
:type waiter_name: str
:param waiter_name: The name of the waiter to get. See the waiters
section of the service docs for a list of available waiters.
:returns: The specified waiter object.
:rtype: botocore.waiter.Waiter
"""
pass
def list_group_resources(self, GroupName: str, Filters: List = None, MaxResults: int = None, NextToken: str = None) -> Dict:
"""
Returns a list of ARNs of resources that are members of a specified resource group.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/resource-groups-2017-11-27/ListGroupResources>`_
**Request Syntax**
::
response = client.list_group_resources(
GroupName='string',
Filters=[
{
'Name': 'resource-type',
'Values': [
'string',
]
},
],
MaxResults=123,
NextToken='string'
)
**Response Syntax**
::
{
'ResourceIdentifiers': [
{
'ResourceArn': 'string',
'ResourceType': 'string'
},
],
'NextToken': 'string',
'QueryErrors': [
{
'ErrorCode': 'CLOUDFORMATION_STACK_INACTIVE'|'CLOUDFORMATION_STACK_NOT_EXISTING',
'Message': 'string'
},
]
}
**Response Structure**
- *(dict) --*
- **ResourceIdentifiers** *(list) --*
The ARNs and resource types of resources that are members of the group that you specified.
- *(dict) --*
The ARN of a resource, and its resource type.
- **ResourceArn** *(string) --*
The ARN of a resource.
- **ResourceType** *(string) --*
The resource type of a resource, such as ``AWS::EC2::Instance`` .
- **NextToken** *(string) --*
The NextToken value to include in a subsequent ``ListGroupResources`` request, to get more results.
- **QueryErrors** *(list) --*
A list of ``QueryError`` objects. Each error is an object that contains ``ErrorCode`` and ``Message`` structures. Possible values for ``ErrorCode`` are ``CLOUDFORMATION_STACK_INACTIVE`` and ``CLOUDFORMATION_STACK_NOT_EXISTING`` .
- *(dict) --*
A two-part error structure that can occur in ``ListGroupResources`` or ``SearchResources`` operations on CloudFormation stack-based queries. The error occurs if the CloudFormation stack on which the query is based either does not exist, or has a status that renders the stack inactive. A ``QueryError`` occurrence does not necessarily mean that AWS Resource Groups could not complete the operation, but the resulting group might have no member resources.
- **ErrorCode** *(string) --*
Possible values are ``CLOUDFORMATION_STACK_INACTIVE`` and ``CLOUDFORMATION_STACK_NOT_EXISTING`` .
- **Message** *(string) --*
A message that explains the ``ErrorCode`` value. Messages might state that the specified CloudFormation stack does not exist (or no longer exists). For ``CLOUDFORMATION_STACK_INACTIVE`` , the message typically states that the CloudFormation stack has a status that is not (or no longer) active, such as ``CREATE_FAILED`` .
:type GroupName: string
:param GroupName: **[REQUIRED]**
The name of the resource group.
:type Filters: list
:param Filters:
Filters, formatted as ResourceFilter objects, that you want to apply to a ListGroupResources operation.
* ``resource-type`` - Filter resources by their type. Specify up to five resource types in the format AWS::ServiceCode::ResourceType. For example, AWS::EC2::Instance, or AWS::S3::Bucket.
- *(dict) --*
A filter name and value pair that is used to obtain more specific results from a list of resources.
- **Name** *(string) --* **[REQUIRED]**
The name of the filter. Filter names are case-sensitive.
- **Values** *(list) --* **[REQUIRED]**
One or more filter values. Allowed filter values vary by resource filter name, and are case-sensitive.
- *(string) --*
:type MaxResults: integer
:param MaxResults:
The maximum number of group member ARNs that are returned in a single call by ListGroupResources, in paginated output. By default, this number is 50.
:type NextToken: string
:param NextToken:
The NextToken value that is returned in a paginated ListGroupResources request. To get the next page of results, run the call again, add the NextToken parameter, and specify the NextToken value.
:rtype: dict
:returns:
"""
pass
def list_groups(self, Filters: List = None, MaxResults: int = None, NextToken: str = None) -> Dict:
"""
Returns a list of existing resource groups in your account.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/resource-groups-2017-11-27/ListGroups>`_
**Request Syntax**
::
response = client.list_groups(
Filters=[
{
'Name': 'resource-type',
'Values': [
'string',
]
},
],
MaxResults=123,
NextToken='string'
)
**Response Syntax**
::
{
'GroupIdentifiers': [
{
'GroupName': 'string',
'GroupArn': 'string'
},
],
'Groups': [
{
'GroupArn': 'string',
'Name': 'string',
'Description': 'string'
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **GroupIdentifiers** *(list) --*
A list of GroupIdentifier objects. Each identifier is an object that contains both the GroupName and the GroupArn.
- *(dict) --*
The ARN and group name of a group.
- **GroupName** *(string) --*
The name of a resource group.
- **GroupArn** *(string) --*
The ARN of a resource group.
- **Groups** *(list) --*
A list of resource groups.
- *(dict) --*
A resource group.
- **GroupArn** *(string) --*
The ARN of a resource group.
- **Name** *(string) --*
The name of a resource group.
- **Description** *(string) --*
The description of the resource group.
- **NextToken** *(string) --*
The NextToken value to include in a subsequent ``ListGroups`` request, to get more results.
:type Filters: list
:param Filters:
Filters, formatted as GroupFilter objects, that you want to apply to a ListGroups operation.
* ``resource-type`` - Filter groups by resource type. Specify up to five resource types in the format AWS::ServiceCode::ResourceType. For example, AWS::EC2::Instance, or AWS::S3::Bucket.
- *(dict) --*
A filter name and value pair that is used to obtain more specific results from a list of groups.
- **Name** *(string) --* **[REQUIRED]**
The name of the filter. Filter names are case-sensitive.
- **Values** *(list) --* **[REQUIRED]**
One or more filter values. Allowed filter values vary by group filter name, and are case-sensitive.
- *(string) --*
:type MaxResults: integer
:param MaxResults:
The maximum number of resource group results that are returned by ListGroups in paginated output. By default, this number is 50.
:type NextToken: string
:param NextToken:
The NextToken value that is returned in a paginated ``ListGroups`` request. To get the next page of results, run the call again, add the NextToken parameter, and specify the NextToken value.
:rtype: dict
:returns:
"""
pass
def search_resources(self, ResourceQuery: Dict, MaxResults: int = None, NextToken: str = None) -> Dict:
"""
Returns a list of AWS resource identifiers that matches a specified query. The query uses the same format as a resource query in a CreateGroup or UpdateGroupQuery operation.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/resource-groups-2017-11-27/SearchResources>`_
**Request Syntax**
::
response = client.search_resources(
ResourceQuery={
'Type': 'TAG_FILTERS_1_0'|'CLOUDFORMATION_STACK_1_0',
'Query': 'string'
},
MaxResults=123,
NextToken='string'
)
**Response Syntax**
::
{
'ResourceIdentifiers': [
{
'ResourceArn': 'string',
'ResourceType': 'string'
},
],
'NextToken': 'string',
'QueryErrors': [
{
'ErrorCode': 'CLOUDFORMATION_STACK_INACTIVE'|'CLOUDFORMATION_STACK_NOT_EXISTING',
'Message': 'string'
},
]
}
**Response Structure**
- *(dict) --*
- **ResourceIdentifiers** *(list) --*
The ARNs and resource types of resources that are members of the group that you specified.
- *(dict) --*
The ARN of a resource, and its resource type.
- **ResourceArn** *(string) --*
The ARN of a resource.
- **ResourceType** *(string) --*
The resource type of a resource, such as ``AWS::EC2::Instance`` .
- **NextToken** *(string) --*
The NextToken value to include in a subsequent ``SearchResources`` request, to get more results.
- **QueryErrors** *(list) --*
A list of ``QueryError`` objects. Each error is an object that contains ``ErrorCode`` and ``Message`` structures. Possible values for ``ErrorCode`` are ``CLOUDFORMATION_STACK_INACTIVE`` and ``CLOUDFORMATION_STACK_NOT_EXISTING`` .
- *(dict) --*
A two-part error structure that can occur in ``ListGroupResources`` or ``SearchResources`` operations on CloudFormation stack-based queries. The error occurs if the CloudFormation stack on which the query is based either does not exist, or has a status that renders the stack inactive. A ``QueryError`` occurrence does not necessarily mean that AWS Resource Groups could not complete the operation, but the resulting group might have no member resources.
- **ErrorCode** *(string) --*
Possible values are ``CLOUDFORMATION_STACK_INACTIVE`` and ``CLOUDFORMATION_STACK_NOT_EXISTING`` .
- **Message** *(string) --*
A message that explains the ``ErrorCode`` value. Messages might state that the specified CloudFormation stack does not exist (or no longer exists). For ``CLOUDFORMATION_STACK_INACTIVE`` , the message typically states that the CloudFormation stack has a status that is not (or no longer) active, such as ``CREATE_FAILED`` .
:type ResourceQuery: dict
:param ResourceQuery: **[REQUIRED]**
The search query, using the same formats that are supported for resource group definition.
- **Type** *(string) --* **[REQUIRED]**
The type of the query. The valid values in this release are ``TAG_FILTERS_1_0`` and ``CLOUDFORMATION_STACK_1_0`` .
* ``TAG_FILTERS_1_0:`` * A JSON syntax that lets you specify a collection of simple tag filters for resource types and tags, as supported by the AWS Tagging API `GetResources <https://docs.aws.amazon.com/resourcegroupstagging/latest/APIReference/API_GetResources.html>`__ operation. If you specify more than one tag key, only resources that match all tag keys, and at least one value of each specified tag key, are returned in your query. If you specify more than one value for a tag key, a resource matches the filter if it has a tag key value that matches *any* of the specified values.
For example, consider the following sample query for resources that have two tags, ``Stage`` and ``Version`` , with two values each. (``[{\"Key\":\"Stage\",\"Values\":[\"Test\",\"Deploy\"]},{\"Key\":\"Version\",\"Values\":[\"1\",\"2\"]}]`` ) The results of this query might include the following.
* An EC2 instance that has the following two tags: ``{\"Key\":\"Stage\",\"Value\":\"Deploy\"}`` , and ``{\"Key\":\"Version\",\"Value\":\"2\"}``
* An S3 bucket that has the following two tags: ``{\"Key\":\"Stage\",\"Value\":\"Test\"}`` , and ``{\"Key\":\"Version\",\"Value\":\"1\"}``
The query would not return the following results, however. The following EC2 instance does not have all tag keys specified in the filter, so it is rejected. The RDS database has all of the tag keys, but no values that match at least one of the specified tag key values in the filter.
* An EC2 instance that has only the following tag: ``{\"Key\":\"Stage\",\"Value\":\"Deploy\"}`` .
* An RDS database that has the following two tags: ``{\"Key\":\"Stage\",\"Value\":\"Archived\"}`` , and ``{\"Key\":\"Version\",\"Value\":\"4\"}``
* ``CLOUDFORMATION_STACK_1_0:`` * A JSON syntax that lets you specify a CloudFormation stack ARN.
- **Query** *(string) --* **[REQUIRED]**
The query that defines a group or a search.
:type MaxResults: integer
:param MaxResults:
The maximum number of group member ARNs returned by ``SearchResources`` in paginated output. By default, this number is 50.
:type NextToken: string
:param NextToken:
The NextToken value that is returned in a paginated ``SearchResources`` request. To get the next page of results, run the call again, add the NextToken parameter, and specify the NextToken value.
:rtype: dict
:returns:
"""
pass
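# Illustrative tag-filter search (a sketch; `client` is a hypothetical,
# configured boto3 'resource-groups' client, query semantics as described above):
# import json
# query = json.dumps([{"Key": "Stage", "Values": ["Test", "Deploy"]},
#                     {"Key": "Version", "Values": ["1", "2"]}])
# resp = client.search_resources(
#     ResourceQuery={'Type': 'TAG_FILTERS_1_0', 'Query': query})
# for ident in resp['ResourceIdentifiers']:
#     print(ident['ResourceType'], ident['ResourceArn'])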
def tag(self, Arn: str, Tags: Dict) -> Dict:
"""
Adds tags to a resource group with the specified ARN. Existing tags on a resource group are not changed if they are not specified in the request parameters.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/resource-groups-2017-11-27/Tag>`_
**Request Syntax**
::
response = client.tag(
Arn='string',
Tags={
'string': 'string'
}
)
**Response Syntax**
::
{
'Arn': 'string',
'Tags': {
'string': 'string'
}
}
**Response Structure**
- *(dict) --*
- **Arn** *(string) --*
The ARN of the tagged resource.
- **Tags** *(dict) --*
The tags that have been added to the specified resource.
- *(string) --*
- *(string) --*
:type Arn: string
:param Arn: **[REQUIRED]**
The ARN of the resource to which to add tags.
:type Tags: dict
:param Tags: **[REQUIRED]**
The tags to add to the specified resource. A tag is a string-to-string map of key-value pairs. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.
- *(string) --*
- *(string) --*
:rtype: dict
:returns:
"""
pass
def untag(self, Arn: str, Keys: List) -> Dict:
"""
Deletes specified tags from a specified resource.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/resource-groups-2017-11-27/Untag>`_
**Request Syntax**
::
response = client.untag(
Arn='string',
Keys=[
'string',
]
)
**Response Syntax**
::
{
'Arn': 'string',
'Keys': [
'string',
]
}
**Response Structure**
- *(dict) --*
- **Arn** *(string) --*
The ARN of the resource from which tags have been removed.
- **Keys** *(list) --*
The keys of tags that have been removed.
- *(string) --*
:type Arn: string
:param Arn: **[REQUIRED]**
The ARN of the resource from which to remove tags.
:type Keys: list
:param Keys: **[REQUIRED]**
The keys of the tags to be removed.
- *(string) --*
:rtype: dict
:returns:
"""
pass
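# Illustrative tag/untag round trip (a sketch; `client` and `group_arn`
# are hypothetical):
# client.tag(Arn=group_arn, Tags={'env': 'prod'})
# client.untag(Arn=group_arn, Keys=['env'])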
def update_group(self, GroupName: str, Description: str = None) -> Dict:
"""
Updates an existing group with a new or changed description. You cannot update the name of a resource group.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/resource-groups-2017-11-27/UpdateGroup>`_
**Request Syntax**
::
response = client.update_group(
GroupName='string',
Description='string'
)
**Response Syntax**
::
{
'Group': {
'GroupArn': 'string',
'Name': 'string',
'Description': 'string'
}
}
**Response Structure**
- *(dict) --*
- **Group** *(dict) --*
The full description of the resource group after it has been updated.
- **GroupArn** *(string) --*
The ARN of a resource group.
- **Name** *(string) --*
The name of a resource group.
- **Description** *(string) --*
The description of the resource group.
:type GroupName: string
:param GroupName: **[REQUIRED]**
The name of the resource group whose description you want to update.
:type Description: string
:param Description:
The description of the resource group. Descriptions can have a maximum of 511 characters, including letters, numbers, hyphens, underscores, punctuation, and spaces.
:rtype: dict
:returns:
"""
pass
def update_group_query(self, GroupName: str, ResourceQuery: Dict) -> Dict:
"""
Updates the resource query of a group.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/resource-groups-2017-11-27/UpdateGroupQuery>`_
**Request Syntax**
::
response = client.update_group_query(
GroupName='string',
ResourceQuery={
'Type': 'TAG_FILTERS_1_0'|'CLOUDFORMATION_STACK_1_0',
'Query': 'string'
}
)
**Response Syntax**
::
{
'GroupQuery': {
'GroupName': 'string',
'ResourceQuery': {
'Type': 'TAG_FILTERS_1_0'|'CLOUDFORMATION_STACK_1_0',
'Query': 'string'
}
}
}
**Response Structure**
- *(dict) --*
- **GroupQuery** *(dict) --*
The resource query associated with the resource group after the update.
- **GroupName** *(string) --*
The name of a resource group that is associated with a specific resource query.
- **ResourceQuery** *(dict) --*
The resource query which determines which AWS resources are members of the associated resource group.
- **Type** *(string) --*
The type of the query. The valid values in this release are ``TAG_FILTERS_1_0`` and ``CLOUDFORMATION_STACK_1_0`` .
* ``TAG_FILTERS_1_0:`` * A JSON syntax that lets you specify a collection of simple tag filters for resource types and tags, as supported by the AWS Tagging API `GetResources <https://docs.aws.amazon.com/resourcegroupstagging/latest/APIReference/API_GetResources.html>`__ operation. If you specify more than one tag key, only resources that match all tag keys, and at least one value of each specified tag key, are returned in your query. If you specify more than one value for a tag key, a resource matches the filter if it has a tag key value that matches *any* of the specified values.
For example, consider the following sample query for resources that have two tags, ``Stage`` and ``Version`` , with two values each. (``[{"Key":"Stage","Values":["Test","Deploy"]},{"Key":"Version","Values":["1","2"]}]`` ) The results of this query might include the following.
* An EC2 instance that has the following two tags: ``{"Key":"Stage","Value":"Deploy"}`` , and ``{"Key":"Version","Value":"2"}``
* An S3 bucket that has the following two tags: {"Key":"Stage","Value":"Test"}, and {"Key":"Version","Value":"1"}
The query would not return the following results, however. The following EC2 instance does not have all tag keys specified in the filter, so it is rejected. The RDS database has all of the tag keys, but no values that match at least one of the specified tag key values in the filter.
* An EC2 instance that has only the following tag: ``{"Key":"Stage","Value":"Deploy"}`` .
* An RDS database that has the following two tags: ``{"Key":"Stage","Value":"Archived"}`` , and ``{"Key":"Version","Value":"4"}``
* ``CLOUDFORMATION_STACK_1_0:`` * A JSON syntax that lets you specify a CloudFormation stack ARN.
- **Query** *(string) --*
The query that defines a group or a search.
:type GroupName: string
:param GroupName: **[REQUIRED]**
The name of the resource group for which you want to edit the query.
:type ResourceQuery: dict
:param ResourceQuery: **[REQUIRED]**
The resource query that determines which AWS resources are members of the resource group.
- **Type** *(string) --* **[REQUIRED]**
The type of the query. The valid values in this release are ``TAG_FILTERS_1_0`` and ``CLOUDFORMATION_STACK_1_0`` .
* ``TAG_FILTERS_1_0:`` * A JSON syntax that lets you specify a collection of simple tag filters for resource types and tags, as supported by the AWS Tagging API `GetResources <https://docs.aws.amazon.com/resourcegroupstagging/latest/APIReference/API_GetResources.html>`__ operation. If you specify more than one tag key, only resources that match all tag keys, and at least one value of each specified tag key, are returned in your query. If you specify more than one value for a tag key, a resource matches the filter if it has a tag key value that matches *any* of the specified values.
For example, consider the following sample query for resources that have two tags, ``Stage`` and ``Version`` , with two values each. (``[{\"Key\":\"Stage\",\"Values\":[\"Test\",\"Deploy\"]},{\"Key\":\"Version\",\"Values\":[\"1\",\"2\"]}]`` ) The results of this query might include the following.
* An EC2 instance that has the following two tags: ``{\"Key\":\"Stage\",\"Value\":\"Deploy\"}`` , and ``{\"Key\":\"Version\",\"Value\":\"2\"}``
* An S3 bucket that has the following two tags: ``{\"Key\":\"Stage\",\"Value\":\"Test\"}`` , and ``{\"Key\":\"Version\",\"Value\":\"1\"}``
The query would not return the following results, however. The following EC2 instance does not have all tag keys specified in the filter, so it is rejected. The RDS database has all of the tag keys, but no values that match at least one of the specified tag key values in the filter.
* An EC2 instance that has only the following tag: ``{\"Key\":\"Stage\",\"Value\":\"Deploy\"}`` .
* An RDS database that has the following two tags: ``{\"Key\":\"Stage\",\"Value\":\"Archived\"}`` , and ``{\"Key\":\"Version\",\"Value\":\"4\"}``
* ``CLOUDFORMATION_STACK_1_0:`` * A JSON syntax that lets you specify a CloudFormation stack ARN.
- **Query** *(string) --* **[REQUIRED]**
The query that defines a group or a search.
:rtype: dict
:returns:
"""
pass
|
imgreco/ocr/cnocr.py | HTLXMC/ArknightsAutoHelper | 1,035 | 12710017 | <filename>imgreco/ocr/cnocr.py
from cnocr import CnOcr
from .common import *
import cv2
import numpy as np
from functools import lru_cache
import logging
is_online = False  # whether the OCR process requires network access
info = "cnocr"  # module description, shown in the log
@lru_cache()
def get_ocr(model_name='densenet-lite-fc'):
return CnOcr(name=f'imgreco-{model_name}', model_name=model_name)
cn_ocr = get_ocr('densenet-lite-fc')
def check_supported():
"""返回模块是否可用"""
return True
class MyCnOcr(OcrEngine):
def recognize(self, image, ppi=70, hints=None, **kwargs):
cv_img = cv2.cvtColor(np.asarray(image), cv2.COLOR_GRAY2RGB)
result = cn_ocr.ocr(cv_img)
lines = [OcrLine([OcrWord(Rect(0, 0), w) for w in ocrline]) for ocrline in result]
return OcrResult(lines)
def ocr_for_single_line(img, cand_alphabet: str = None, ocr=cn_ocr):
if cand_alphabet:
ocr.set_cand_alphabet(cand_alphabet)
res = ''.join(ocr.ocr_for_single_line(img)).strip()
if cand_alphabet:
ocr.set_cand_alphabet(None)
return res
def search_in_list(s_list, x, min_score=0.5):
import textdistance
max_sim = -1
res = None
if isinstance(s_list, map):
s_list = list(s_list)  # materialise: an `in` test would consume the iterator
if isinstance(s_list, set) and x in s_list:
return x, 1
for s in s_list:
if s == x:
return x, 1
sim = textdistance.sorensen(s, x)
if sim > max_sim:
max_sim = sim
res = s
if min_score <= max_sim:
return res, max_sim
def ocr_and_correct(img, s_list, cand_alphabet: str = None, min_score=0.5, log_level=None, model_name='conv-lite-fc'):
ocr = get_ocr(model_name)
ocr_str = ocr_for_single_line(img, cand_alphabet, ocr)
res = search_in_list(s_list, ocr_str, min_score)
if log_level:
logging.log(log_level, f'ocr_str, res: {ocr_str, res}')
return res[0] if res else None
Engine = MyCnOcr
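# Illustrative use of ocr_and_correct (a sketch; `img` is assumed to be an
# image crop in a format cnocr accepts, and the candidate list is made up):
# candidates = ['item_a', 'item_b', 'item_c']
# best = ocr_and_correct(img, candidates, min_score=0.5)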
|
Desktop Application/Basic/Python/Graph-Traversing-Visualizer/Graph Traversing Visualizer.py | shivam-s16/Project-Guidance | 219 | 12710018 | <reponame>shivam-s16/Project-Guidance<filename>Desktop Application/Basic/Python/Graph-Traversing-Visualizer/Graph Traversing Visualizer.py
# Graph Traversing #
from tkinter import *
import time
class GraphTraversal:
def __init__(self, root):
self.window = root
self.make_canvas = Canvas(self.window,bg="chocolate",relief=RAISED,bd=7,width=500,height=500)
self.make_canvas.pack()
# Status label initialization
self.status = None
# Some list initializations by default
self.vertex_store = []
self.total_circle = []
self.queue_bfs = []
self.stack_dfs = []
# Some default function calls
self.basic_set_up()
self.make_vertex()
def basic_set_up(self):
heading = Label(self.make_canvas,text="Graph Traversing Visualization",bg="chocolate",fg="yellow",font=("Arial",20,"bold","italic"))
heading.place(x=50,y=10)
bfs_btn = Button(self.window,text="BFS",font=("Arial",15,"bold"),bg="black",fg="green",relief=RAISED,bd=8,command=self.bfs_traversing)
bfs_btn.place(x=20,y=530)
dfs_btn = Button(self.window, text="DFS", font=("Arial", 15, "bold"), bg="black", fg="green", relief=RAISED, bd=8, command=self.dfs_traversing)
dfs_btn.place(x=400, y=530)
self.status = Label(self.make_canvas,text="Not Visited",bg="chocolate",fg="brown",font=("Arial",20,"bold","italic"))
self.status.place(x=50,y=450)
def make_vertex(self):  # Create the vertices and their connections
for i in range(15):
self.total_circle.append(i)
self.total_circle[0] = self.make_canvas.create_oval(80,250,110,280,width=3)
self.total_circle[1] = self.make_canvas.create_oval(160, 180, 190, 210, width=3)
self.total_circle[2] = self.make_canvas.create_oval(160, 320, 190, 350, width=3)
self.total_circle[3] = self.make_canvas.create_oval(230, 130, 260, 160, width=3)
self.total_circle[4] = self.make_canvas.create_oval(230, 230, 260, 260, width=3)
self.total_circle[5] = self.make_canvas.create_oval(230, 270, 260, 300, width=3)
self.total_circle[6] = self.make_canvas.create_oval(230, 370, 260, 400, width=3)
self.total_circle[7] = self.make_canvas.create_oval(280, 80, 310, 110, width=3)
self.total_circle[8] = self.make_canvas.create_oval(280, 180, 310, 210, width=3)
self.total_circle[9] = self.make_canvas.create_oval(280, 250, 310, 280, width=3)
self.total_circle[10] = self.make_canvas.create_oval(280, 320, 310, 350, width=3)
self.total_circle[11] = self.make_canvas.create_oval(280, 420, 310, 450, width=3)
self.total_circle[12] = self.make_canvas.create_oval(350, 130, 380, 160, width=3)
self.total_circle[13] = self.make_canvas.create_oval(350, 220, 380, 250, width=3)
self.total_circle[14] = self.make_canvas.create_oval(350, 360, 380, 390, width=3)
self.make_connector_up(0, 1)
self.make_connector_down(0, 2)
self.collector_connector(0,1,2)
self.make_connector_up(1, 3)
self.make_connector_down(1, 4)
self.collector_connector(1, 3, 4)
self.make_connector_up(2, 5)
self.make_connector_down(2, 6)
self.collector_connector(2, 5, 6)
self.make_connector_up(3, 7)
self.make_connector_down(3, 8)
self.collector_connector(3, 7, 8)
self.make_connector_down(4, 9)
self.collector_connector(4, None, 9)
self.make_connector_down(5, 10)
self.collector_connector(5, None, 10)
self.make_connector_down(6, 11)
self.collector_connector(6, None, 11)
self.make_connector_up(8, 12)
self.collector_connector(8, 12, None)
self.make_connector_up(9, 13)
self.collector_connector(9, 13, None)
self.make_connector_down(10, 14)
self.collector_connector(10, None, 14)
print(self.vertex_store)
def make_connector_up(self,index1,index2):  # Draw a connector line to the upper child node
first_coord = self.make_canvas.coords(self.total_circle[index1])# Source node coordinates
second_coord = self.make_canvas.coords(self.total_circle[index2])# Destination node coordinates
line_start_x = (first_coord[0]+first_coord[2]) / 2# Connector line start_x
line_end_x = (second_coord[0]+second_coord[2]) / 2# Connector line end_x
line_start_y = (first_coord[1]+first_coord[3]) / 2# Connector line start_y
line_end_y = (second_coord[1]+second_coord[3]) / 2# Connector line end_y
self.make_canvas.create_line(line_start_x+10,line_start_y-10,line_end_x-10,line_end_y+10,width=3)
def make_connector_down(self,index1,index2):  # Draw a connector line to the lower child node
first_coord = self.make_canvas.coords(self.total_circle[index1])# Source node coordinates
second_coord = self.make_canvas.coords(self.total_circle[index2])# Destination node coordinates
line_start_x = (first_coord[0] + first_coord[2]) / 2# Connector line start_x
line_end_x = (second_coord[0] + second_coord[2]) / 2# Connector line end_x
line_start_y = (first_coord[1] + first_coord[3]) / 2# Connector line start_y
line_end_y = (second_coord[1] + second_coord[3]) / 2# Connector line end_y
self.make_canvas.create_line(line_start_x+12 , line_start_y +5, line_end_x - 12, line_end_y -5, width=3)
def collector_connector(self,source,connector1,connector2):  # Collect and store each node's connection data
temp = []
temp.append(self.total_circle[source])
if connector1:
temp.append(self.total_circle[connector1])
else:
temp.append(None)
if connector2:
temp.append(self.total_circle[connector2])
else:
temp.append(None)
self.vertex_store.append(temp)
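# vertex_store layout: one entry per parent node, stored as
# [source_canvas_id, up_child_canvas_id_or_None, down_child_canvas_id_or_None].
# Entries are appended in ascending order of canvas ids, which is what makes
# the binary search below valid.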
def binary_search(self,start,end,find_it_as_source):  # Binary search over vertex_store by source canvas id
while start<=end:
mid = int((start+end)/2)
if self.vertex_store[mid][0] == find_it_as_source:
return self.vertex_store[mid]
elif self.vertex_store[mid][0] < find_it_as_source:
start = mid + 1
else:
end = mid - 1
return -1
def bfs_traversing(self):
try:
self.status['text'] = "Red: Visited"
self.queue_bfs.append(self.vertex_store[0][0])
while self.queue_bfs:
temp = self.binary_search(0,9,self.queue_bfs[0])
if temp != -1:
if temp[1]:
self.queue_bfs.append(temp[1])
if temp[2]:
self.queue_bfs.append(temp[2])
take_vertex = self.queue_bfs.pop(0)
print(take_vertex)
self.make_canvas.itemconfig(take_vertex,fill="red")
self.window.update()
time.sleep(0.3)
self.status['text'] = "All node Visited"
except:
print("Force stop error")
def dfs_traversing(self):
try:
self.status['text'] = "Blue: Visited"
self.stack_dfs.append(self.vertex_store[0][0])
while self.stack_dfs:
take_vertex = self.stack_dfs.pop()
print(take_vertex)
self.make_canvas.itemconfig(take_vertex, fill="blue")
self.window.update()
time.sleep(0.3)
temp = self.binary_search(0, 9, take_vertex)
if temp != -1:
if temp[1]:
self.stack_dfs.append(temp[1])
if temp[2]:
self.stack_dfs.append(temp[2])
self.status['text'] = "All node Visited"
except:
print("Force stop error")
if __name__ == '__main__':
window = Tk()
window.title("Graph Traversal Visualizer")
window.geometry("400x600")
window.maxsize(500,600)
window.minsize(500,600)
window.config(bg="orange")
GraphTraversal(window)
window.mainloop() |
python/introduction/interchange.py | Sudhanshu-Srivastava/hackerrank-1 | 194 | 12710047 | #https://www.hackerrank.com/challenges/interchange-two-numbers
import fileinput
#Input
a, b = fileinput.input()
#Solve
a, b = b, a
#Output
print(a)
print(b) |
tools/Polygraphy/polygraphy/mod/__init__.py | KaliberAI/TensorRT | 5,249 | 12710074 | <filename>tools/Polygraphy/polygraphy/mod/__init__.py
from polygraphy.mod.importer import *
from polygraphy.mod.exporter import *
from polygraphy.mod.util import version
|
hfnet/evaluation/cpp_localization.py | maxtomCMU/hfnet | 555 | 12710078 | import numpy as np
import cv2
import logging
from .utils.localization import LocResult
class CppLocalization:
def __init__(self, db_ids, local_db, global_descriptors, images, points):
import _hloc_cpp
self.hloc = _hloc_cpp.HLoc()
id_to_idx = {}
old_to_new_kpt = {}
for idx, i in enumerate(db_ids):
keypoints = local_db[i].keypoints.T.astype(np.float32).copy()
local_desc = local_db[i].descriptors.T.astype(np.float32).copy()
global_desc = global_descriptors[idx].astype(np.float32).copy()
# keypoints are NOT undistorted or normalized
new_idx = self.hloc.addImage(global_desc, keypoints, local_desc)
id_to_idx[i] = new_idx
old_to_new_kpt[i] = {
k: j for j, k
in enumerate(np.where(images[i].point3D_ids >= 0)[0])}
for i, pt in points.items():
observations = np.array(
[[id_to_idx[im_id], old_to_new_kpt[im_id][kpt_id]]
for im_id, kpt_id in zip(pt.image_ids, pt.point2D_idxs)],
dtype=np.int32)
self.hloc.add3dPoint(
pt.xyz.astype(np.float32).copy(), observations.copy())
self.hloc.buildIndex()
def localize(self, query_info, query_item, global_transf, local_transf):
global_desc = global_transf(query_item.global_desc[np.newaxis])[0]
local_desc = local_transf(query_item.local_desc)
keypoints = cv2.undistortPoints(
query_item.keypoints[np.newaxis], query_info.K,
np.array([query_info.dist, 0, 0, 0]))[0]
logging.info('Localizing image %s', query_info.name)
ret = self.hloc.localize(
global_desc.astype(np.float32),
keypoints.astype(np.float32).T.copy(),
local_desc.astype(np.float32).T.copy())
(success, num_components_total, num_components_tested,
last_component_size, num_db_landmarks, num_matches,
num_inliers, num_iters, global_ms, covis_ms, local_ms, pnp_ms) = ret
result = LocResult(success, num_inliers, 0, np.eye(4))
stats = {
'success': success,
'num_components_total': num_components_total,
'num_components_tested': num_components_tested,
'last_component_size': last_component_size,
'num_db_landmarks': num_db_landmarks,
'num_matches': num_matches,
'num_inliers': num_inliers,
'num_ransac_iters': num_iters,
'timings': {
'global': global_ms,
'covis': covis_ms,
'local': local_ms,
'pnp': pnp_ms,
}
}
return (result, stats)
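# Illustrative call pattern (a sketch; every object below is hypothetical and
# the _hloc_cpp extension must be built and importable):
# loc = CppLocalization(db_ids, local_db, global_descriptors, images, points)
# result, stats = loc.localize(query_info, query_item, global_transf, local_transf)
# logging.info('success=%s inliers=%d', stats['success'], stats['num_inliers'])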
|
supriya/ugens/gendyn.py | butayama/supriya | 191 | 12710087 | <reponame>butayama/supriya<gh_stars>100-1000
import collections
from supriya import CalculationRate
from supriya.synthdefs import UGen
class Gendy1(UGen):
"""
A dynamic stochastic synthesis generator.
::
>>> gendy_1 = supriya.ugens.Gendy1.ar(
... adparam=1,
... ampdist=1,
... ampscale=0.5,
... ddparam=1,
... durdist=1,
... durscale=0.5,
... init_cps=12,
... knum=10,
... maxfrequency=660,
... minfrequency=440,
... )
>>> gendy_1
Gendy1.ar()
"""
### CLASS VARIABLES ###
_ordered_input_names = collections.OrderedDict(
[
("ampdist", 1),
("durdist", 1),
("adparam", 1),
("ddparam", 1),
("minfrequency", 440),
("maxfrequency", 660),
("ampscale", 0.5),
("durscale", 0.5),
("init_cps", 12),
("knum", None),
]
)
_valid_calculation_rates = (CalculationRate.AUDIO, CalculationRate.CONTROL)
### INITIALIZER ###
def __init__(
self,
calculation_rate=None,
adparam=1,
ampdist=1,
ampscale=0.5,
ddparam=1,
durdist=1,
durscale=0.5,
init_cps=12,
knum=None,
maxfrequency=660,
minfrequency=440,
):
if knum is None:
knum = init_cps
UGen.__init__(
self,
calculation_rate=calculation_rate,
adparam=adparam,
ampdist=ampdist,
ampscale=ampscale,
ddparam=ddparam,
durdist=durdist,
durscale=durscale,
init_cps=init_cps,
knum=knum,
maxfrequency=maxfrequency,
minfrequency=minfrequency,
)
class Gendy2(UGen):
"""
A dynamic stochastic synthesis generator.
::
>>> gendy_2 = supriya.ugens.Gendy2.ar(
... a=1.17,
... adparam=1,
... ampdist=1,
... ampscale=0.5,
... c=0.31,
... ddparam=1,
... durdist=1,
... durscale=0.5,
... init_cps=12,
... knum=10,
... maxfrequency=660,
... minfrequency=440,
... )
>>> gendy_2
Gendy2.ar()
"""
_ordered_input_names = collections.OrderedDict(
[
("ampdist", 1),
("durdist", 1),
("adparam", 1),
("ddparam", 1),
("minfrequency", 440),
("maxfrequency", 660),
("ampscale", 0.5),
("durscale", 0.5),
("init_cps", 12),
("knum", None),
("a", 1.17),
("c", 0.31),
]
)
_valid_calculation_rates = (CalculationRate.AUDIO, CalculationRate.CONTROL)
class Gendy3(UGen):
"""
A dynamic stochastic synthesis generator.
::
>>> gendy_3 = supriya.ugens.Gendy3.ar(
... adparam=1,
... ampdist=1,
... ampscale=0.5,
... ddparam=1,
... durdist=1,
... durscale=0.5,
... frequency=440,
... init_cps=12,
... knum=10,
... )
>>> gendy_3
Gendy3.ar()
"""
_ordered_input_names = collections.OrderedDict(
[
("ampdist", 1),
("durdist", 1),
("adparam", 1),
("ddparam", 1),
("frequency", 440),
("ampscale", 0.5),
("durscale", 0.5),
("init_cps", 12),
("knum", None),
]
)
_valid_calculation_rates = (CalculationRate.AUDIO, CalculationRate.CONTROL)
|
scripts/submit.py | andrewmzhang/displayadv | 120 | 12710120 | <filename>scripts/submit.py
'''
@author: <NAME>
'''
import pandas as pd
import sys
import numpy as np
import gzip
df = pd.read_csv(sys.stdin)
p = 0.55 * df.p1 + 0.15 * df.p2 + 0.15 * df.p3 + 0.15 * df.p4
df['Predicted'] = 1.0 / (1.0 + np.exp(-p))
submission = 'submission.csv.gz'
print('saving to', submission, '...')
with gzip.open(submission, 'wt') as f:
df[['Id', 'Predicted']].to_csv(f, index=False)
|
tests/py/test_privacy_json.py | kant/gratipay.com | 517 | 12710127 | from __future__ import print_function, unicode_literals
from aspen import json
from gratipay.testing import Harness
class Tests(Harness):
def setUp(self):
Harness.setUp(self)
self.make_participant('alice', claimed_time='now')
def hit_privacy(self, method='GET', expected_code=200, **kw):
response = self.client.hit(method, "/~alice/privacy.json", auth_as='alice', **kw)
if response.code != expected_code:
print(response.body)
return response
def test_participant_can_get_their_privacy_settings(self):
response = self.hit_privacy('GET')
actual = json.loads(response.body)
assert actual == {
'is_searchable': True,
'anonymous_giving': False,
}
def test_participant_can_toggle_is_searchable(self):
response = self.hit_privacy('POST', data={'toggle': 'is_searchable'})
actual = json.loads(response.body)
assert actual['is_searchable'] is False
def test_participant_can_toggle_is_searchable_back(self):
response = self.hit_privacy('POST', data={'toggle': 'is_searchable'})
response = self.hit_privacy('POST', data={'toggle': 'is_searchable'})
actual = json.loads(response.body)
assert actual['is_searchable'] is True
def test_participant_can_toggle_anonymous_giving(self):
response = self.hit_privacy('POST', data={'toggle': 'anonymous_giving'})
actual = json.loads(response.body)
assert actual['anonymous_giving'] is True
def test_participant_can_toggle_anonymous_giving_back(self):
response = self.hit_privacy('POST', data={'toggle': 'anonymous_giving'})
response = self.hit_privacy('POST', data={'toggle': 'anonymous_giving'})
actual = json.loads(response.body)['anonymous_giving']
assert actual is False
# Related to is-searchable
def test_meta_robots_tag_added_on_opt_out(self):
self.hit_privacy('POST', data={'toggle': 'is_searchable'})
expected = '<meta name="robots" content="noindex,nofollow" />'
assert expected in self.client.GET("/~alice/").body
def test_participant_does_show_up_on_search(self):
assert 'alice' in self.client.GET("/search?q=alice").body
def test_participant_doesnt_show_up_on_search(self):
self.hit_privacy('POST', data={'toggle': 'is_searchable'})
assert 'alice' not in self.client.GET("/search.json?q=alice").body
# Related to anonymous_giving
def test_anon_can_see_giving_for_non_anonymous_giving(self):
self.make_participant('bob', claimed_time='now',
giving=10.79, ngiving_to=342, anonymous_giving=False)
assert '10.79' in self.client.GET('/~bob/').body
assert '342' in self.client.GET('/~bob/').body
def test_auth_can_see_giving_for_non_anonymous_giving(self):
self.make_participant('bob', claimed_time='now',
giving=10.79, ngiving_to=342, anonymous_giving=False)
assert '10.79' in self.client.GET('/~bob/', auth_as='alice').body
assert '342' in self.client.GET('/~bob/', auth_as='alice').body
def test_admin_can_see_giving_for_non_anonymous_giving(self):
self.make_participant('bob', claimed_time='now',
giving=10.79, ngiving_to=342, anonymous_giving=False)
self.make_participant('admin', is_admin=True)
assert '10.79' in self.client.GET('/~bob/', auth_as='admin').body
assert '342' in self.client.GET('/~bob/', auth_as='admin').body
assert '[342]' not in self.client.GET('/~bob/', auth_as='admin').body
def test_self_can_see_giving_for_non_anonymous_giving(self):
self.make_participant('bob', claimed_time='now',
giving=10.79, ngiving_to=342, anonymous_giving=False)
assert '10.79' in self.client.GET('/~bob/', auth_as='bob').body.decode('utf8')
assert '342' in self.client.GET('/~bob/', auth_as='bob').body.decode('utf8')
assert '[342]' not in self.client.GET('/~bob/', auth_as='bob').body.decode('utf8')
def test_anon_cannot_see_giving_for_anonymous_giving(self):
self.make_participant('bob', claimed_time='now',
giving=10.79, ngiving_to=342, anonymous_giving=True)
assert '10.79' not in self.client.GET('/~bob/').body
assert '342' not in self.client.GET('/~bob/').body
def test_auth_cannot_see_giving_for_anonymous_giving(self):
self.make_participant('bob', claimed_time='now',
giving=10.79, ngiving_to=342, anonymous_giving=True)
assert '10.79' not in self.client.GET('/~bob/', auth_as='alice').body
assert '342' not in self.client.GET('/~bob/', auth_as='alice').body
def test_admin_can_see_giving_for_anonymous_giving(self):
self.make_participant('bob', claimed_time='now',
giving=10.79, ngiving_to=342, anonymous_giving=True)
self.make_participant('admin', is_admin=True)
assert '10.79' in self.client.GET('/~bob/', auth_as='admin').body
assert '[342]' in self.client.GET('/~bob/', auth_as='admin').body
def test_self_can_see_giving_for_anonymous_giving(self):
self.make_participant('bob', claimed_time='now',
giving=10.79, ngiving_to=342, anonymous_giving=True)
assert '10.79' in self.client.GET('/~bob/', auth_as='bob').body.decode('utf8')
assert '[342]' in self.client.GET('/~bob/', auth_as='bob').body.decode('utf8')
|
DPGAnalysis/Skims/python/EGPDSkim_cfg.py | ckamtsikis/cmssw | 852 | 12710142 | import FWCore.ParameterSet.Config as cms
process = cms.Process("SKIM")
process.configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('$Revision: 1.4 $'),
name = cms.untracked.string('$Source: /cvs/CMSSW/CMSSW/DPGAnalysis/Skims/python/EGPDSkim_cfg.py,v $'),
annotation = cms.untracked.string('EGamma skim')
)
#
#
# This is for testing purposes.
#
#
##run143960
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'/store/data/Run2010A/EG/RECO/v4/000/143/960/84DEE17A-44B1-DF11-B844-001D09F29849.root'
),
secondaryFileNames = cms.untracked.vstring(
'/store/data/Run2010A/EG/RAW/v1/000/143/960/C40C9318-0FB1-DF11-A974-0030487CBD0A.root')
)
process.source.inputCommands = cms.untracked.vstring("keep *", "drop *_MEtoEDMConverter_*_*")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1000)
)
#------------------------------------------
# Load standard sequences.
#------------------------------------------
process.load('Configuration/StandardSequences/MagneticField_AutoFromDBCurrent_cff')
process.load('Configuration/StandardSequences/GeometryIdeal_cff')
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.GlobalTag.globaltag = 'GR10_P_V8::All'
process.load("Configuration/StandardSequences/RawToDigi_Data_cff")
process.load("Configuration/StandardSequences/Reconstruction_cff")
process.load('Configuration/EventContent/EventContent_cff')
#drop collections created on the fly
process.FEVTEventContent.outputCommands.append("drop *_MEtoEDMConverter_*_*")
process.FEVTEventContent.outputCommands.append("drop *_*_*_SKIM")
#
# Load common sequences
#
process.load('L1TriggerConfig.L1GtConfigProducers.L1GtTriggerMaskAlgoTrigConfig_cff')
process.load('L1TriggerConfig.L1GtConfigProducers.L1GtTriggerMaskTechTrigConfig_cff')
process.load('HLTrigger/HLTfilters/hltLevel1GTSeed_cfi')
#################################WZFilter############################################
process.hltFilter = cms.EDFilter("HLTHighLevel",
TriggerResultsTag = cms.InputTag("TriggerResults","","HLT"),
HLTPaths = cms.vstring(
# "HLT_Photon15_L1R",
# "HLT_Photon15_Cleaned_L1R",
# "HLT_Photon20_Cleaned_L1R",
"HLT_Ele15_LW_L1R",
"HLT_Ele15_SW_L1R",
"HLT_Ele15_SW_CaloEleId_L1R",
"HLT_Ele17_SW_CaloEleId_L1R",
"HLT_Ele17_SW_L1R",
"HLT_Ele17_SW_TightEleId_L1R",
"HLT_Ele17_SW_TightCaloEleId_SC8HE_L1R"
),
eventSetupPathsKey = cms.string(''),
andOr = cms.bool(True),
throw = cms.bool(False),
saveTags = cms.bool(False)
)
process.load("DPGAnalysis/Skims/WZinterestingEventFilter_cfi")
process.WZfilter = cms.Path(process.hltFilter*process.WZInterestingEventSelector)
# Output definition
process.outWZfilter = cms.OutputModule("PoolOutputModule",
# splitLevel = cms.untracked.int32(0),
outputCommands = process.FEVTEventContent.outputCommands,
fileName = cms.untracked.string('/tmp/azzi/EGMWZ_filter.root'),
dataset = cms.untracked.PSet(dataTier = cms.untracked.string('RAW-RECO'),
filterName = cms.untracked.string('EGMWZFilter')),
SelectEvents = cms.untracked.PSet(SelectEvents = cms.vstring('WZfilter')
))
#################################logerrorharvester############################################
process.load("FWCore.Modules.logErrorFilter_cfi")
from Configuration.StandardSequences.RawToDigi_Data_cff import gtEvmDigis
process.gtEvmDigis = gtEvmDigis.clone()
process.stableBeam = cms.EDFilter("HLTBeamModeFilter",
L1GtEvmReadoutRecordTag = cms.InputTag("gtEvmDigis"),
AllowedBeamMode = cms.vuint32(11),
saveTags = cms.bool(False)
)
process.logerrorpath=cms.Path(process.gtEvmDigis+process.stableBeam+process.logErrorFilter)
process.outlogerr = cms.OutputModule("PoolOutputModule",
outputCommands = process.FEVTEventContent.outputCommands,
fileName = cms.untracked.string('/tmp/azzi/logerror_filter.root'),
dataset = cms.untracked.PSet(dataTier = cms.untracked.string('RAW-RECO'),
filterName = cms.untracked.string('Skim_logerror')),
SelectEvents = cms.untracked.PSet(SelectEvents = cms.vstring("logerrorpath")
))
#======================
process.options = cms.untracked.PSet(
wantSummary = cms.untracked.bool(True)
)
process.outpath = cms.EndPath(process.outlogerr+process.outWZfilter)
|
examples/modify_header_example.py | pbsds/sanic | 1,883 | 12710159 | """
Modify header or status in response
"""
from sanic import Sanic, response
app = Sanic("Example")
@app.route("/")
def handle_request(request):
return response.json(
{"message": "Hello world!"},
headers={"X-Served-By": "sanic"},
status=200,
)
@app.route("/unauthorized")
def handle_unauthorized(request):
return response.json(
{"message": "You are not authorized"},
headers={"X-Served-By": "sanic"},
status=404,
)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8000, debug=True)
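# Quick check (illustrative):
#   curl -i http://127.0.0.1:8000/              -> 200, includes X-Served-By
#   curl -i http://127.0.0.1:8000/unauthorized  -> 404 with a JSON body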
|
configs/det/common/mstrain_3x_coco_panoptic.py | yinchimaoliang/K-Net | 361 | 12710175 | _base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoPanopticDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# file_client_args = dict(backend='disk',)
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(
backend='memcached',
server_list_cfg='/mnt/lustre/share/memcached_client/server_list.conf',
client_cfg='/mnt/lustre/share/memcached_client/client.conf',
sys_path='/mnt/lustre/share/pymc/py3',
)
# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)],
# multiscale_mode='range'
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(
type='LoadPanopticAnnotations',
with_bbox=True,
with_mask=True,
with_seg=True,
file_client_args=file_client_args),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 800)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(
type='Collect',
keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
# Use RepeatDataset to speed up training
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/panoptic_train2017.json',
img_prefix=data_root + 'train2017/',
seg_prefix=data_root + 'annotations/panoptic_train2017/',
pipeline=train_pipeline)),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/panoptic_val2017.json',
img_prefix=data_root + 'val2017/',
seg_prefix=data_root + 'annotations/panoptic_val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/panoptic_val2017.json',
img_prefix=data_root + 'val2017/',
seg_prefix=data_root + 'annotations/panoptic_val2017/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric=['pq'])
# optimizer
# this is different from the original 1x schedule that use SGD
optimizer = dict(
type='AdamW',
lr=0.0001,
weight_decay=0.05,
paramwise_cfg=dict(custom_keys={'backbone': dict(lr_mult=0.25)}))
optimizer_config = dict(grad_clip=dict(max_norm=1, norm_type=2))
# learning policy
# Experiments show that using step=[9, 11] has higher performance
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=1000,
warmup_ratio=0.001,
step=[9, 11])
runner = dict(type='EpochBasedRunner', max_epochs=12)
|
loader/transforms.py | mmendiet/gate-decorator-pruning | 158 | 12710220 | """
PyTorch (0.3.1) misses some transforms; this module will be removed once they are officially supported.
"""
import torch
import numpy as np
from PIL import Image
import torchvision.transforms.functional as F
import torch.nn.functional as Func
import random
imagenet_pca = {
'eigval': np.asarray([0.2175, 0.0188, 0.0045]),
'eigvec': np.asarray([
[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203],
])
}
class Lighting(object):
def __init__(self, alphastd,
eigval=imagenet_pca['eigval'],
eigvec=imagenet_pca['eigvec']):
self.alphastd = alphastd
assert eigval.shape == (3,)
assert eigvec.shape == (3, 3)
self.eigval = eigval
self.eigvec = eigvec
def __call__(self, img):
if self.alphastd == 0.:
return img
rnd = np.random.randn(3) * self.alphastd
rnd = rnd.astype('float32')
v = rnd
old_dtype = np.asarray(img).dtype
v = v * self.eigval
v = v.reshape((3, 1))
inc = np.dot(self.eigvec, v).reshape((3,))
img = np.add(img, inc)
if old_dtype == np.uint8:
img = np.clip(img, 0, 255)
img = Image.fromarray(img.astype(old_dtype), 'RGB')
return img
def __repr__(self):
return self.__class__.__name__ + '()'
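# Illustrative composition (a sketch; assumes torchvision.transforms imported
# as T). Lighting operates on a PIL image, so place it before ToTensor:
# train_tf = T.Compose([
#     T.RandomResizedCrop(224),
#     Lighting(0.1),  # AlexNet-style PCA colour jitter
#     T.ToTensor(),
# ])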
class InputList(object):
def __init__(self, scales):
self.scales = scales
def __call__(self, img):
# assert img.size[0] == self.scales[0], 'image shape should be equal to max scale'
# input_list = []
# for i in range(len(self.scales)):
# input_list.append(F.resize(img, self.scales[i]))
assert img.size()[1] == self.scales[0], 'image shape should be equal to max scale'
input_list = []
img = img[np.newaxis, :]
for i in range(len(self.scales)):
resized_img = Func.interpolate(img, (self.scales[i], self.scales[i]), mode='bilinear', align_corners=True)
resized_img = torch.squeeze(resized_img)
input_list.append(resized_img)
return input_list
class ListToTensor(object):
def __call__(self, input_list):
tensor_list = []
for i in range(len(input_list)):
pic = input_list[i]
tensor_list.append(F.to_tensor(pic).detach())
return tensor_list
class ListNormalize(object):
def __init__(self, mean, std, inplace=False):
self.mean = mean
self.std = std
self.inplace = inplace
def __call__(self, tensor_list):
norm_list = []
for i in range(len(tensor_list)):
norm_list.append(F.normalize(tensor_list[i], self.mean, self.std, self.inplace))
return norm_list |
app/controllers/dns/zones.py | grepleria/SnitchDNS | 152 | 12710227 | <reponame>grepleria/SnitchDNS
from . import bp
from flask_login import current_user, login_required
from flask import render_template, redirect, url_for, flash, request, send_file
from app.lib.base.provider import Provider
@bp.route('/', methods=['GET'])
@login_required
def index():
results_per_page = 20
provider = Provider()
zones = provider.dns_zones()
tags = provider.tags()
search = request.args.get('search', '').strip()
search_tags = request.args.getlist('tags')
page = int(request.args.get('page', 1))
if page <= 0:
page = 1
user_id = None if current_user.admin else current_user.id
page_url = 'tags=' + '&tags='.join(search_tags)
page_url += "&search={0}&page=".format(search)
return render_template(
'dns/zones/index.html',
zones=zones.get_user_zones_paginated(user_id, order_by='domain', page=page, per_page=results_per_page, search=search, tags=search_tags),
page=page,
per_page=results_per_page,
page_url=page_url,
search=search,
search_tags=search_tags,
tags=tags.all(user_id=user_id, order_by='asc', order_column='name')
)
@bp.route('/<int:dns_zone_id>/view', methods=['GET'])
@login_required
def zone_view(dns_zone_id):
provider = Provider()
zones = provider.dns_zones()
records = provider.dns_records()
if not zones.can_access(dns_zone_id, current_user.id):
flash('Access Denied', 'error')
return redirect(url_for('home.index'))
zone = zones.get(dns_zone_id)
if not zone:
flash('Zone not found', 'error')
return redirect(url_for('home.index'))
return render_template(
'dns/zones/view.html',
zone=zone,
records=records.get_zone_records(dns_zone_id, order_column='type'),
section='records',
tab='records'
)
@bp.route('/<int:dns_zone_id>/edit', methods=['GET'])
@login_required
def zone_edit(dns_zone_id):
provider = Provider()
zones = provider.dns_zones()
tags = provider.tags()
zone = None
dns_zone_id = 0 if dns_zone_id < 0 else dns_zone_id
if dns_zone_id > 0:
if not zones.can_access(dns_zone_id, current_user.id):
flash('Access Denied', 'error')
return redirect(url_for('home.index'))
zone = zones.get(dns_zone_id)
if not zone:
flash('Zone not found', 'error')
return redirect(url_for('home.index'))
username = current_user.username if zone is None else zone.username
user_id = zone.user_id if dns_zone_id > 0 else current_user.id
return render_template(
'dns/zones/edit.html',
dns_zone_id=dns_zone_id,
user_domain=zones.get_user_base_domain(username),
zone=zone,
tags=tags.all(user_id=user_id, order_column='name', order_by='asc')
)
@bp.route('/<int:dns_zone_id>/edit/save', methods=['POST'])
@login_required
def zone_edit_save(dns_zone_id):
dns_zone_id = 0 if dns_zone_id < 0 else dns_zone_id
return __zone_create() if dns_zone_id == 0 else __zone_update(dns_zone_id)
@bp.route('/<int:dns_zone_id>/delete', methods=['POST'])
@login_required
def zone_delete(dns_zone_id):
provider = Provider()
zones = provider.dns_zones()
if not zones.can_access(dns_zone_id, current_user.id):
flash('Access Denied', 'error')
return redirect(url_for('home.index'))
zone = zones.get(dns_zone_id)
if not zone:
flash('Could not get zone', 'error')
return redirect(url_for('dns.index'))
elif zone.master:
flash('You cannot delete a master zone', 'error')
return redirect(url_for('dns.index'))
# Not using the instance's .delete() attribute because we first need to delete all child records.
if not zones.delete(dns_zone_id):
flash('Could not delete zone', 'error')
return redirect(url_for('dns.index'))
flash('Zone deleted', 'success')
return redirect(url_for('dns.index'))
@bp.route('/delete', methods=['POST'])
@login_required
def zone_group_delete():
provider = Provider()
zones = provider.dns_zones()
search = request.form['search'].strip()
search_tags = request.form['tags'].strip().split(',')
zones.group_delete(current_user.id, search=search, tags=search_tags)
flash('Zone(s) deleted', 'success')
return redirect(url_for('dns.index'))
def __zone_create():
provider = Provider()
zones = provider.dns_zones()
dns_zone_id = 0
domain = request.form['domain'].strip().lower()
active = True if int(request.form.get('active', 0)) == 1 else False
catch_all = True if int(request.form.get('catch_all', 0)) == 1 else False
forwarding = True if int(request.form.get('forwarding', 0)) == 1 else False
regex = True if int(request.form.get('regex', 0)) == 1 else False
tags = request.form.getlist('tags')
zone = zones.new(domain, active, catch_all, forwarding, regex, current_user.id, update_old_logs=True)
if isinstance(zone, list):
for error in zone:
flash(error, 'error')
return redirect(url_for('dns.zone_edit', dns_zone_id=dns_zone_id))
zone = zones.save_tags(zone, tags)
if not zone:
flash('Could not save zone tags', 'error')
return redirect(url_for('dns.zone_edit', dns_zone_id=dns_zone_id))
flash('Zone created', 'success')
return redirect(url_for('dns.zone_view', dns_zone_id=zone.id))
def __zone_update(dns_zone_id):
provider = Provider()
zones = provider.dns_zones()
if not zones.can_access(dns_zone_id, current_user.id):
flash('Access Denied', 'error')
return redirect(url_for('home.index'))
zone = zones.get(dns_zone_id)
if not zone:
flash('Could not get zone', 'error')
return redirect(url_for('dns.zone_edit', dns_zone_id=dns_zone_id))
domain = request.form['domain'].strip().lower() if not zone.master else zone.domain
active = True if int(request.form.get('active', 0)) == 1 else False
catch_all = True if int(request.form.get('catch_all', 0)) == 1 else False
forwarding = True if int(request.form.get('forwarding', 0)) == 1 else False
regex = True if int(request.form.get('regex', 0)) == 1 else False
tags = request.form.getlist('tags')
if len(domain) == 0:
flash('Invalid domain', 'error')
return redirect(url_for('dns.zone_edit', dns_zone_id=dns_zone_id))
if zones.has_duplicate(dns_zone_id, domain):
flash('This domain already exists.', 'error')
return redirect(url_for('dns.zone_edit', dns_zone_id=dns_zone_id))
zone = zones.update(zone.id, domain, active, catch_all, forwarding, regex, zone.user_id, master=zone.master, update_old_logs=True)
if isinstance(zone, list):
for error in zone:
flash(error, 'error')
return redirect(url_for('dns.zone_edit', dns_zone_id=dns_zone_id))
zone = zones.save_tags(zone, tags)
if not zone:
flash('Could not save zone tags', 'error')
return redirect(url_for('dns.zone_edit', dns_zone_id=dns_zone_id))
flash('Zone saved', 'success')
return redirect(url_for('dns.zone_view', dns_zone_id=zone.id))
@bp.route('/create/log/<int:query_log_id>', methods=['POST'])
@login_required
def zone_create_from_log(query_log_id):
provider = Provider()
logging = provider.dns_logs()
zones = provider.dns_zones()
log = logging.get(query_log_id)
if not log:
flash('Could not retrieve log record', 'error')
return redirect(url_for('home.index'))
if log.dns_zone_id > 0:
# This means that the zone exists.
if not zones.can_access(log.dns_zone_id, current_user.id):
# This error is misleading on purpose to prevent zone enumeration. Not that it's that important, but meh.
flash('Could not retrieve log record', 'error')
return redirect(url_for('home.index'))
flash('Zone already exists', 'error')
return redirect(url_for('dns.zone_view', dns_zone_id=log.dns_zone_id))
zone = zones.new(log.domain, True, False, False, False, current_user.id, update_old_logs=True)
if isinstance(zone, list):
for error in zone:
flash(error, 'error')
return redirect(url_for('dns.zone_edit', dns_zone_id=0))
flash('Zone created', 'success')
return redirect(url_for('dns.zone_view', dns_zone_id=zone.id))
@bp.route('/export', methods=['POST'])
@login_required
def zones_export():
provider = Provider()
zones = provider.dns_zones()
search = request.form['search'].strip()
search_tags = request.form['tags'].strip().split(',')
result = zones.export(user_id=current_user.id, export_zones=True, export_records=True, compress_export=True, search=search, tags=search_tags)
if not result:
flash('Could not generate export file.', 'error')
return redirect(url_for('dns.index'))
# And download.
return send_file(result['zip'], attachment_filename='snitch_export.zip', as_attachment=True)
|
flownmt/modules/priors/prior.py | DeNeutoy/flowseq | 256 | 12710258 | <reponame>DeNeutoy/flowseq
import math
from typing import Dict, Tuple, Union
import torch
import torch.nn as nn
from flownmt.flows.nmt import NMTFlow
from flownmt.modules.priors.length_predictors import LengthPredictor
class Prior(nn.Module):
"""
Class for a Prior that wraps an NMTFlow.
"""
_registry = dict()
def __init__(self, flow: NMTFlow, length_predictor: LengthPredictor):
super(Prior, self).__init__()
assert flow.inverse, 'prior flow should have inverse mode'
self.flow = flow
self.length_unit = max(2, 2 ** (self.flow.levels - 1))
self.features = self.flow.features
self._length_predictor = length_predictor
self._length_predictor.set_length_unit(self.length_unit)
def sync(self):
self.flow.sync()
def predict_length(self, ctx: torch.Tensor, src_mask: torch.Tensor, topk: int = 1) -> Tuple[torch.LongTensor, torch.Tensor]:
"""
Args:
ctx: Tensor
tensor [batch, features]
src_mask: Tensor
tensor for source mask [batch, src_length]
topk: int (default 1)
return top k length candidates for each src sentence
Returns: LongTensor1, Tensor2
LongTensor1: tensor for lengths [batch, topk]
Tensor2: log probs for each length [batch, topk]
"""
return self._length_predictor.predict(ctx, src_mask, topk=topk)
def length_loss(self, ctx: torch.Tensor, src_mask: torch.Tensor, tgt_mask: torch.Tensor) -> torch.Tensor:
"""
Args:
ctx: Tensor
tensor [batch, features]
src_mask: Tensor
tensor for source mask [batch, src_length]
tgt_mask: Tensor
tensor for target mask [batch, tgt_length]
Returns: Tensor
tensor for loss [batch]
"""
return self._length_predictor.loss(ctx, src_mask, tgt_mask)
def decode(self, epsilon: torch.Tensor, tgt_mask: torch.Tensor,
src: torch.Tensor, src_mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
epsilon: Tensor
epsilon [batch, tgt_length, nz]
tgt_mask: Tensor
tensor of target masks [batch, tgt_length]
src: Tensor
source encoding [batch, src_length, hidden_size]
src_mask: Tensor
tensor of source masks [batch, src_length]
Returns: Tensor1, Tensor2
Tensor1: decoded latent code z [batch, tgt_length, nz]
Tensor2: log probabilities [batch]
"""
# [batch, tgt_length, nz]
z, logdet = self.flow.fwdpass(epsilon, tgt_mask, src, src_mask)
# [batch, tgt_length, nz]
log_probs = epsilon.mul(epsilon) + math.log(math.pi * 2.0)
# apply mask
log_probs = log_probs.mul(tgt_mask.unsqueeze(2))
# [batch]
log_probs = log_probs.view(z.size(0), -1).sum(dim=1).mul(-0.5) + logdet
return z, log_probs
def sample(self, nlengths: int, nsamples: int, src: torch.Tensor,
ctx: torch.Tensor, src_mask: torch.Tensor,
tau=0.0, include_zero=False) -> Tuple[Tuple[torch.Tensor, torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]:
"""
Args:
nlengths: int
number of lengths per sentence
nsamples: int
number of samples per sentence per length
src: Tensor
source encoding [batch, src_length, hidden_size]
ctx: Tensor
tensor for global state [batch, hidden_size]
src_mask: Tensor
tensor of masks [batch, src_length]
tau: float (default 0.0)
temperature of density
include_zero: bool (default False)
include zero sample
Returns: (Tensor1, Tensor2, Tensor3), (Tensor4, Tensor5), (Tensor6, Tensor7, Tensor8)
Tensor1: samples from the prior [batch * nlengths * nsamples, tgt_length, nz]
Tensor2: log probabilities [batch * nlengths * nsamples]
Tensor3: target masks [batch * nlengths * nsamples, tgt_length]
Tensor4: lengths [batch * nlengths]
Tensor5: log probabilities of lengths [batch * nlengths]
Tensor6: source encoding with shape [batch * nlengths * nsamples, src_length, hidden_size]
Tensor7: tensor for global state [batch * nlengths * nsamples, hidden_size]
Tensor8: source masks with shape [batch * nlengths * nsamples, src_length]
"""
batch = src.size(0)
batch_nlen = batch * nlengths
# [batch, nlenths]
lengths, log_probs_length = self.predict_length(ctx, src_mask, topk=nlengths)
# [batch * nlengths]
log_probs_length = log_probs_length.view(-1)
lengths = lengths.view(-1)
max_length = lengths.max().item()
# [batch * nlengths, max_length]
tgt_mask = torch.arange(max_length).to(src.device).unsqueeze(0).expand(batch_nlen, max_length).lt(lengths.unsqueeze(1)).float()
# [batch * nlengths, nsamples, tgt_length, nz]
epsilon = src.new_empty(batch_nlen, nsamples, max_length, self.features).normal_()
epsilon = epsilon.mul(tgt_mask.view(batch_nlen, 1, max_length, 1)) * tau
if include_zero:
epsilon[:, 0].zero_()
# [batch * nlengths * nsamples, tgt_length, nz]
epsilon = epsilon.view(-1, max_length, self.features)
if nsamples * nlengths > 1:
# [batch, nlengths * nsamples, src_length, hidden_size]
src = src.unsqueeze(1) + src.new_zeros(batch, nlengths * nsamples, *src.size()[1:])
# [batch * nlengths * nsamples, src_length, hidden_size]
src = src.view(batch_nlen * nsamples, *src.size()[2:])
# [batch, nlengths * nsamples, hidden_size]
ctx = ctx.unsqueeze(1) + ctx.new_zeros(batch, nlengths * nsamples, ctx.size(1))
# [batch * nlengths * nsamples, hidden_size]
ctx = ctx.view(batch_nlen * nsamples, ctx.size(2))
# [batch, nlengths * nsamples, src_length]
src_mask = src_mask.unsqueeze(1) + src_mask.new_zeros(batch, nlengths * nsamples, src_mask.size(1))
# [batch * nlengths * nsamples, src_length]
src_mask = src_mask.view(batch_nlen * nsamples, src_mask.size(2))
# [batch * nlengths, nsamples, tgt_length]
tgt_mask = tgt_mask.unsqueeze(1) + tgt_mask.new_zeros(batch_nlen, nsamples, tgt_mask.size(1))
# [batch * nlengths * nsamples, tgt_length]
tgt_mask = tgt_mask.view(batch_nlen * nsamples, tgt_mask.size(2))
# [batch * nlength * nsamples, tgt_length, nz]
z, log_probs = self.decode(epsilon, tgt_mask, src, src_mask)
return (z, log_probs, tgt_mask), (lengths, log_probs_length), (src, ctx, src_mask)
def log_probability(self, z: torch.Tensor, tgt_mask: torch.Tensor,
src: torch.Tensor, ctx: torch.Tensor, src_mask: torch.Tensor,
length_loss: bool = True) -> Tuple[torch.Tensor, Union[torch.Tensor, None]]:
"""
Args:
z: Tensor
tensor of latent code [batch, length, nz]
tgt_mask: Tensor
tensor of target masks [batch, length]
src: Tensor
source encoding [batch, src_length, hidden_size]
ctx: Tensor
tensor for global state [batch, hidden_size]
src_mask: Tensor
tensor of source masks [batch, src_length]
length_loss: bool (default True)
compute loss of length
Returns: Tensor1, Tensor2
Tensor1: log probabilities of z [batch]
Tensor2: length loss [batch]
"""
# [batch]
loss_length = self.length_loss(ctx, src_mask, tgt_mask) if length_loss else None
# [batch, length, nz]
epsilon, logdet = self.flow.bwdpass(z, tgt_mask, src, src_mask)
# [batch, tgt_length, nz]
log_probs = epsilon.mul(epsilon) + math.log(math.pi * 2.0)
# apply mask
log_probs = log_probs.mul(tgt_mask.unsqueeze(2))
log_probs = log_probs.view(z.size(0), -1).sum(dim=1).mul(-0.5) + logdet
return log_probs, loss_length
def init(self, z, tgt_mask, src, src_mask, init_scale=1.0):
return self.flow.bwdpass(z, tgt_mask, src, src_mask, init=True, init_scale=init_scale)
@classmethod
def register(cls, name: str):
Prior._registry[name] = cls
@classmethod
def by_name(cls, name: str):
return Prior._registry[name]
@classmethod
def from_params(cls, params: Dict) -> "Prior":
flow_params = params.pop('flow')
flow = NMTFlow.from_params(flow_params)
predictor_params = params.pop('length_predictor')
length_predictor = LengthPredictor.by_name(predictor_params.pop('type')).from_params(predictor_params)
return Prior(flow, length_predictor)
Prior.register('normal')
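# Usage sketch (illustrative, not part of the original module). The registry above
# lets a prior be rebuilt from a nested params dict; the 'flow' and
# 'length_predictor' keys mirror what `from_params` consumes, while the predictor
# 'type' value below is a hypothetical registered name:
# prior = Prior.by_name('normal').from_params({
#     'flow': flow_params,  # consumed by NMTFlow.from_params
#     'length_predictor': {'type': 'diff', **predictor_params},
# })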
|
courses/machine_learning/deepdive2/production_ml/labs/samples/contrib/azure-samples/databricks-pipelines/databricks_secretscope_pipeline.py | memeyankm/training-data-analyst | 6,140 | 12710313 | <filename>courses/machine_learning/deepdive2/production_ml/labs/samples/contrib/azure-samples/databricks-pipelines/databricks_secretscope_pipeline.py
"""Create a new secret scope in Databricks."""
import kfp.dsl as dsl
import kfp.compiler as compiler
import databricks
def create_secretscope(
scope_name,
string_secret,
byte_secret,
ref_secret_name,
ref_secret_key,
principal_name):
return databricks.CreateSecretScopeOp(
name="createsecretscope",
scope_name=scope_name,
initial_manage_principal="users",
secrets=[
{
"key": "string-secret",
"string_value": string_secret
},
{
"key": "byte-secret",
"byte_value": byte_secret
},
{
"key": "ref-secret",
"value_from": {
"secret_key_ref": {
"name": ref_secret_name,
"key": ref_secret_key
}
}
}
],
acls=[
{
"principal": principal_name,
"permission": "READ"
}
]
)
def delete_secretscope(scope_name):
return databricks.DeleteSecretScopeOp(
name="deletesecretscope",
scope_name=scope_name
)
@dsl.pipeline(
name="DatabricksSecretScope",
description="A toy pipeline that sets some secrets and acls in an Azure Databricks Secret Scope."
)
def calc_pipeline(
scope_name="test-secretscope",
string_secret="helloworld",
byte_secret="aGVsbG93b3JsZA==",
ref_secret_name="mysecret",
ref_secret_key="username",
principal_name="<EMAIL>"
):
create_secretscope_task = create_secretscope(
scope_name,
string_secret,
byte_secret,
ref_secret_name,
ref_secret_key,
principal_name)
delete_secretscope_task = delete_secretscope(scope_name)
delete_secretscope_task.after(create_secretscope_task)
if __name__ == "__main__":
compiler.Compiler()._create_and_write_workflow(
pipeline_func=calc_pipeline,
package_path=__file__ + ".tar.gz")
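# Deployment sketch (illustrative; the endpoint below is an assumption): besides the
# private compiler call above, the standard kfp SDK can submit the pipeline directly.
# import kfp
# client = kfp.Client(host="http://my-kfp-endpoint")
# client.create_run_from_pipeline_func(calc_pipeline, arguments={})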
|
rest-service/manager_rest/rest/requests_schema.py | TS-at-WS/cloudify-manager | 124 | 12710324 | <gh_stars>100-1000
#########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask_restful import fields
from flask_restful_swagger import swagger
@swagger.model
class ExecutionRequest(object):
resource_fields = {
'workflow_id': fields.String,
'parameters': fields.Raw,
'allow_custom_parameters': fields.Boolean,
'force': fields.Boolean
}
@swagger.model
class DeploymentRequest(object):
resource_fields = {
'blueprint_id': fields.String,
}
@swagger.model
class DeploymentModificationRequest(object):
resource_fields = {
'stage': fields.String,
'nodes': fields.Raw,
}
@swagger.model
class ModifyExecutionRequest(object):
resource_fields = {
'action': fields.String
}
@swagger.model
class PostProviderContextRequest(object):
resource_fields = {
'name': fields.String,
'context': fields.Raw
}
@swagger.model
class EvaluateFunctionsRequest(object):
resource_fields = {
'deployment_id': fields.String,
'context': fields.Raw,
'payload': fields.Raw
}
|
hearthbreaker/cards/minions/mage.py | souserge/hearthbreaker | 429 | 12710328 | <reponame>souserge/hearthbreaker<filename>hearthbreaker/cards/minions/mage.py
import hearthbreaker.cards
from hearthbreaker.cards.base import MinionCard
from hearthbreaker.constants import CHARACTER_CLASS, CARD_RARITY, MINION_TYPE
from hearthbreaker.game_objects import Minion
from hearthbreaker.tags.action import AddCard, Give, GiveAura, Damage
from hearthbreaker.tags.base import Effect, Aura, Battlecry, AuraUntil, ActionTag
from hearthbreaker.tags.condition import HasSecret, GreaterThan, IsType, Adjacent, IsSecret, IsSpell
from hearthbreaker.tags.event import SpellCast, DidDamage, TurnEnded, CardPlayed, Drawn, CardUsed
from hearthbreaker.tags.selector import SelfSelector, PlayerSelector, TargetSelector, \
CharacterSelector, EnemyPlayer, RandomPicker, MinionSelector, Count, BothPlayer, CardSelector
from hearthbreaker.tags.status import ChangeAttack, ChangeHealth, Frozen, NoSpellTarget, ManaChange
class ManaWyrm(MinionCard):
def __init__(self):
super().__init__("Mana Wyrm", 1, CHARACTER_CLASS.MAGE, CARD_RARITY.COMMON)
def create_minion(self, player):
return Minion(1, 3, effects=[Effect(SpellCast(), ActionTag(Give(ChangeAttack(1)), SelfSelector()))])
class SorcerersApprentice(MinionCard):
def __init__(self):
super().__init__("Sorcerer's Apprentice", 2, CHARACTER_CLASS.MAGE, CARD_RARITY.COMMON)
def create_minion(self, player):
return Minion(3, 2, auras=[Aura(ManaChange(-1), CardSelector(condition=IsSpell()))])
class KirinTorMage(MinionCard):
def __init__(self):
super().__init__("Kirin Tor Mage", 3, CHARACTER_CLASS.MAGE, CARD_RARITY.RARE,
battlecry=Battlecry(GiveAura([AuraUntil(ManaChange(-100), CardSelector(condition=IsSecret()),
CardPlayed(IsSecret()))]), PlayerSelector()))
def create_minion(self, player):
return Minion(4, 3)
class EtherealArcanist(MinionCard):
def __init__(self):
super().__init__("Ethereal Arcanist", 4, CHARACTER_CLASS.MAGE, CARD_RARITY.RARE)
def create_minion(self, player):
return Minion(3, 3, effects=[Effect(TurnEnded(HasSecret()), ActionTag(Give(ChangeAttack(2)), SelfSelector())),
Effect(TurnEnded(HasSecret()), ActionTag(Give(ChangeHealth(2)), SelfSelector()))])
class Sheep(MinionCard):
def __init__(self):
super().__init__("Sheep", 0, CHARACTER_CLASS.ALL, CARD_RARITY.COMMON, False, MINION_TYPE.BEAST)
def create_minion(self, p):
return Minion(1, 1)
class WaterElemental(MinionCard):
def __init__(self):
super().__init__("Water Elemental", 4, CHARACTER_CLASS.MAGE, CARD_RARITY.COMMON)
def create_minion(self, player):
return Minion(3, 6, effects=[Effect(DidDamage(), ActionTag(Give(Frozen()), TargetSelector()))])
class ArchmageAntonidas(MinionCard):
def __init__(self):
super().__init__("Archmage Antonidas", 7, CHARACTER_CLASS.MAGE, CARD_RARITY.LEGENDARY)
def create_minion(self, player):
return Minion(5, 7, effects=[Effect(SpellCast(), ActionTag(AddCard(hearthbreaker.cards.Fireball()),
PlayerSelector()))])
class Snowchugger(MinionCard):
def __init__(self):
super().__init__("Snowchugger", 2, CHARACTER_CLASS.MAGE, CARD_RARITY.COMMON, minion_type=MINION_TYPE.MECH)
def create_minion(self, player):
return Minion(2, 3, effects=[Effect(DidDamage(), ActionTag(Give(Frozen()), TargetSelector()))])
class SpellbenderMinion(MinionCard):
def __init__(self):
super().__init__("Spellbender", 0, CHARACTER_CLASS.MAGE, CARD_RARITY.EPIC, False,
ref_name="Spellbender (minion)")
def create_minion(self, p):
return Minion(1, 3)
class MirrorImageMinion(MinionCard):
def __init__(self):
super().__init__("Mirror Image", 0, CHARACTER_CLASS.MAGE, CARD_RARITY.COMMON, False,
ref_name="Mirror Image (minion)")
def create_minion(self, p):
return Minion(0, 2, taunt=True)
class GoblinBlastmage(MinionCard):
def __init__(self):
super().__init__("Goblin Blastmage", 4, CHARACTER_CLASS.MAGE, CARD_RARITY.RARE,
battlecry=Battlecry(Damage(1), CharacterSelector(None, EnemyPlayer(), RandomPicker(4)),
GreaterThan(Count(MinionSelector(IsType(MINION_TYPE.MECH))), value=0)))
def create_minion(self, player):
return Minion(5, 4)
class SootSpewer(MinionCard):
def __init__(self):
super().__init__("Soot Spewer", 3, CHARACTER_CLASS.MAGE, CARD_RARITY.RARE, minion_type=MINION_TYPE.MECH)
def create_minion(self, player):
return Minion(3, 3, spell_damage=1)
class WeeSpellstopper(MinionCard):
def __init__(self):
super().__init__("Wee Spellstopper", 4, CHARACTER_CLASS.MAGE, CARD_RARITY.EPIC)
def create_minion(self, player):
return Minion(2, 5, auras=[Aura(NoSpellTarget(), MinionSelector(Adjacent()))])
class FlameLeviathan(MinionCard):
def __init__(self):
super().__init__("Flame Leviathan", 7, CHARACTER_CLASS.MAGE, CARD_RARITY.LEGENDARY,
minion_type=MINION_TYPE.MECH,
effects=[Effect(Drawn(), ActionTag(Damage(2), CharacterSelector(None, BothPlayer())))])
def create_minion(self, player):
return Minion(7, 7)
class Flamewaker(MinionCard):
def __init__(self):
super().__init__("Flamewaker", 3, CHARACTER_CLASS.MAGE, CARD_RARITY.RARE)
def create_minion(self, player):
return Minion(2, 4, effects=[Effect(CardUsed(IsSpell()),
ActionTag(Damage(1),
CharacterSelector(None, EnemyPlayer(), RandomPicker(2))))])
|
src/python/nimbusml/preprocessing/normalization/minmaxscaler.py | michaelgsharp/NimbusML | 134 | 12710355 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------------------------
# - Generated by tools/entrypoint_compiler.py: do not edit by hand
"""
MinMaxScaler
"""
__all__ = ["MinMaxScaler"]
from sklearn.base import TransformerMixin
from ...base_transform import BaseTransform
from ...internal.core.preprocessing.normalization.minmaxscaler import \
MinMaxScaler as core
from ...internal.utils.utils import trace
class MinMaxScaler(core, BaseTransform, TransformerMixin):
"""
Normalizes columns as specified below.
.. remarks::
        In linear classification algorithms instances are viewed as vectors
        in multi-dimensional space. Since the range of values of raw data
        varies widely, some objective functions do not work properly without
        normalization. For example, if one of the features has a broad range
        of values, the distances between points are governed by this
        particular feature. Therefore, the range of all features should be
        normalized so that each feature contributes approximately
        proportionately to the final distance. This can provide significant
        speedup and accuracy benefits. In all the linear algorithms in
        nimbusml (:py:class:`Logistic Regression
        <nimbusml.linear_model.LogisticRegressionClassifier>`,
        :py:class:`Averaged Perceptron
        <nimbusml.linear_model.AveragedPerceptronBinaryClassifier>`, etc.),
        the default is to normalize features before training.
        ``MinMaxScaler`` is the default normalizer for many `nimbusml`
        algorithms and linearly rescales every feature to the [0,1] or the
        [-1,1] interval. Rescaling to the [0,1] interval is done by shifting
        the values of each feature so that the minimal value is 0, and then
        dividing by the new maximal value (which is the difference between
        the original maximal and minimal values). Rescaling to the [-1,1]
        interval is done by dividing the values of each feature by the
        maximal absolute value of the feature. This method is useful for
        preserving the sparsity of a dataset, since 0 values do not change.
        The scaling method can be specified by setting `fix_zero` to `False`
        for the first method, or setting it to `True` for the second method.
:param columns: a dictionary of key-value pairs, where key is the output
column name and value is the input column name.
* Multiple key-value pairs are allowed.
* Input column type: float or double or
`Vector Type </nimbusml/concepts/types#vectortype-column>`_
of floats
or doubles.
* Output column type:
`Vector Type </nimbusml/concepts/types#vectortype-column>`_.
* If the output column names are same as the input column names, then
simply specify ``columns`` as a list of strings.
The << operator can be used to set this value (see
`Column Operator </nimbusml/concepts/columns>`_)
For example
* MinMaxScaler(columns={'out1':'input1', 'out2':'input2'})
* MinMaxScaler() << {'out1':'input1', 'out2':'input2'}
For more details see `Columns </nimbusml/concepts/columns>`_.
:param fix_zero: Whether to map zero to zero, preserving sparsity.
:param max_training_examples: Max number of examples used to train the
normalizer.
:param params: Additional arguments sent to compute engine.
.. note::
        *MinMaxScaler*, like many other transforms, requires the input to be
        of numeric type, and it will fail for other types. Most of the time
        features are float, but a column could unexpectedly be of type
        string, which is why the following code raises an exception.
::
            in_df = pandas.DataFrame(data=dict(Sepal_Length=["2,2", 1, 2, 1]))
            normed = MinMaxScaler() << ['Sepal_Length']
normed.fit_transform(in_df)
The displayed message is::
            'Source column 'Sepal_Length' has invalid type ('TX'): Expected
R4 or R8 item type.
        The input column must be converted into float or double in the
        dataframe before running the pipeline, or inside the pipeline with
        the :py:class:`TypeConverter
        <nimbusml.preprocessing.schema.TypeConverter>` transform. This
        transform is automatically added in the case of integers.
.. seealso::
:py:class:`Binner
<nimbusml.preprocessing.normalization.Binner>`,
:py:class:`MeanVarianceScaler
<nimbusml.preprocessing.normalization.MeanVarianceScaler>`,
:py:class:`LogMeanVarianceScaler
<nimbusml.preprocessing.normalization.LogMeanVarianceScaler>`,
:py:class:`GlobalContrastRowScaler
<nimbusml.preprocessing.normalization.GlobalContrastRowScaler>`.
.. index:: normalize, preprocessing
Example:
.. literalinclude:: /../nimbusml/examples/MinMaxScaler.py
:language: python
"""
@trace
def __init__(
self,
fix_zero=True,
max_training_examples=1000000000,
columns=None,
**params):
if columns:
params['columns'] = columns
BaseTransform.__init__(self, **params)
core.__init__(
self,
fix_zero=fix_zero,
max_training_examples=max_training_examples,
**params)
self._columns = columns
def get_params(self, deep=False):
"""
Get the parameters for this operator.
"""
return core.get_params(self)
def _nodes_with_presteps(self):
"""
Inserts preprocessing before this one.
"""
from ..schema import TypeConverter
return [
TypeConverter(
result_type='R4')._steal_io(self),
self]
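# Minimal usage sketch (illustrative; the column name and values are made up). With
# the default fix_zero=True each column is divided by its maximal absolute value,
# so Sepal_Length below is rescaled into [1/2.2, 1.0]:
# import pandas
# df = pandas.DataFrame(data=dict(Sepal_Length=[2.2, 1.0, 2.0, 1.0]))
# normed = MinMaxScaler() << ['Sepal_Length']
# normed.fit_transform(df)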
|
controller_manager_tests/test/cm_msgs_utils_rostest.py | matthew-reynolds/ros_control | 375 | 12710361 | #!/usr/bin/env python
# Copyright (C) 2014, PAL Robotics S.L.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of PAL Robotics S.L. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import unittest
import rospy
from controller_manager_msgs.utils import *
valid_cm = [
'/',
'/controller_manager',
'/foo/robot/controller_manager1',
'/foo/robot/controller_manager2'
]
invalid_cm = [
'non_existent',
'/incomplete',
'/bad_type'
]
class TestUtils(unittest.TestCase):
def _ready(self):
try:
rospy.wait_for_service("/controller_manager/list_controllers", 2.0)
        except rospy.ROSException:
return False
return True
def test_is_controller_manager(self):
self.assertTrue(self._ready())
for cm in valid_cm:
self.assertTrue(is_controller_manager(cm))
for cm in invalid_cm:
self.assertFalse(is_controller_manager(cm))
def test_get_controller_managers(self):
self.assertTrue(self._ready())
# Root namespace
self.assertEqual(valid_cm, get_controller_managers())
self.assertEqual(valid_cm, get_controller_managers('/'))
# Nested namespace
nested_cm = [valid_cm[2], valid_cm[3]]
self.assertEqual(nested_cm, get_controller_managers('/foo'))
self.assertEqual(nested_cm, get_controller_managers('/foo/robot'))
self.assertEqual(['/controller_manager'],
get_controller_managers('/controller_manager'))
# Initial guess: Recommended usage pattern
prev_cm = get_controller_managers()
self.assertEqual(valid_cm,
get_controller_managers(initial_guess=prev_cm))
# Initial guess: Partial guess
self.assertEqual(valid_cm,
get_controller_managers(initial_guess=nested_cm))
# Misuse of initial guess. Specifying entries that have not gone
# through a full API check can yield incorrect results.
# You have been warned!
wrong_cm = get_controller_managers(initial_guess=invalid_cm)
self.assertNotEqual(valid_cm, wrong_cm)
diff = list(set(wrong_cm) - set(valid_cm))
self.assertEqual(2, len(diff))
self.assertIn('/incomplete', diff)
self.assertIn('/bad_type', diff)
def test_controller_manager_lister(self):
self.assertTrue(self._ready())
# Root namespace
list_cm = ControllerManagerLister()
self.assertEqual(valid_cm, list_cm())
# Nested namespace
list_cm_foo = ControllerManagerLister('/foo')
nested_cm = [valid_cm[2], valid_cm[3]]
self.assertEqual(nested_cm, list_cm_foo())
def test_controller_lister(self):
self.assertTrue(self._ready())
# Default namespace
list_controllers = ControllerLister()
controllers = list_controllers()
self.assertEqual(2, len(controllers))
self.assertEqual('foo_controller', controllers[0].name)
self.assertEqual('bar_controller', controllers[1].name)
# Custom namespace
list_controllers_ns = ControllerLister('/foo')
self.assertEqual(0, len(list_controllers_ns()))
def test_rosparam_controller_names(self):
# Default namespace
names_def = get_rosparam_controller_names()
self.assertEqual(2, len(names_def))
self.assertIn('foo_controller', names_def)
self.assertIn('bar_controller', names_def)
# Root namespace
names_root = get_rosparam_controller_names('/')
self.assertEqual(2, len(names_root))
self.assertIn('foo_controller', names_root)
self.assertIn('bar_controller', names_root)
# Empty namespace
names_empty = get_rosparam_controller_names('')
self.assertEqual(2, len(names_empty))
self.assertIn('foo_controller', names_empty)
self.assertIn('bar_controller', names_empty)
# Custom namespace
names_ns = get_rosparam_controller_names('/ns')
self.assertEqual(2, len(names_ns))
self.assertIn('baz_controller', names_ns)
self.assertIn('qux_controller', names_ns)
# Custom namespace, trailing slash
names_nss = get_rosparam_controller_names('/ns/')
self.assertEqual(2, len(names_nss))
self.assertIn('baz_controller', names_nss)
self.assertIn('qux_controller', names_nss)
if __name__ == '__main__':
import rostest
rostest.rosrun('controller_manager_msgs',
'cm_msgs_utils_rostest',
TestUtils)
|
rules/vulnerabilities/rule_front-page.py | TomasTorresB/nerve | 365 | 12710372 | <filename>rules/vulnerabilities/rule_front-page.py
from core.redis import rds
from core.triage import Triage
from core.parser import ScanParser
class Rule:
def __init__(self):
self.rule = 'VLN_65C8'
self.rule_severity = 2
self.rule_description = 'This rule checks for FrontPage configuration information disclosure'
self.rule_confirm = 'FrontPage misconfiguration'
self.rule_details = ''
        self.rule_mitigation = '''Ensure FrontPage Server Extensions / SharePoint are not anonymously accessible'''
self.intensity = 1
def check_rule(self, ip, port, values, conf):
t = Triage()
p = ScanParser(port, values)
domain = p.get_domain()
module = p.get_module()
if 'http' not in module:
return
resp = t.http_request(ip, port, uri='/_vti_inf.html')
if not resp:
return
if 'Content-Length' in resp.headers and resp.headers['Content-Length'] == '247':
self.rule_details = 'Exposed FrontPage at {}'.format(resp.url)
rds.store_vuln({
'ip': ip,
'port': port,
'domain': domain,
'rule_id': self.rule,
'rule_sev': self.rule_severity,
'rule_desc': self.rule_description,
'rule_confirm': self.rule_confirm,
'rule_details': self.rule_details,
'rule_mitigation': self.rule_mitigation
})
return
|
cea/utilities/date.py | architecture-building-systems/cea-toolbox | 121 | 12710392 | <reponame>architecture-building-systems/cea-toolbox
import pandas as pd
from calendar import isleap
def get_date_range_hours_from_year(year):
"""
creates date range in hours for the year excluding leap day
:param year: year of date range
:type year: int
:return: pd.date_range with 8760 values
:rtype: pandas.data_range
"""
date_range = pd.date_range(start=str(year), end=str(year + 1), freq='H', closed='left')
# Check if leap year and remove extra day
if isleap(year):
date_range = date_range[~((date_range.month == 2) & (date_range.day == 29))]
return date_range
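# Self-check sketch (illustrative, not part of the original module): leap and
# non-leap years should both yield exactly 8760 hourly timestamps, since Feb 29 is
# dropped from leap years.
if __name__ == '__main__':
    assert len(get_date_range_hours_from_year(2020)) == 8760  # leap year, Feb 29 removed
    assert len(get_date_range_hours_from_year(2021)) == 8760  # ordinary year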
|
vectorhub/encoders/audio/pytorch/__init__.py | boba-and-beer/vectorhub | 385 | 12710403 | <reponame>boba-and-beer/vectorhub
from .wav2vec import *
|
ocpmodels/models/gemnet/layers/interaction_block.py | Irlirion/ocp | 242 | 12710427 | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import math
import torch
from .atom_update_block import AtomUpdateBlock
from .base_layers import Dense, ResidualLayer
from .efficient import (
EfficientInteractionBilinear,
)
from .embedding_block import EdgeEmbedding
from .scaling import ScalingFactor
class InteractionBlockTripletsOnly(torch.nn.Module):
"""
Interaction block for GemNet-T/dT.
Parameters
----------
emb_size_atom: int
Embedding size of the atoms.
emb_size_edge: int
Embedding size of the edges.
emb_size_trip: int
(Down-projected) Embedding size in the triplet message passing block.
emb_size_rbf: int
Embedding size of the radial basis transformation.
emb_size_cbf: int
Embedding size of the circular basis transformation (one angle).
emb_size_bil_trip: int
Embedding size of the edge embeddings in the triplet-based message passing block after the bilinear layer.
num_before_skip: int
Number of residual blocks before the first skip connection.
num_after_skip: int
Number of residual blocks after the first skip connection.
num_concat: int
Number of residual blocks after the concatenation.
num_atom: int
Number of residual blocks in the atom embedding blocks.
activation: str
Name of the activation function to use in the dense layers except for the final dense layer.
scale_file: str
Path to the json file containing the scaling factors.
"""
def __init__(
self,
emb_size_atom,
emb_size_edge,
emb_size_trip,
emb_size_rbf,
emb_size_cbf,
emb_size_bil_trip,
num_before_skip,
num_after_skip,
num_concat,
num_atom,
activation=None,
scale_file=None,
name="Interaction",
):
super().__init__()
self.name = name
block_nr = name.split("_")[-1]
## -------------------------------------------- Message Passing ------------------------------------------- ##
# Dense transformation of skip connection
self.dense_ca = Dense(
emb_size_edge,
emb_size_edge,
activation=activation,
bias=False,
)
# Triplet Interaction
self.trip_interaction = TripletInteraction(
emb_size_edge=emb_size_edge,
emb_size_trip=emb_size_trip,
emb_size_bilinear=emb_size_bil_trip,
emb_size_rbf=emb_size_rbf,
emb_size_cbf=emb_size_cbf,
activation=activation,
scale_file=scale_file,
name=f"TripInteraction_{block_nr}",
)
## ---------------------------------------- Update Edge Embeddings ---------------------------------------- ##
# Residual layers before skip connection
self.layers_before_skip = torch.nn.ModuleList(
[
ResidualLayer(
emb_size_edge,
activation=activation,
)
for i in range(num_before_skip)
]
)
# Residual layers after skip connection
self.layers_after_skip = torch.nn.ModuleList(
[
ResidualLayer(
emb_size_edge,
activation=activation,
)
for i in range(num_after_skip)
]
)
## ---------------------------------------- Update Atom Embeddings ---------------------------------------- ##
self.atom_update = AtomUpdateBlock(
emb_size_atom=emb_size_atom,
emb_size_edge=emb_size_edge,
emb_size_rbf=emb_size_rbf,
nHidden=num_atom,
activation=activation,
scale_file=scale_file,
name=f"AtomUpdate_{block_nr}",
)
## ------------------------------ Update Edge Embeddings with Atom Embeddings ----------------------------- ##
self.concat_layer = EdgeEmbedding(
emb_size_atom,
emb_size_edge,
emb_size_edge,
activation=activation,
)
self.residual_m = torch.nn.ModuleList(
[
ResidualLayer(emb_size_edge, activation=activation)
for _ in range(num_concat)
]
)
self.inv_sqrt_2 = 1 / math.sqrt(2.0)
def forward(
self,
h,
m,
rbf3,
cbf3,
id3_ragged_idx,
id_swap,
id3_ba,
id3_ca,
rbf_h,
idx_s,
idx_t,
):
"""
Returns
-------
h: torch.Tensor, shape=(nEdges, emb_size_atom)
Atom embeddings.
m: torch.Tensor, shape=(nEdges, emb_size_edge)
Edge embeddings (c->a).
"""
# Initial transformation
x_ca_skip = self.dense_ca(m) # (nEdges, emb_size_edge)
x3 = self.trip_interaction(
m,
rbf3,
cbf3,
id3_ragged_idx,
id_swap,
id3_ba,
id3_ca,
)
## ----------------------------- Merge Embeddings after Triplet Interaction ------------------------------ ##
x = x_ca_skip + x3 # (nEdges, emb_size_edge)
x = x * self.inv_sqrt_2
## ---------------------------------------- Update Edge Embeddings --------------------------------------- ##
# Transformations before skip connection
for i, layer in enumerate(self.layers_before_skip):
x = layer(x) # (nEdges, emb_size_edge)
# Skip connection
m = m + x # (nEdges, emb_size_edge)
m = m * self.inv_sqrt_2
# Transformations after skip connection
for i, layer in enumerate(self.layers_after_skip):
m = layer(m) # (nEdges, emb_size_edge)
## ---------------------------------------- Update Atom Embeddings --------------------------------------- ##
h2 = self.atom_update(h, m, rbf_h, idx_t)
# Skip connection
h = h + h2 # (nAtoms, emb_size_atom)
h = h * self.inv_sqrt_2
## ----------------------------- Update Edge Embeddings with Atom Embeddings ----------------------------- ##
m2 = self.concat_layer(h, m, idx_s, idx_t) # (nEdges, emb_size_edge)
for i, layer in enumerate(self.residual_m):
m2 = layer(m2) # (nEdges, emb_size_edge)
# Skip connection
m = m + m2 # (nEdges, emb_size_edge)
m = m * self.inv_sqrt_2
return h, m
class TripletInteraction(torch.nn.Module):
"""
Triplet-based message passing block.
Parameters
----------
emb_size_edge: int
Embedding size of the edges.
emb_size_trip: int
(Down-projected) Embedding size of the edge embeddings after the hadamard product with rbf.
emb_size_bilinear: int
Embedding size of the edge embeddings after the bilinear layer.
emb_size_rbf: int
Embedding size of the radial basis transformation.
emb_size_cbf: int
Embedding size of the circular basis transformation (one angle).
activation: str
Name of the activation function to use in the dense layers except for the final dense layer.
scale_file: str
Path to the json file containing the scaling factors.
"""
def __init__(
self,
emb_size_edge,
emb_size_trip,
emb_size_bilinear,
emb_size_rbf,
emb_size_cbf,
activation=None,
scale_file=None,
name="TripletInteraction",
**kwargs,
):
super().__init__()
self.name = name
# Dense transformation
self.dense_ba = Dense(
emb_size_edge,
emb_size_edge,
activation=activation,
bias=False,
)
# Up projections of basis representations, bilinear layer and scaling factors
self.mlp_rbf = Dense(
emb_size_rbf,
emb_size_edge,
activation=None,
bias=False,
)
self.scale_rbf = ScalingFactor(
scale_file=scale_file, name=name + "_had_rbf"
)
self.mlp_cbf = EfficientInteractionBilinear(
emb_size_trip, emb_size_cbf, emb_size_bilinear
)
self.scale_cbf_sum = ScalingFactor(
scale_file=scale_file, name=name + "_sum_cbf"
) # combines scaling for bilinear layer and summation
# Down and up projections
self.down_projection = Dense(
emb_size_edge,
emb_size_trip,
activation=activation,
bias=False,
)
self.up_projection_ca = Dense(
emb_size_bilinear,
emb_size_edge,
activation=activation,
bias=False,
)
self.up_projection_ac = Dense(
emb_size_bilinear,
emb_size_edge,
activation=activation,
bias=False,
)
self.inv_sqrt_2 = 1 / math.sqrt(2.0)
def forward(
self,
m,
rbf3,
cbf3,
id3_ragged_idx,
id_swap,
id3_ba,
id3_ca,
):
"""
Returns
-------
m: torch.Tensor, shape=(nEdges, emb_size_edge)
Edge embeddings (c->a).
"""
# Dense transformation
x_ba = self.dense_ba(m) # (nEdges, emb_size_edge)
# Transform via radial bessel basis
rbf_emb = self.mlp_rbf(rbf3) # (nEdges, emb_size_edge)
x_ba2 = x_ba * rbf_emb
x_ba = self.scale_rbf(x_ba, x_ba2)
x_ba = self.down_projection(x_ba) # (nEdges, emb_size_trip)
# Transform via circular spherical basis
x_ba = x_ba[id3_ba]
# Efficient bilinear layer
x = self.mlp_cbf(cbf3, x_ba, id3_ca, id3_ragged_idx)
# (nEdges, emb_size_quad)
x = self.scale_cbf_sum(x_ba, x)
        # => the message is now conditioned on rbf(d_ba) and cbf(d_ca, angle_cab)
# Up project embeddings
x_ca = self.up_projection_ca(x) # (nEdges, emb_size_edge)
x_ac = self.up_projection_ac(x) # (nEdges, emb_size_edge)
# Merge interaction of c->a and a->c
x_ac = x_ac[id_swap] # swap to add to edge a->c and not c->a
x3 = x_ca + x_ac
x3 = x3 * self.inv_sqrt_2
return x3
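# Construction sketch (illustrative; the embedding sizes and activation below are
# assumptions, not values mandated by this module -- any consistent set works):
# block = InteractionBlockTripletsOnly(
#     emb_size_atom=128, emb_size_edge=256, emb_size_trip=64,
#     emb_size_rbf=16, emb_size_cbf=16, emb_size_bil_trip=64,
#     num_before_skip=1, num_after_skip=2, num_concat=1, num_atom=3,
#     activation="silu", scale_file="scale_factors.json", name="Interaction_1")
# h, m = block(h, m, rbf3, cbf3, id3_ragged_idx, id_swap, id3_ba, id3_ca,
#              rbf_h, idx_s, idx_t)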
|
path_import.py | 5A59/Zvm | 485 | 12710439 | <filename>path_import.py
# coding=utf-8
import sys
import os
sys.path.append(os.getcwd())  # append the directory string itself, not a one-element list
|
akika_venv/lib/python3.6/site-packages/django_seo_js/middleware/__init__.py | laetitia123/akikatest | 183 | 12710456 | from .escaped_fragment import EscapedFragmentMiddleware
from .hashbang import HashBangMiddleware
from .useragent import UserAgentMiddleware
|
bark/examples/paths.py | mansoorcheema/bark | 174 | 12710459 | <gh_stars>100-1000
# Copyright (c) 2020 fortiss GmbH
#
# Authors: <NAME>, <NAME>, <NAME>,
# <NAME> and <NAME>
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
import os
from pathlib import Path
class Data:
#xodr data
_xodr_data = {}
#track data
_track_data = {}
#params data
_params_data = {}
@staticmethod
def xodr_data(name):
if Data._xodr_data:
return Data._xodr_data[name]
data_dir = os.path.join(os.path.dirname(__file__), "../runtime/tests/data")
files = [f for f in os.listdir(data_dir) if f.endswith(".xodr")]
for file in files:
Data._xodr_data[Path(file).stem] = os.path.join(data_dir, file)
return Data._xodr_data[name]
@staticmethod
def track_data(name):
if Data._track_data:
return Data._track_data[name]
data_dir = os.path.join(os.path.dirname(__file__), "../runtime/tests/data")
files = [f for f in os.listdir(data_dir) if f.endswith(".csv")]
for file in files:
Data._track_data[Path(file).stem] = os.path.join(data_dir, file)
return Data._track_data[name]
@staticmethod
def params_data(name):
if Data._params_data:
return Data._params_data[name]
data_dir = os.path.join(os.path.dirname(__file__), "params")
files = [f for f in os.listdir(data_dir) if f.endswith(".json")]
for file in files:
Data._params_data[Path(file).stem] = os.path.join(data_dir, file)
return Data._params_data[name]
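# Usage sketch (the file stems below are hypothetical; any .xodr/.csv/.json file in
# the respective data directories can be looked up by its stem):
# map_path = Data.xodr_data("city_highway_straight")
# track_path = Data.track_data("interaction_dataset_track")
# params_path = Data.params_data("highway_merge_default")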
|
tests/comparison/statement_generator.py | suifengzhuliu/impala | 1,523 | 12710500 | <reponame>suifengzhuliu/impala<filename>tests/comparison/statement_generator.py<gh_stars>1000+
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from copy import deepcopy
from tests.comparison.common import Table
from tests.comparison.funcs import CastFunc
from tests.comparison.query import (
InsertClause,
InsertStatement,
Query,
StatementExecutionMode,
ValuesClause,
ValuesRow)
from tests.comparison.query_generator import QueryGenerator
class InsertStatementGenerator(object):
def __init__(self, profile):
# QueryProfile-like object
self.profile = profile
# used to generate SELECT queries for INSERT/UPSERT ... SELECT statements;
# to ensure state is completely reset, this is created anew with each call to
# generate_statement()
self.select_stmt_generator = None
def generate_statement(self, tables, dml_table):
"""
Return a randomly generated INSERT or UPSERT statement. Note that UPSERTs are very
similar to INSERTs, which is why this generator handles both.
tables should be a list of Table objects. A typical source of such a list comes from
db_connection.DbCursor.describe_common_tables(). This list describes the possible
"sources" of the INSERT/UPSERT's WITH and FROM/WHERE clauses.
dml_table is a required Table object. The INSERT/UPSERT will be into this table.
"""
if not (isinstance(tables, list) and len(tables) > 0 and
all((isinstance(t, Table) for t in tables))):
raise Exception('tables must be a not-empty list of Table objects')
if not isinstance(dml_table, Table):
raise Exception('dml_table must be a Table')
self.select_stmt_generator = QueryGenerator(self.profile)
insert_statement = InsertStatement(execution=StatementExecutionMode.DML_TEST)
# Choose whether this is a
# INSERT/UPSERT INTO table SELECT/VALUES
# or
# INSERT/UPSERT INTO table (col1, col2, ...) SELECT/VALUES
# If the method returns None, it's the former.
insert_column_list = self.profile.choose_insert_column_list(dml_table)
if dml_table.primary_keys:
# Having primary keys implies the table is a Kudu table, which makes it subject to
# both INSERTs (with automatic ignoring of primary key duplicates) and UPSERTs.
conflict_action = self.profile.choose_insert_vs_upsert()
else:
conflict_action = InsertClause.CONFLICT_ACTION_DEFAULT
insert_statement.insert_clause = InsertClause(
dml_table, column_list=insert_column_list, conflict_action=conflict_action)
# We still need to internally track the columns we're inserting. Keep in mind None
# means "all" without an explicit column list. Since we've already created the
# InsertClause object though, we can fill this in for ourselves.
if insert_column_list is None:
insert_column_list = dml_table.cols
insert_item_data_types = [col.type for col in insert_column_list]
# Decide whether this is INSERT/UPSERT VALUES or INSERT/UPSERT SELECT
insert_source_clause = self.profile.choose_insert_source_clause()
if issubclass(insert_source_clause, Query):
# Use QueryGenerator()'s public interface to generate the SELECT.
select_query = self.select_stmt_generator.generate_statement(
tables, select_item_data_types=insert_item_data_types)
# To avoid many loss-of-precision errors, explicitly cast the SelectItems. The
# generator's type system is not near sophisticated enough to know how random
# expressions will be implicitly casted in the databases. This requires less work
# to implement. IMPALA-4693 considers alternative approaches.
self._cast_select_items(select_query, insert_column_list)
insert_statement.with_clause = deepcopy(select_query.with_clause)
select_query.with_clause = None
insert_statement.select_query = select_query
elif issubclass(insert_source_clause, ValuesClause):
insert_statement.values_clause = self._generate_values_clause(insert_column_list)
else:
raise Exception('unsupported INSERT/UPSERT source clause: {0}'.format(
insert_source_clause))
return insert_statement
def _generate_values_clause(self, columns):
"""
Return a VALUES clause containing a variable number of rows.
The values corresponding to primary keys will be non-null constants. Any other
columns could be null, constants, or function trees that may or may not evaluate to
null.
"""
values_rows = []
    for _ in range(self.profile.choose_insert_values_row_count()):
values_row = []
for col in columns:
if col.is_primary_key:
val = self.profile.choose_constant(return_type=col.exact_type, allow_null=False)
elif 'constant' == self.profile.choose_values_item_expr():
val = self.profile.choose_constant(return_type=col.exact_type, allow_null=True)
else:
func_tree = self.select_stmt_generator.create_func_tree(
col.type, allow_subquery=False)
val = self.select_stmt_generator.populate_func_with_vals(func_tree)
# Only the generic type, not the exact type, of the value will be known. To
# avoid a lot of failed queries due to precision errors, we cast the val to
# the exact type of the column. This will still not prevent "out of range"
# conditions, as we don't try to evaluate the random expressions.
val = CastFunc(val, col.exact_type)
values_row.append(val)
values_rows.append(ValuesRow(values_row))
return ValuesClause(values_rows)
def _cast_select_items(self, select_query, column_list):
"""
For a given Query select_query and a column_list (list of Columns), cast each select
item in select_query to the exact type of the column.
A Query may have a UNION, recursively do this down the line.
"""
for col_idx, select_item in enumerate(select_query.select_clause.items):
cast_val_expr = CastFunc(select_item.val_expr, column_list[col_idx].exact_type)
select_item.val_expr = cast_val_expr
if select_query.union_clause:
self._cast_select_items(select_query.union_clause.query, column_list)
def get_generator(statement_type):
"""
Given a statement type, return the proper statement generator.
"""
STATEMENT_GENERATOR_MAP = {
InsertStatement: InsertStatementGenerator,
Query: QueryGenerator,
}
return STATEMENT_GENERATOR_MAP[statement_type]
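# Usage sketch (illustrative; `profile`, `tables` and `dml_table` are assumed to be
# built elsewhere with the existing QueryProfile / Table APIs):
# generator = get_generator(InsertStatement)(profile)
# insert_stmt = generator.generate_statement(tables, dml_table)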
|
ztag/errors.py | justinbastress/ztag | 107 | 12710501 | <reponame>justinbastress/ztag
class InvalidTag(Exception):
pass
class IgnoreObject(Exception):
def __init__(self, original_exception=None, trback=None, *args, **kwargs):
super(Exception, self).__init__(*args, **kwargs)
self.original_exception = original_exception
self.trback = trback
class UnknownProtocol(Exception):
pass
class MissingTransform(Exception):
pass
class ExtraTransform(Exception):
pass
|
doc/util/disguise.py | jhh67/chapel | 1,602 | 12710517 | from docutils import nodes
def disguise_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
"""
Role to obfuscate e-mail addresses using DISGUISE comments.
"""
obfuscated = '<!-- DISGUISE -->'.join(list(text))
obfuscated = '<b>' + obfuscated
obfuscated = obfuscated + '</b>'
node = nodes.raw('', obfuscated, format='html')
return [node], []
def setup(app):
app.add_role('disguise', disguise_role)
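# Example (illustrative): a reStructuredText source containing
#   :disguise:`user@example.org`
# renders each character separated by an HTML comment, roughly
#   <b>u<!-- DISGUISE -->s<!-- DISGUISE -->e<!-- DISGUISE -->r...</b>
# so the address never appears contiguously in the generated HTML and naive
# scrapers cannot match it.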
|
share/lib/python/neuron/rxd/geometry3d/GeneralizedVoxelization.py | niltonlk/nrn | 203 | 12710548 | from . import graphicsPrimitives as graphics
from .. import options
def find_voxel(x, y, z, g):
"""returns (i,j,k) of voxel containing point x,y,z if the point is within
the grid, otherwise return the corresponding grid boundary.
"""
# g is grid boundaries
i = max(0, int((x - g["xlo"]) // g["dx"]))
j = max(0, int((y - g["ylo"]) // g["dy"]))
k = max(0, int((z - g["zlo"]) // g["dz"]))
return (i, j, k)
def get_verts(voxel, g):
"""return list (len=8) of point coordinates (x,y,z) that are vertices of the voxel (i,j,k)"""
(i, j, k) = voxel
dx, dy, dz = g["dx"], g["dy"], g["dz"]
v1_0, v1_1, v1_2 = g["xlo"] + i * dx, g["ylo"] + j * dy, g["zlo"] + k * dz
vertices = [
(v1_0, v1_1, v1_2),
(v1_0 + dx, v1_1, v1_2),
(v1_0 + dx, v1_1 + dy, v1_2),
(v1_0, v1_1 + dy, v1_2),
(v1_0, v1_1, v1_2 + dz),
(v1_0 + dx, v1_1, v1_2 + dz),
(v1_0 + dx, v1_1 + dy, v1_2 + dz),
(v1_0, v1_1 + dy, v1_2 + dz),
]
return vertices
def get_surr_rows(row, endpoints, g):
"""return list (len=4) of the rows surrounding the current one on all sides
IF the surrounding row is within the bounds of the grid."""
(y, z) = row
surr = []
if y >= 1:
surr.append(((y - 1, z), endpoints))
if z >= 1:
surr.append(((y, z - 1), endpoints))
if (g["ylo"] + (y + 1) * g["dy"]) < g["yhi"]:
surr.append(((y + 1, z), endpoints))
if (g["zlo"] + (z + 1) * g["dz"]) < g["zhi"]:
surr.append(((y, z + 1), endpoints))
return surr
def verts_in(f, voxel, surf, g):
"""return the number of vertices of this voxel that are contained within the surface"""
verts = get_verts(voxel, g)
ins = 0
distlist = []
for (x, y, z) in verts:
if (
g["xlo"] <= x <= g["xhi"]
and g["ylo"] <= y <= g["yhi"]
and g["zlo"] <= z <= g["zhi"]
):
dist = f.distance(x, y, z)
else:
dist = float("inf")
distlist.append(dist)
if dist <= options.ics_distance_threshold:
ins += 1
if 1 <= ins <= 7:
surf[voxel] = distlist
return ins
def find_endpoints(f, surf, include_ga, row, guesses, g):
"""return the endpoints (L,R) contained in the frustum f; if only one voxel both endpoints will be the same; if none both will be None
f: frustum object
surf: surface voxels
row: current row
guesses: estimates for endpoints
g: grid boundaries"""
# +x or right endpoint
Rend, Lend = None, None
check_surf_L, check_surf_R = (None, None), (None, None)
stop = False
Ri = guesses[1]
ogrverts = verts_in(f, (Ri, row[0], row[1]), surf, g)
if ogrverts == 0:
going_in = True
elif 1 <= ogrverts < 8:
going_in = False
check_surf_R = (True, Ri)
else:
going_in = False
while (0 <= Ri and (g["xlo"] + (Ri) * g["dx"]) < g["xhi"]) and not stop:
verts = verts_in(f, (Ri, row[0], row[1]), surf, g)
if verts == 0:
if not going_in:
stop = True
continue
else:
if Ri == guesses[0]:
# row is empty between guesses
return (None, None)
Ri -= 1
continue
elif verts == 8:
Rend = Ri
Ri += 1
continue
else:
Rend = Ri
if going_in:
check_surf_R = (True, Ri)
break
Ri += 1
# the -x or left endpoint
stop = False
Li = guesses[0]
oglverts = verts_in(f, (Li, row[0], row[1]), surf, g)
if oglverts == 0:
going_in = True
elif 1 <= oglverts < 8:
going_in = False
check_surf_L = (True, Li)
else:
going_in = False
while (0 <= Li and (g["xlo"] + (Li) * g["dx"]) < g["xhi"]) and not stop:
verts = verts_in(f, (Li, row[0], row[1]), surf, g)
if verts == 0:
if not going_in:
stop = True
continue
else:
# it's not empty or would have already returned
Li += 1
continue
elif verts == 8:
Lend = Li
Li -= 1
continue
else:
Lend = Li
if going_in:
check_surf_L = (True, Li)
break
Li -= 1
# check for extra surface voxels missed
if check_surf_R[0] and Lend is not None:
r = check_surf_R[1]
while r > Lend:
verts = verts_in(f, (r, row[0], row[1]), surf, g)
if verts == 8:
break
else:
r -= 1
if check_surf_L[0] and Rend is not None:
l = check_surf_L[1]
while l < Rend:
verts = verts_in(f, (l, row[0], row[1]), surf, g)
if verts == 8:
break
else:
l += 1
# if keeping non-surface but grid-adjacent voxels:
if include_ga:
surf.add((Lend, row[0], row[1]))
surf.add((Rend, row[0], row[1]))
return (Lend, Rend)
def voxelize(grid, Object, corners=None, include_ga=False):
"""return a list of all voxels (i,j,k) that contain part of the object
Other returned elements: set of surface voxels, possibly_missed for error handling"""
# include_ga is whether to include grid-adjacent voxels in the surface, even if entirely within the surface
yes_voxels = set()
checked = set()
surface = {}
if corners is not None:
for i in range(4):
x0, y0, z0 = corners[i][0], corners[i][1], corners[i][2]
(i0, j0, k0) = find_voxel(x0, y0, z0, grid)
# find the contained endpoints and start the set with initial row and initial endpoints
s = set()
ends = find_endpoints(Object, surface, include_ga, (j0, k0), (i0, i0), grid)
if ends[0]:
break
else:
if isinstance(Object, graphics.Sphere):
x0, y0, z0 = Object.x, Object.y, Object.z
else:
x0, y0, z0 = Object._x0, Object._y0, Object._z0
# find the starting voxel
(i0, j0, k0) = find_voxel(x0, y0, z0, grid)
# find the contained endpoints and start the set with initial row and initial endpoints
s = set()
ends = find_endpoints(
Object, surface, include_ga, (j0, k0), (i0 - 1, i0 + 1), grid
)
# the given starting voxel is not actually found
possibly_missed = False
if not ends[0]:
possibly_missed = True
ends = (i0, i0)
# ------
for i in range(ends[0], ends[1] + 1):
yes_voxels.add((i, j0, k0))
# add that initial row to checked and the set (otherwise inital voxel missed)
checked.add((j0, k0))
s.add(((j0, k0), ends))
while s:
startr = s.pop()
newr = get_surr_rows(startr[0], startr[1], grid)
for r in newr:
(row, guesses) = r
if row not in checked:
(Lend, Rend) = find_endpoints(
Object, surface, include_ga, row, guesses, grid
)
if Lend is not None:
for i in range(Lend, Rend + 1):
yes_voxels.add((i, row[0], row[1]))
s.add((row, (Lend, Rend)))
checked.add(row)
missed = False
if possibly_missed and (
len(yes_voxels) == 1
): # no voxels were found, return empty set
missed = (i0, j0, k0)
yes_voxels = set()
return [yes_voxels, surface, missed]
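# Worked sketch (illustrative grid; the relative imports above prevent running this
# file as a script, so the example is left as comments). A 1x1x1 domain discretized
# at 0.25 resolution:
# g = {"xlo": 0.0, "ylo": 0.0, "zlo": 0.0,
#      "xhi": 1.0, "yhi": 1.0, "zhi": 1.0,
#      "dx": 0.25, "dy": 0.25, "dz": 0.25}
# find_voxel(0.6, 0.1, 0.9, g)   # -> (2, 0, 3)
# get_verts((2, 0, 3), g)        # -> the 8 corner coordinates of that voxel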
|
Attack/IncDS.py | YingtongDou/Nash-Detect | 103 | 12710554 | <reponame>YingtongDou/Nash-Detect
import copy
import time
"""
The implementation of the IncDS attack.
"""
def compute_density(user_product_graph, product_user_graph, c, t):
"""
Compute the density of controlled accounts according to their local structural density
"""
density = {}
# intialize the auxiliary graph
aux_user_graph = copy.deepcopy(user_product_graph)
aux_prod_graph = copy.deepcopy(product_user_graph)
for u in c:
aux_user_graph[u].append((t, 1, -1, '2012-06-01'))
aux_prod_graph[t].append((u, 1, -1, '2012-06-01'))
for u in c:
user_degree = len(aux_user_graph[u])
prod_degree = sum([len(aux_prod_graph[review[0]]) for review in aux_user_graph[u]])
density[u] = user_degree/prod_degree
return density
def ds_evasion(user_product_graph, product_user_graph, c, r, t):
"""
Args:
user_product_graph: key = user_id, value = list of review tuples
product_product_graph: key = product_id, value = list of review tuples
priors: node priors
c: list of controlled accounts
r: number of reviews to be posted each account
t: target list
feature_config:
"""
# total number of spams posted
count = 0
added_edges = []
t0 = time.time()
# how many new controlled accounts are selected to post spams for the current iteration
unique = 0
new_user_graph = copy.deepcopy(user_product_graph)
new_product_graph = copy.deepcopy(product_user_graph)
account_log = []
# for each target, find controlled accounts to post spams
for target in t:
# compute the density
density = compute_density(new_user_graph, new_product_graph, c, target)
selected_accounts = [(account, density[account]) for account in c]
selected_accounts = sorted(selected_accounts, reverse=False, key=lambda x: x[1])
print("Dict of densities of controlled accounts")
print(selected_accounts)
selected_accounts = [account[0] for account in selected_accounts[:r]]
print("List of selected accounts")
print(selected_accounts)
# count the number of unique added accounts
for account in selected_accounts:
if account not in account_log:
unique += 1
print('Total number of selected unique accounts: %d' % (unique))
account_log = account_log + selected_accounts
# add the added_edges to the global graph
for added_account in selected_accounts:
new_user_graph[added_account].append((target, 1, -1, '2012-06-01'))
new_product_graph[target].append((added_account, 1, -1, '2012-06-01'))
# add new nodes to output
for added_account in selected_accounts:
review_id = (added_account, target)
added_edges.append(review_id)
t1 = time.time()
print('Time consumed: %s' % str(t1 - t0))
print('\n---------------------------------\n')
    return added_edges, user_product_graph
|
PlatformerPathfinding/test_level.py | gerardrbentley/TheVGLC | 147 | 12710583 | '''
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import pathfinding
def makeIsSolid(solids):
def isSolid(tile):
return tile in solids
return isSolid
def makeGetNeighbors(jumps,levelStr,visited,isSolid):
maxX = len(levelStr[0])-1
maxY = len(levelStr)-1
jumpDiffs = []
for jump in jumps:
jumpDiff = [jump[0]]
for ii in range(1,len(jump)):
jumpDiff.append((jump[ii][0]-jump[ii-1][0],jump[ii][1]-jump[ii-1][1]))
jumpDiffs.append(jumpDiff)
jumps = jumpDiffs
def getNeighbors(pos):
dist = pos[0]-pos[2]
pos = pos[1]
visited.add((pos[0],pos[1]))
below = (pos[0],pos[1]+1)
neighbors = []
if below[1] > maxY:
return []
if pos[2] != -1:
ii = pos[3] +1
jump = pos[2]
if ii < len(jumps[jump]):
if not (pos[0]+pos[4]*jumps[jump][ii][0] > maxX or pos[0]+pos[4]*jumps[jump][ii][0] < 0 or pos[1]+jumps[jump][ii][1] < 0) and not isSolid(levelStr[pos[1]+jumps[jump][ii][1]][pos[0]+pos[4]*jumps[jump][ii][0]]):
neighbors.append([dist+1,(pos[0]+pos[4]*jumps[jump][ii][0],pos[1]+jumps[jump][ii][1],jump,ii,pos[4])])
if pos[1]+jumps[jump][ii][1] < 0 and not isSolid(levelStr[pos[1]+jumps[jump][ii][1]][pos[0]+pos[4]*jumps[jump][ii][0]]):
neighbors.append([dist+1,(pos[0]+pos[4]*jumps[jump][ii][0],0,jump,ii,pos[4])])
if isSolid(levelStr[below[1]][below[0]]):
if pos[0]+1 <= maxX and not isSolid(levelStr[pos[1]][pos[0]+1]):
neighbors.append([dist+1,(pos[0]+1,pos[1],-1)])
if pos[0]-1 >= 0 and not isSolid(levelStr[pos[1]][pos[0]-1]):
neighbors.append([dist+1,(pos[0]-1,pos[1],-1)])
for jump in range(len(jumps)):
ii = 0
if not (pos[0]+jumps[jump][ii][0] > maxX or pos[1] < 0) and not isSolid(levelStr[pos[1]+jumps[jump][ii][1]][pos[0]+jumps[jump][ii][0]]):
neighbors.append([dist+ii+1,(pos[0]+jumps[jump][ii][0],pos[1]+jumps[jump][ii][1],jump,ii,1)])
if not (pos[0]-jumps[jump][ii][0] < 0 or pos[1] < 0) and not isSolid(levelStr[pos[1]+jumps[jump][ii][1]][pos[0]-jumps[jump][ii][0]]):
neighbors.append([dist+ii+1,(pos[0]-jumps[jump][ii][0],pos[1]+jumps[jump][ii][1],jump,ii,-1)])
else:
neighbors.append([dist+1,(pos[0],pos[1]+1,-1)])
if pos[1]+1 <= maxY:
if not isSolid(levelStr[pos[1]+1][pos[0]+1]):
neighbors.append([dist+1.4,(pos[0]+1,pos[1]+1,-1)])
if not isSolid(levelStr[pos[1]+1][pos[0]-1]):
neighbors.append([dist+1.4,(pos[0]-1,pos[1]+1,-1)])
if pos[1]+2 <= maxY:
if not isSolid(levelStr[pos[1]+2][pos[0]+1]):
neighbors.append([dist+2,(pos[0]+1,pos[1]+2,-1)])
if not isSolid(levelStr[pos[1]+2][pos[0]-1]):
neighbors.append([dist+2,(pos[0]-1,pos[1]+2,-1)])
return neighbors
return getNeighbors
def findPaths(subOptimal,solids,jumps,levelStr):
visited = set()
isSolid = makeIsSolid(solids)
getNeighbors = makeGetNeighbors(jumps,levelStr,visited,isSolid)
maxX = len(levelStr[0])-1
paths = pathfinding.astar_shortest_path( (2,2,-1), lambda pos: pos[0] == maxX, getNeighbors, subOptimal,lambda pos: 0)#lambda pos: abs(maxX-pos[0]))
return [[ (p[0],p[1]) for p in path] for path in paths]
if __name__ == "__main__":
import sys
import json
if len(sys.argv) < 3:
        print('Usage: {} <platformer json> <level text filename>'.format(sys.argv[0]))
exit()
levelFilename = sys.argv[2]
level = []
with open(levelFilename) as level_file:
for line in level_file:
level.append(line.rstrip())
with open(sys.argv[1]) as data_file:
platformerDescription = json.load(data_file)
paths = findPaths(10,platformerDescription['solid'],platformerDescription['jumps'],level)
for p in paths:
        print(p)
|
models/smpl_official.py | hwfan/STRAPS-3DHumanShapePose | 118 | 12710619 | import torch
import numpy as np
from smplx import SMPL as _SMPL
from smplx.body_models import ModelOutput
from smplx.lbs import vertices2joints
import config
class SMPL(_SMPL):
"""
Extension of the official SMPL (from the smplx python package) implementation to
support more joints.
"""
def __init__(self, *args, **kwargs):
super(SMPL, self).__init__(*args, **kwargs)
J_regressor_extra = np.load(config.J_REGRESSOR_EXTRA_PATH)
J_regressor_cocoplus = np.load(config.COCOPLUS_REGRESSOR_PATH)
J_regressor_h36m = np.load(config.H36M_REGRESSOR_PATH)
self.register_buffer('J_regressor_extra', torch.tensor(J_regressor_extra,
dtype=torch.float32))
self.register_buffer('J_regressor_cocoplus', torch.tensor(J_regressor_cocoplus,
dtype=torch.float32))
self.register_buffer('J_regressor_h36m', torch.tensor(J_regressor_h36m,
dtype=torch.float32))
def forward(self, *args, **kwargs):
kwargs['get_skin'] = True
smpl_output = super(SMPL, self).forward(*args, **kwargs)
extra_joints = vertices2joints(self.J_regressor_extra, smpl_output.vertices)
cocoplus_joints = vertices2joints(self.J_regressor_cocoplus, smpl_output.vertices)
h36m_joints = vertices2joints(self.J_regressor_h36m, smpl_output.vertices)
all_joints = torch.cat([smpl_output.joints, extra_joints, cocoplus_joints,
h36m_joints], dim=1)
output = ModelOutput(vertices=smpl_output.vertices,
global_orient=smpl_output.global_orient,
body_pose=smpl_output.body_pose,
joints=all_joints,
betas=smpl_output.betas,
full_pose=smpl_output.full_pose)
return output
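# Usage sketch (illustrative; the model path and shapes are assumptions -- any SMPL
# model directory accepted by smplx works, and the pose tensors are axis-angle):
# smpl = SMPL(model_path='/path/to/smpl', batch_size=1)
# out = smpl(body_pose=torch.zeros(1, 69),
#            global_orient=torch.zeros(1, 3),
#            betas=torch.zeros(1, 10))
# out.joints  # standard SMPL joints + extra + cocoplus + H36M joints, concatenated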
|
fuzzers/ECP5/101-dtr/fuzzer.py | Keno/prjtrellis | 256 | 12710642 | from fuzzconfig import FuzzConfig
import nonrouting
import pytrellis
import fuzzloops
import interconnect
cfg = FuzzConfig(job="DTR", family="ECP5", device="LFE5U-45F", ncl="empty.ncl",
tiles=["CIB_R71C22:DTR"])
def get_substs(mode="DTR"):
if mode == "NONE":
comment = "//"
else:
comment = ""
return dict(comment=comment)
def main():
pytrellis.load_database("../../../database")
cfg.setup()
empty_bitfile = cfg.build_design(cfg.ncl, {})
cfg.ncl = "dtr.ncl"
nonrouting.fuzz_enum_setting(cfg, "DTR.MODE", ["NONE", "DTR"],
lambda x: get_substs(mode=x), empty_bitfile)
cfg.ncl = "dtr_routing.ncl"
interconnect.fuzz_interconnect_with_netnames(
cfg,
["R70C22_JSTARTPULSE_DTR"] + ["R70C22_JDTROUT{}_DTR".format(i) for i in range(8)],
bidir=True
)
if __name__ == "__main__":
main()
|
qa/rpc-tests/pingearly.py | MONIMAKER365/BitcoinUnlimited | 535 | 12710669 | #!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Unlimited developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import test_framework.loginit
import os
import os.path
import time
import sys
if sys.version_info[0] < 3:
raise "Use Python 3"
import logging
from binascii import unhexlify
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import NetworkThread
from test_framework.nodemessages import *
from test_framework.bumessages import *
from test_framework.bunode import BasicBUCashNode, VersionlessProtoHandler
class PingEarlyTest(BitcoinTestFramework):
def __init__(self):
self.nodes = []
BitcoinTestFramework.__init__(self)
def setup_chain(self):
pass
def setup_network(self, split=False):
pass
def restart_node(self, send_initial_version = True):
# remove any potential banlist
banlist_fn = os.path.join(
node_regtest_dir(self.options.tmpdir, 0),
"banlist.dat")
print("Banlist file name:", banlist_fn)
try:
os.remove(banlist_fn)
print("Removed old banlist %s.")
except:
pass
stop_nodes(self.nodes)
wait_bitcoinds()
print("Initializing test directory " + self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 1)
self.nodes = [ start_node(0, self.options.tmpdir, ["-debug"]) ]
self.pynode = pynode = BasicBUCashNode()
pynode.connect(0, '127.0.0.1', p2p_port(0), self.nodes[0],
protohandler = VersionlessProtoHandler(),
send_initial_version = send_initial_version)
return pynode.cnxns[0]
def run_test(self):
logging.info("Testing early ping replies")
conn = self.restart_node(send_initial_version = False)
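# Send a ping before the version handshake; the node should still answer with a pong.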
conn.send_message(msg_ping(), pushbuf=True)
nt = NetworkThread()
nt.start()
conn.wait_for(lambda : conn.pong_counter)
conn.connection.disconnect_node()
nt.join()
if __name__ == '__main__':
xvt = PingEarlyTest()
xvt.main()
|
TradzQAI/tools/indicators/build_indicators.py | kkuette/AI_project | 164 | 12710674 | <reponame>kkuette/AI_project
import pandas as pd
from .exponential_moving_average import exponential_moving_average as ema
from .volatility import volatility as vol
from .stochastic import percent_k as K
from .stochastic import percent_d as D
from .relative_strength_index import relative_strength_index as RSI
from .moving_average_convergence_divergence import moving_average_convergence_divergence as macd
from .bollinger_bands import bandwidth as bb
class Indicators():
def __init__(self, settings=None):
self.bb_period = 20
self.rsi_period = 14
self.sd_period = 0
self.sv_period = 0
self.stoch_period = 14
self.volatility_period = 20
self.macd_long = 24
self.macd_short = 12
self.ema_periods = [20, 50, 100]
self.settings = settings
self.build_func = None
self.names = []
def add_building(self, settings=None):
if settings:
self.settings = settings
if self.settings:
self.build_func = []
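# Each build_func entry is [indicator function, output column name, period]; MACD stores [short, long] periods instead.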
for key, value in self.settings.items():
if not value:
continue
elif "RSI" == key and value:
self.names.append('RSI')
if 'default' != value:
self.rsi_period = value
self.build_func.append([RSI, 'RSI', self.rsi_period])
elif "MACD" == key and value:
self.names.append('MACD')
if 'default' != value:
self.macd_long = value[1]
self.macd_short = value[0]
self.build_func.append([macd, 'MACD', [self.macd_short, self.macd_long]])
elif "Volatility" == key and value:
self.names.append('Volatility')
if 'default' != value:
self.volatility_period = value
self.build_func.append([vol, 'Volatility', self.volatility_period])
elif "EMA" == key and value:
if 'default' != value:
for values in value:
self.names.append('EMA'+str(values))
self.build_func.append([ema, 'EMA'+str(values), values])
elif "Bollinger_bands" == key and value:
self.names.append('Bollinger_bands')
if 'default' != value:
self.bb_period = value
self.build_func.append([bb, 'Bollinger_bands', self.bb_period])
elif "Stochastic" == key and value:
self.names.append('Stochastic_D')
self.names.append('Stochastic_K')
if 'default' != value:
self.stoch_period = value
self.build_func.append([D, 'Stochastic_D', self.stoch_period])
self.build_func.append([K, 'Stochastic_K', self.stoch_period])
def build_indicators(self, data):
if not self.build_func:
raise ValueError("No indicators to build.")
indicators = pd.DataFrame(columns=self.names)
for idx in self.build_func:
if "MACD" in idx[1]:
indicators[idx[1]] = idx[0](data, idx[2][0], idx[2][1])
else:
indicators[idx[1]] = idx[0](data, idx[2])
return indicators
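# Minimal usage sketch (assumes `prices` is a pandas Series of closing prices):
#   ind = Indicators(settings={"RSI": "default", "EMA": [20, 50]})
#   ind.add_building()
#   frame = ind.build_indicators(prices)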
|
modules/dbnd-airflow/test_dbnd_airflow/airflow_home/dag_gcp_example/dag_with_remote_fs.py | ipattarapong/dbnd | 224 | 12710688 | <gh_stars>100-1000
from datetime import timedelta
from airflow import DAG
from airflow.utils.dates import days_ago
from dag_test_examples import t_A, t_B
default_args = {
"owner": "airflow",
"depends_on_past": False,
"start_date": days_ago(2),
"retries": 1,
"retry_delay": timedelta(minutes=5),
"dbnd_config": {"databand": {"env": "gcp"}},
}
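# "dbnd_config" in default_args routes these tasks to the gcp environment.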
with DAG(dag_id="dbnd_dag_at_gcp", default_args=default_args) as dag_remote_fs:
a = t_A()
b = t_B(a)
if __name__ == "__main__":
dag_remote_fs.clear()
dag_remote_fs.run(start_date=days_ago(0), end_date=days_ago(0))
|
spirit/user/forms.py | Ke-xueting/Spirit | 974 | 12710695 | <reponame>Ke-xueting/Spirit
# -*- coding: utf-8 -*-
import os
from django import forms
from django.utils.translation import gettext_lazy as _
from django.contrib.auth import get_user_model
from django.utils import timezone
from django.template import defaultfilters
from django.core.files.uploadedfile import UploadedFile
from spirit.core import tasks
from spirit.core.conf import settings
from spirit.core.utils.timezone import timezones
from .models import UserProfile
User = get_user_model()
TIMEZONE_CHOICES = timezones()
Notify = UserProfile.Notify
class CleanEmailMixin:
def clean_email(self):
email = self.cleaned_data["email"]
if settings.ST_CASE_INSENSITIVE_EMAILS:
email = email.lower()
if not settings.ST_UNIQUE_EMAILS:
return email
is_taken = (
User.objects
.filter(email=email)
.exists())
if is_taken:
raise forms.ValidationError(_("The email is taken."))
return email
def get_email(self):
return self.cleaned_data["email"]
class EmailCheckForm(CleanEmailMixin, forms.Form):
email = forms.CharField(label=_("Email"), widget=forms.EmailInput, max_length=254)
class EmailChangeForm(CleanEmailMixin, forms.Form):
email = forms.CharField(label=_("Email"), widget=forms.EmailInput, max_length=254)
password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
def __init__(self, user=None, *args, **kwargs):
self.user = user
super(EmailChangeForm, self).__init__(*args, **kwargs)
def clean_password(self):
password = self.cleaned_data["password"]
if not self.user.check_password(password):
raise forms.ValidationError(_("The provided password is incorrect."))
return password
class UserForm(forms.ModelForm):
class Meta:
model = User
fields = ("first_name", "last_name")
class AvatarWidget(forms.ClearableFileInput):
template_name = 'spirit/user/_image_widget.html'
clear_checkbox_label = _('Remove avatar')
accept = ', '.join(
'.%s' % ext
for ext in sorted(settings.ST_ALLOWED_AVATAR_FORMAT))
class UserProfileForm(forms.ModelForm):
timezone = forms.ChoiceField(
label=_("Time zone"), choices=TIMEZONE_CHOICES)
notify_when = forms.TypedChoiceField(
label=_("Email notifications"), coerce=int, choices=Notify.WHEN)
notify_mentions = forms.BooleanField(
label=_("Email mentions"), required=False)
notify_replies = forms.BooleanField(
label=_("Email replies"), required=False)
class Meta:
model = UserProfile
fields = ("avatar", "location", "timezone")
widgets = {'avatar': AvatarWidget(attrs={'accept': AvatarWidget.accept})}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
now = timezone.localtime(timezone.now())
self.fields['timezone'].help_text = _('Current time is: %(date)s %(time)s') % {
'date': defaultfilters.date(now),
'time': defaultfilters.time(now)}
self.fields['notify_when'].initial = self.instance.notify_when
self.fields['notify_mentions'].initial = bool(
self.instance.notify & Notify.MENTION)
self.fields['notify_replies'].initial = bool(
self.instance.notify & Notify.REPLY)
def clean_avatar(self):
file = self.cleaned_data['avatar']
# can be bool (clear) or not an image (empty)
if not isinstance(file, UploadedFile):
return file
ext = os.path.splitext(file.name)[1].lstrip('.').lower()
if (ext not in settings.ST_ALLOWED_AVATAR_FORMAT or
file.image.format.lower() not in settings.ST_ALLOWED_AVATAR_FORMAT):
raise forms.ValidationError(
_("Unsupported file format. Supported formats are %s.") %
", ".join(settings.ST_ALLOWED_AVATAR_FORMAT))
return file
def clean_notify_mentions(self):
if self.cleaned_data['notify_mentions']:
return Notify.MENTION
return 0
def clean_notify_replies(self):
if self.cleaned_data['notify_replies']:
return Notify.REPLY
return 0
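# The notify_* cleaned values are bit flags; save() ORs them into a single bitmask.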
def save(self, *args, **kwargs):
self.instance.notify = (
self.cleaned_data['notify_when'] |
self.cleaned_data['notify_mentions'] |
self.cleaned_data['notify_replies'])
instance = super().save(*args, **kwargs)
if isinstance(self.cleaned_data['avatar'], UploadedFile):
tasks.make_avatars(self.instance.user_id)
return instance
|
bibliopixel/control/rest/flask_server.py | rec/leds | 253 | 12710714 | <reponame>rec/leds
import flask, werkzeug.serving
from werkzeug.datastructures import ImmutableOrderedMultiDict
from ... util import log
from ... util.threads import runnable
from ... animation.remote import opener
class OrderedFlask(flask.Flask):
# http://flask.pocoo.org/docs/1.0/patterns/subclassing/
class request_class(flask.Request):
parameter_storage_class = ImmutableOrderedMultiDict
class FlaskServer(runnable.LoopThread):
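# Seconds to wait before opening the browser page after startup.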
OPEN_DELAY = 1
def __init__(self, port, external_access, open_page, **kwds):
super().__init__()
self.port = port
self.hostname = '0.0.0.0' if external_access else 'localhost'
self.app = OrderedFlask(__name__, **kwds)
self.open_page = open_page
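# run_simple() blocks until the server is shut down (see stop() below).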
def run_once(self):
if self.open_page:
opener.raw_opener('localhost', self.port, self.OPEN_DELAY)
werkzeug.serving.run_simple(self.hostname, self.port, self.app)
super().stop()
def stop(self):
def error():
log.error('Unable to shut down REST server on port %d', self.port)
super().stop()
try:
flask.request.environ.get('werkzeug.server.shutdown', error)()
except Exception:
log.debug('Exception shutting werkzeug down')
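# Hypothetical usage sketch (LoopThread start/stop API assumed from bibliopixel):
#   server = FlaskServer(5000, external_access=False, open_page=False)
#   server.start()  # serves on localhost:5000 until server.stop() is called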
|
legacy/components/split_gen/utils.py | ParikhKadam/zenml | 1,275 | 12710722 | <filename>legacy/components/split_gen/utils.py
# Copyright (c) ZenML GmbH 2020. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
import os
from typing import List, Text, Dict
import tensorflow_data_validation as tfdv
from tfx import types
from tfx.components.schema_gen.executor import _DEFAULT_FILE_NAME
from tfx.types import artifact_utils
from tfx.types.artifact import Artifact
from tfx.utils import io_utils
def parse_statistics(split_name: Text,
statistics: List[Artifact]):
stats_uri = io_utils.get_only_uri_in_dir(
artifact_utils.get_split_uri(statistics, split_name))
stats = tfdv.load_stats_binary(stats_uri)
return stats
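# Returns None when no schema artifact is supplied; otherwise parses the Schema proto from the artifact URI.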
def parse_schema(input_dict: Dict[Text, List[types.Artifact]]):
schema = input_dict.get('schema', None)
if not schema:
return schema
else:
schema_path = os.path.join(
artifact_utils.get_single_uri(schema),
_DEFAULT_FILE_NAME)
schema_reader = io_utils.SchemaReader()
parsed_schema = schema_reader.read(schema_path)
return parsed_schema
|
perma_web/perma/tests/test_views_user_management.py | rachelaus/perma | 317 | 12710727 | # -*- coding: utf-8 -*-
from django.urls import reverse
from django.core import mail
from django.conf import settings
from django.utils import timezone
from mock import patch, sentinel
from perma.models import LinkUser, Organization, Registrar, Sponsorship
from perma.exceptions import PermaPaymentsCommunicationException
from .utils import PermaTestCase
from random import random
import re
from bs4 import BeautifulSoup
from datetime import datetime
# Fixtures
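# Canned subscription payloads that stand in for responses from the payments service.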
GENESIS = datetime.fromtimestamp(0).replace(tzinfo=timezone.utc)
def spoof_current_monthly_subscription():
return {
"status": "Current",
"rate": "10.00",
"frequency": "monthly",
"paid_through": GENESIS,
"link_limit": 10
}
def spoof_current_monthly_subscription_with_scheduled_downgrade():
return {
"status": "Current",
"rate": "10.00",
"frequency": "monthly",
"paid_through": GENESIS,
"link_limit": 10,
"pending_change": {
"rate": "1.00",
"link_limit": 1,
"effective": GENESIS.replace(year=9999)
},
}
def spoof_on_hold_monthly_subscription():
return {
"status": "Hold",
"rate": "7777.77",
"frequency": "monthly",
"paid_through": GENESIS,
"link_limit": 10
}
def spoof_cancellation_requested_subscription():
return {
"status": "Cancellation Requested",
"rate": "3333.33",
"frequency": "monthly",
"paid_through": GENESIS,
"link_limit": 10
}
# Tests
class UserManagementViewsTestCase(PermaTestCase):
@classmethod
def setUpTestData(cls):
cls.admin_user = LinkUser.objects.get(pk=1)
cls.registrar_user = LinkUser.objects.get(pk=2)
cls.sponsored_user = LinkUser.objects.get(pk=20)
cls.another_sponsored_user = LinkUser.objects.get(pk=21)
cls.inactive_sponsored_user = LinkUser.objects.get(pk=22)
cls.another_inactive_sponsored_user = LinkUser.objects.get(pk=23)
cls.regular_user = LinkUser.objects.get(pk=4)
cls.registrar = cls.registrar_user.registrar
cls.pending_registrar = Registrar.objects.get(pk=2)
cls.unrelated_registrar = Registrar.objects.get(pk=2)
cls.unrelated_registrar_user = cls.unrelated_registrar.users.first()
cls.organization = Organization.objects.get(pk=1)
cls.organization_user = cls.organization.users.first()
cls.another_organization = Organization.objects.get(pk=2)
cls.unrelated_organization = cls.unrelated_registrar.organizations.first()
cls.unrelated_organization_user = cls.unrelated_organization.users.first()
cls.another_unrelated_organization_user = cls.unrelated_organization.users.get(pk=11)
cls.deletable_organization = Organization.objects.get(pk=3)
### Helpers ###
def pk_from_email(self, email):
return LinkUser.objects.get(email=email).pk
### REGISTRAR A/E/D VIEWS ###
def test_registrar_list_filters(self):
# test assumptions: two registrars, one pending, one approved
response = self.get('user_management_manage_registrar',
user=self.admin_user).content
soup = BeautifulSoup(response, 'html.parser')
count = soup.select('.sort-filter-count')[0].text
self.assertEqual("Found: 4 registrars", count)
self.assertEqual(response.count(b'needs approval'), 1)
# get just approved registrars
response = self.get('user_management_manage_registrar',
user=self.admin_user,
request_kwargs={'data':{'status':'approved'}}).content
soup = BeautifulSoup(response, 'html.parser')
count = soup.select('.sort-filter-count')[0].text
self.assertEqual("Found: 3 registrars", count)
self.assertEqual(response.count(b'needs approval'), 0)
# get just pending registrars
response = self.get('user_management_manage_registrar',
user=self.admin_user,
request_kwargs={'data': {'status': 'pending'}}).content
soup = BeautifulSoup(response, 'html.parser')
count = soup.select('.sort-filter-count')[0].text
self.assertEqual("Found: 1 registrar", count)
self.assertEqual(response.count(b'needs approval'), 1)
def test_registrar_user_list_filters(self):
# test assumptions: five users
# - one deactivated
# - one unactivated
# - one from Test Library, three from Another Library, one from Test Firm
response = self.get('user_management_manage_registrar_user',
user=self.admin_user).content
soup = BeautifulSoup(response, 'html.parser')
count = soup.select('.sort-filter-count')[0].text
self.assertEqual("Found: 5 users", count)
self.assertEqual(response.count(b'deactivated account'), 1)
self.assertEqual(response.count(b'User must activate account'), 1)
# registrar name appears by each user, and once in the filter dropdown
self.assertEqual(response.count(b'Test Library'), 2)
self.assertEqual(response.count(b'Another Library'), 4)
self.assertEqual(response.count(b'Test Firm'), 2)
# filter by registrar
response = self.get('user_management_manage_registrar_user',
user=self.admin_user,
request_kwargs={'data':{'registrar': 1}}).content
soup = BeautifulSoup(response, 'html.parser')
count = soup.select('.sort-filter-count')[0].text
self.assertEqual("Found: 1 user", count)
response = self.get('user_management_manage_registrar_user',
user=self.admin_user,
request_kwargs={'data':{'registrar': 2}}).content
soup = BeautifulSoup(response, 'html.parser')
count = soup.select('.sort-filter-count')[0].text
self.assertEqual("Found: 3 users", count)
response = self.get('user_management_manage_registrar_user',
user=self.admin_user,
request_kwargs={'data':{'registrar': 4}}).content
soup = BeautifulSoup(response, 'html.parser')
count = soup.select('.sort-filter-count')[0].text
self.assertEqual("Found: 1 user", count)
# filter by status
response = self.get('user_management_manage_registrar_user',
user=self.admin_user,
request_kwargs={'data':{'status': 'active'}}).content
soup = BeautifulSoup(response, 'html.parser')
count = soup.select('.sort-filter-count')[0].text
self.assertEqual("Found: 3 users", count)
self.assertEqual(response.count(b'deactivated account'), 0)
self.assertEqual(response.count(b'User must activate account'), 0)
response = self.get('user_management_manage_registrar_user',
user=self.admin_user,
request_kwargs={'data':{'status': 'deactivated'}}).content
soup = BeautifulSoup(response, 'html.parser')
count = soup.select('.sort-filter-count')[0].text
self.assertEqual("Found: 1 user", count)
self.assertEqual(response.count(b'deactivated account'), 1)
self.assertEqual(response.count(b'User must activate account'), 0)
response = self.get('user_management_manage_registrar_user',
user=self.admin_user,
request_kwargs={'data':{'status': 'unactivated'}}).content
soup = BeautifulSoup(response, 'html.parser')
count = soup.select('.sort-filter-count')[0].text
self.assertEqual("Found: 1 user", count)
self.assertEqual(response.count(b'deactivated account'), 0)
self.assertEqual(response.count(b'User must activate account'), 1)
def test_admin_can_create_registrar(self):
self.submit_form(
'user_management_manage_registrar', {
'a-name':'test_views_registrar',
'a-email':'<EMAIL>',
'a-website':'http://test.com'
},
user=self.admin_user,
success_url=reverse('user_management_manage_registrar'),
success_query=Registrar.objects.filter(name='test_views_registrar'))
def test_admin_can_update_registrar(self):
self.submit_form('user_management_manage_single_registrar',
user=self.admin_user,
reverse_kwargs={'args':[self.unrelated_registrar.pk]},
data={
'a-name': 'new_name',
'a-email': '<EMAIL>',
'a-website': 'http://test.com'},
success_url=reverse('user_management_manage_registrar'),
success_query=Registrar.objects.filter(name='new_name'))
def test_registrar_can_update_registrar(self):
self.submit_form('user_management_manage_single_registrar',
user=self.registrar_user,
reverse_kwargs={'args': [self.registrar.pk]},
data={
'a-name': 'new_name',
'a-email': '<EMAIL>',
'a-website': 'http://test.com'},
success_url=reverse('user_management_settings_affiliations'),
success_query=Registrar.objects.filter(name='new_name'))
def test_registrar_cannot_update_unrelated_registrar(self):
self.get('user_management_manage_single_registrar',
user=self.registrar_user,
reverse_kwargs={'args': [self.unrelated_registrar.pk]},
require_status_code=404)
def test_admin_can_approve_pending_registrar(self):
self.submit_form('user_management_approve_pending_registrar',
user=self.admin_user,
data={'status':'approved'},
reverse_kwargs={'args': [self.pending_registrar.pk]},
success_query=Registrar.objects.filter(pk=self.pending_registrar.pk,
status="approved").exists())
def test_admin_can_deny_pending_registrar(self):
self.submit_form('user_management_approve_pending_registrar',
user=self.admin_user,
data={'status': 'denied'},
reverse_kwargs={'args': [self.pending_registrar.pk]},
success_query=Registrar.objects.filter(pk=self.pending_registrar.pk,
status="denied").exists())
### ORGANIZATION A/E/D VIEWS ###
def test_organization_list_filters(self):
# test assumptions: six orgs, three for Test Library and one for Another Library, two for Test Firm
response = self.get('user_management_manage_organization',
user=self.admin_user).content
soup = BeautifulSoup(response, 'html.parser')
count = soup.select('.sort-filter-count')[0].text
self.assertEqual("Found: 6 organizations", count)
# registrar name appears by each org, once in the filter dropdown, once in the "add an org" markup
self.assertEqual(response.count(b'Test Library'), 3 + 2)
self.assertEqual(response.count(b'Test Firm'), 2 + 2)
# 'Another Library' needs special handling because the fixture's org is
# named 'Another Library's journal'. The "string" search finds the instance
# by the org and the instance in the filter dropdown, but not the <option> in the "add an org" markup
self.assertEqual(len(soup.find_all(string=re.compile(r"Another Library(?!')"))), 1 + 1)
# get orgs for a single registrar
response = self.get('user_management_manage_organization',
user=self.admin_user,
request_kwargs={'data': {'registrar': 1}}).content
soup = BeautifulSoup(response, 'html.parser')
count = soup.select('.sort-filter-count')[0].text
self.assertEqual("Found: 3 organizations", count)
response = self.get('user_management_manage_organization',
user=self.admin_user,
request_kwargs={'data': {'registrar': 2}}).content
soup = BeautifulSoup(response, 'html.parser')
count = soup.select('.sort-filter-count')[0].text
self.assertEqual("Found: 1 organization", count)
def test_org_user_list_filters(self):
# test assumptions: seven users
# - three from Test Journal
# - one from Another Journal
# - three from A Third Journal
# - three from Another Library's Journal
# - one from Some Case
response = self.get('user_management_manage_organization_user',
user=self.admin_user).content
soup = BeautifulSoup(response, 'html.parser')
count = soup.select('.sort-filter-count')[0].text
self.assertEqual("Found: 7 users", count)
# registrar name appears by each user, and once in the filter dropdown
self.assertEqual(response.count(b'Test Journal'), 3 + 1)
self.assertEqual(response.count(b'Another Journal'), 1 + 1)
self.assertEqual(response.count(b"A Third Journal"), 3 + 1)
self.assertEqual(response.count(b"Another Library's Journal"), 3 + 1)
self.assertEqual(response.count(b"Some Case"), 1 + 1)
# filter by org
response = self.get('user_management_manage_organization_user',
user=self.admin_user,
request_kwargs={'data':{'org': 1}}).content
soup = BeautifulSoup(response, 'html.parser')
count = soup.select('.sort-filter-count')[0].text
self.assertEqual("Found: 3 users", count)
response = self.get('user_management_manage_organization_user',
user=self.admin_user,
request_kwargs={'data':{'org': 2}}).content
soup = BeautifulSoup(response, 'html.parser')
count = soup.select('.sort-filter-count')[0].text
self.assertEqual("Found: 1 user", count)
response = self.get('user_management_manage_organization_user',
user=self.admin_user,
request_kwargs={'data':{'org': 3}}).content
soup = BeautifulSoup(response, 'html.parser')
count = soup.select('.sort-filter-count')[0].text
self.assertEqual("Found: 3 users", count)
response = self.get('user_management_manage_organization_user',
user=self.admin_user,
request_kwargs={'data':{'org': 4}}).content
soup = BeautifulSoup(response, 'html.parser')
count = soup.select('.sort-filter-count')[0].text
self.assertEqual("Found: 3 users", count)
response = self.get('user_management_manage_organization_user',
user=self.admin_user,
request_kwargs={'data':{'org': 5}}).content
soup = BeautifulSoup(response, 'html.parser')
count = soup.select('.sort-filter-count')[0].text
self.assertEqual("Found: 1 user", count)
# filter by registrar
response = self.get('user_management_manage_organization_user',
user=self.admin_user,
request_kwargs={'data':{'registrar': 1}}).content
soup = BeautifulSoup(response, 'html.parser')
count = soup.select('.sort-filter-count')[0].text
self.assertEqual("Found: 5 users", count)
response = self.get('user_management_manage_organization_user',
user=self.admin_user,
request_kwargs={'data':{'registrar': 2}}).content
soup = BeautifulSoup(response, 'html.parser')
count = soup.select('.sort-filter-count')[0].text
self.assertEqual("Found: 3 users", count)
response = self.get('user_management_manage_organization_user',
user=self.admin_user,
request_kwargs={'data':{'registrar': 4}}).content
soup = BeautifulSoup(response, 'html.parser')
count = soup.select('.sort-filter-count')[0].text
self.assertEqual("Found: 1 user", count)
# status filter tested in test_registrar_user_list_filters
def test_sponsored_user_list_filters(self):
# test assumptions: four users, with five sponsorships between them
# - two users with active sponsorships, two users with inactive sponsorships
# - two sponsored by Test Library, two from Another Library, one from A Third Library
response = self.get('user_management_manage_sponsored_user',
user=self.admin_user).content
soup = BeautifulSoup(response, 'html.parser')
count = soup.select('.sort-filter-count')[0].text
self.assertEqual("Found: 4 users", count)
self.assertEqual(response.count(b'(inactive sponsorship)'), 2)
# registrar name appears by each user, and once in the filter dropdown
self.assertEqual(response.count(b'Test Library'), 3)
self.assertEqual(response.count(b'Another Library'), 3)
self.assertEqual(response.count(b'A Third Library'), 2)
# filter by registrar
response = self.get('user_management_manage_sponsored_user',
user=self.admin_user,
request_kwargs={'data':{'registrar': 1}}).content
soup = BeautifulSoup(response, 'html.parser')
count = soup.select('.sort-filter-count')[0].text
self.assertEqual("Found: 2 users", count)
response = self.get('user_management_manage_sponsored_user',
user=self.admin_user,
request_kwargs={'data':{'registrar': 2}}).content
soup = BeautifulSoup(response, 'html.parser')
count = soup.select('.sort-filter-count')[0].text
self.assertEqual("Found: 2 users", count)
response = self.get('user_management_manage_sponsored_user',
user=self.admin_user,
request_kwargs={'data':{'registrar': 3}}).content
soup = BeautifulSoup(response, 'html.parser')
count = soup.select('.sort-filter-count')[0].text
self.assertEqual("Found: 1 user", count)
# filter by sponsorship status
response = self.get('user_management_manage_sponsored_user',
user=self.admin_user,
request_kwargs={'data':{'sponsorship_status': 'active'}}).content
soup = BeautifulSoup(response, 'html.parser')
count = soup.select('.sort-filter-count')[0].text
self.assertEqual("Found: 2 users", count)
response = self.get('user_management_manage_sponsored_user',
user=self.admin_user,
request_kwargs={'data':{'sponsorship_status': 'inactive'}}).content
soup = BeautifulSoup(response, 'html.parser')
count = soup.select('.sort-filter-count')[0].text
self.assertEqual("Found: 2 users", count)
# user status filter tested in test_registrar_user_list_filters
def test_admin_can_create_organization(self):
self.submit_form('user_management_manage_organization',
user=self.admin_user,
data={
'a-name': 'new_name',
'a-registrar': self.registrar.pk},
success_url=reverse('user_management_manage_organization'),
success_query=Organization.objects.filter(name='new_name'))
def test_registrar_can_create_organization(self):
self.submit_form('user_management_manage_organization',
user=self.registrar_user,
data={
'a-name': 'new_name'},
success_url=reverse('user_management_manage_organization'),
success_query=Organization.objects.filter(name='new_name'))
def test_admin_can_update_organization(self):
self.submit_form('user_management_manage_single_organization',
user=self.admin_user,
reverse_kwargs={'args':[self.organization.pk]},
data={
'a-name': 'new_name',
'a-registrar': self.registrar.pk},
success_url=reverse('user_management_manage_organization'),
success_query=Organization.objects.filter(name='new_name'))
def test_registrar_can_update_organization(self):
self.submit_form('user_management_manage_single_organization',
user=self.registrar_user,
reverse_kwargs={'args':[self.organization.pk]},
data={
'a-name': 'new_name'},
success_url=reverse('user_management_manage_organization'),
success_query=Organization.objects.filter(name='new_name'))
def test_org_user_can_update_organization(self):
self.submit_form('user_management_manage_single_organization',
user=self.organization_user,
reverse_kwargs={'args': [self.organization.pk]},
data={
'a-name': 'new_name'},
success_url=reverse('user_management_manage_organization'),
success_query=Organization.objects.filter(name='new_name'))
def test_registrar_cannot_update_unrelated_organization(self):
self.get('user_management_manage_single_organization',
user=self.registrar_user,
reverse_kwargs={'args': [self.unrelated_organization.pk]},
require_status_code=404)
def test_org_user_cannot_update_unrelated_organization(self):
self.get('user_management_manage_single_organization',
user=self.organization_user,
reverse_kwargs={'args': [self.unrelated_organization.pk]},
require_status_code=404)
def _delete_organization(self, user, should_succeed=True):
if should_succeed:
self.submit_form('user_management_manage_single_organization_delete',
user=user,
reverse_kwargs={'args': [self.deletable_organization.pk]},
success_url=reverse('user_management_manage_organization'),
success_query=Organization.objects.filter(user_deleted=True, pk=self.deletable_organization.pk))
else:
self.submit_form('user_management_manage_single_organization_delete',
user=user,
reverse_kwargs={'args': [self.deletable_organization.pk]},
require_status_code=404)
def test_admin_user_can_delete_empty_organization(self):
self._delete_organization(self.admin_user)
self._delete_organization(self.admin_user, False)
def test_registrar_user_can_delete_empty_organization(self):
self._delete_organization(self.deletable_organization.registrar.users.first())
self._delete_organization(self.deletable_organization.registrar.users.first(), False)
def test_org_user_can_delete_empty_organization(self):
self._delete_organization(self.deletable_organization.users.first())
self._delete_organization(self.deletable_organization.users.first(), False)
def test_cannot_delete_nonempty_organization(self):
self.submit_form('user_management_manage_single_organization_delete',
user=self.admin_user,
reverse_kwargs={'args': [self.organization.pk]},
require_status_code=404)
### USER A/E/D VIEWS ###
def test_user_list_filters(self):
# test assumptions: six users
# - one aspiring court user, faculty user, journal user
response = self.get('user_management_manage_user',
user=self.admin_user).content
soup = BeautifulSoup(response, 'html.parser')
count = soup.select('.sort-filter-count')[0].text
self.assertEqual("Found: 6 users", count)
self.assertEqual(response.count(b'Interested in a court account'), 1)
self.assertEqual(response.count(b'Interested in a journal account'), 1)
self.assertEqual(response.count(b'Interested in a faculty account'), 1)
# filter by requested_account_type ("upgrade")
response = self.get('user_management_manage_user',
user=self.admin_user,
request_kwargs={'data':{'upgrade': 'court'}}).content
soup = BeautifulSoup(response, 'html.parser')
count = soup.select('.sort-filter-count')[0].text
self.assertEqual("Found: 1 user", count)
self.assertEqual(response.count(b'Interested in a court account'), 1)
self.assertEqual(response.count(b'Interested in a journal account'), 0)
self.assertEqual(response.count(b'Interested in a faculty account'), 0)
response = self.get('user_management_manage_user',
user=self.admin_user,
request_kwargs={'data':{'upgrade': 'journal'}}).content
soup = BeautifulSoup(response, 'html.parser')
count = soup.select('.sort-filter-count')[0].text
self.assertEqual("Found: 1 user", count)
self.assertEqual(response.count(b'Interested in a court account'), 0)
self.assertEqual(response.count(b'Interested in a journal account'), 1)
self.assertEqual(response.count(b'Interested in a faculty account'), 0)
response = self.get('user_management_manage_user',
user=self.admin_user,
request_kwargs={'data':{'upgrade': 'faculty'}}).content
soup = BeautifulSoup(response, 'html.parser')
count = soup.select('.sort-filter-count')[0].text
self.assertEqual("Found: 1 user", count)
self.assertEqual(response.count(b'Interested in a court account'), 0)
self.assertEqual(response.count(b'Interested in a journal account'), 0)
self.assertEqual(response.count(b'Interested in a faculty account'), 1)
# status filter tested in test_registrar_user_list_filters
def test_create_and_delete_user(self):
self.log_in_user(self.admin_user)
base_user = {
'a-first_name':'First',
'a-last_name':'Last',
}
email = '<EMAIL>'
for view_name, form_extras in [
['registrar_user', {'a-registrar': 1}],
['user', {}],
['organization_user', {'a-organizations': 1}],
['sponsored_user', {'a-sponsoring_registrars': 1}],
]:
# create user
email += '1'
self.submit_form('user_management_' + view_name + '_add_user',
data=dict(list(base_user.items()) + list(form_extras.items()) + [['a-e-address', email]]),
success_url=reverse('user_management_manage_' + view_name),
success_query=LinkUser.objects.filter(email=email))
new_user = LinkUser.objects.get(email=email)
# delete user (deactivate)
new_user.is_confirmed = True
new_user.save()
self.submit_form('user_management_manage_single_' + view_name + '_delete',
reverse_kwargs={'args': [new_user.pk]},
success_url=reverse('user_management_manage_' + view_name))
# reactivate user
self.submit_form('user_management_manage_single_' + view_name + '_reactivate',
reverse_kwargs={'args': [new_user.pk]},
success_url=reverse('user_management_manage_' + view_name))
# delete user (really delete)
new_user.is_confirmed = False
new_user.save()
self.submit_form('user_management_manage_single_' + view_name + '_delete',
reverse_kwargs={'args': [new_user.pk]},
success_url=reverse('user_management_manage_' + view_name))
### ADDING NEW USERS TO ORGANIZATIONS ###
def test_admin_user_can_add_new_user_to_org(self):
self.log_in_user(self.admin_user)
self.submit_form('user_management_organization_user_add_user',
data={'a-organizations': self.organization.pk,
'a-first_name': 'First',
'a-last_name': 'Last',
'a-e-address': '<EMAIL>'},
query_params={'email': '<EMAIL>'},
success_url=reverse('user_management_manage_organization_user'),
success_query=LinkUser.objects.filter(email='<EMAIL>',
organizations=self.organization).exists())
def test_registrar_user_can_add_new_user_to_org(self):
self.log_in_user(self.registrar_user)
self.submit_form('user_management_organization_user_add_user',
data={'a-organizations': self.organization.pk,
'a-first_name': 'First',
'a-last_name': 'Last',
'a-e-address': '<EMAIL>'},
query_params={'email': '<EMAIL>'},
success_url=reverse('user_management_manage_organization_user'),
success_query=LinkUser.objects.filter(email='<EMAIL>',
organizations=self.organization).exists())
def test_org_user_can_add_new_user_to_org(self):
self.log_in_user(self.organization_user)
self.submit_form('user_management_organization_user_add_user',
data={'a-organizations': self.organization.pk,
'a-first_name': 'First',
'a-last_name': 'Last',
'a-e-address': '<EMAIL>'},
query_params={'email': '<EMAIL>'},
success_url=reverse('user_management_manage_organization_user'),
success_query=LinkUser.objects.filter(email='<EMAIL>',
organizations=self.organization).exists())
def test_registrar_user_cannot_add_new_user_to_inaccessible_org(self):
self.log_in_user(self.registrar_user)
self.submit_form('user_management_organization_user_add_user',
data={'a-organizations': self.unrelated_organization.pk,
'a-first_name': 'First',
'a-last_name': 'Last',
'a-e-address': '<EMAIL>'},
query_params={'email': '<EMAIL>'},
error_keys=['organizations'])
self.assertFalse(LinkUser.objects.filter(email='<EMAIL>',
organizations=self.unrelated_organization).exists())
def test_org_user_cannot_add_new_user_to_inaccessible_org(self):
self.log_in_user(self.organization_user)
self.submit_form('user_management_organization_user_add_user',
data={'a-organizations': self.unrelated_organization.pk,
'a-first_name': 'First',
'a-last_name': 'Last',
'a-e-address': '<EMAIL>'},
query_params={'email': '<EMAIL>'},
error_keys=['organizations'])
self.assertFalse(LinkUser.objects.filter(email='<EMAIL>',
organizations=self.unrelated_organization).exists())
### ADDING EXISTING USERS TO ORGANIZATIONS ###
def test_admin_user_can_add_existing_user_to_org(self):
self.log_in_user(self.admin_user)
self.submit_form('user_management_organization_user_add_user',
data={'a-organizations': self.organization.pk},
query_params={'email': self.regular_user.email},
success_url=reverse('user_management_manage_organization_user'),
success_query=self.regular_user.organizations.filter(pk=self.organization.pk))
def test_registrar_user_can_add_existing_user_to_org(self):
self.log_in_user(self.registrar_user)
self.submit_form('user_management_organization_user_add_user',
data={'a-organizations': self.organization.pk},
query_params={'email': self.regular_user.email},
success_url=reverse('user_management_manage_organization_user'),
success_query=self.regular_user.organizations.filter(pk=self.organization.pk))
def test_org_user_can_add_existing_user_to_org(self):
self.log_in_user(self.organization_user)
self.submit_form('user_management_organization_user_add_user',
data={'a-organizations': self.organization.pk},
query_params={'email': self.regular_user.email},
success_url=reverse('user_management_manage_organization_user'),
success_query=self.regular_user.organizations.filter(pk=self.organization.pk))
def test_registrar_user_cannot_add_existing_user_to_inaccessible_org(self):
self.log_in_user(self.registrar_user)
self.submit_form('user_management_organization_user_add_user',
data={'a-organizations': self.unrelated_organization.pk},
query_params={'email': self.regular_user.email},
error_keys=['organizations'])
self.assertFalse(self.regular_user.organizations.filter(pk=self.unrelated_organization.pk).exists())
def test_org_user_cannot_add_existing_user_to_inaccessible_org(self):
self.log_in_user(self.organization_user)
self.submit_form('user_management_organization_user_add_user',
data={'a-organizations': self.another_organization.pk},
query_params={'email': self.regular_user.email},
error_keys=['organizations'])
self.assertFalse(self.regular_user.organizations.filter(pk=self.another_organization.pk).exists())
def test_cannot_add_admin_user_to_org(self):
self.log_in_user(self.organization_user)
resp = self.submit_form('user_management_organization_user_add_user',
data={'a-organizations': self.organization.pk},
query_params={'email': self.admin_user.email})
self.assertIn(b"is an admin user", resp.content)
self.assertFalse(self.admin_user.organizations.exists())
def test_cannot_add_registrar_user_to_org(self):
self.log_in_user(self.organization_user)
resp = self.submit_form('user_management_organization_user_add_user',
data={'a-organizations': self.organization.pk},
query_params={'email': self.registrar_user.email})
self.assertIn(b"is already a registrar user", resp.content)
self.assertFalse(self.registrar_user.organizations.exists())
### VOLUNTARILY LEAVING ORGANIZATIONS ###
def test_org_user_can_leave_org(self):
u = LinkUser.objects.get(email='<EMAIL>')
orgs = u.organizations.all()
# check assumptions
self.assertEqual(len(orgs), 2)
# 404 if tries to leave non-existent org
self.submit_form('user_management_organization_user_leave_organization',
user=u,
data={},
reverse_kwargs={'args': [999]},
require_status_code=404)
# returns to affiliations page if still a member of at least one org
self.submit_form('user_management_organization_user_leave_organization',
user=u,
data={},
reverse_kwargs={'args': [orgs[0].pk]},
success_url=reverse('user_management_settings_affiliations'))
# returns to create/manage page if no longer a member of any orgs
self.submit_form('user_management_organization_user_leave_organization',
user=u,
data={},
reverse_kwargs={'args': [orgs[1].pk]},
success_url=reverse('create_link'))
# 404 if tries to leave an org they are not a member of
self.submit_form('user_management_organization_user_leave_organization',
user=u,
data={},
reverse_kwargs={'args': [orgs[1].pk]},
require_status_code=404)
### REMOVING USERS FROM ORGANIZATIONS ###
# Just try to access the page with remove/deactivate links
def test_registrar_can_edit_org_user(self):
# User from one of registrar's own orgs succeeds
self.log_in_user(self.registrar_user)
self.get('user_management_manage_single_organization_user',
reverse_kwargs={'args': [self.organization_user.pk]})
# User from another registrar's org fails
self.get('user_management_manage_single_organization_user',
reverse_kwargs={'args': [self.another_unrelated_organization_user.pk]},
require_status_code=404)
# Repeat with the other registrar, to confirm we're
# getting 404s because of permission reasons, not because the
# test fixtures are broken.
self.log_in_user(self.unrelated_registrar_user)
self.get('user_management_manage_single_organization_user',
reverse_kwargs={'args': [self.organization_user.pk]},
require_status_code=404)
self.get('user_management_manage_single_organization_user',
reverse_kwargs={'args': [self.another_unrelated_organization_user.pk]})
def test_org_can_edit_org_user(self):
# User from own org succeeds
org_one_users = ['<EMAIL>', '<EMAIL>']
org_two_users = ['<EMAIL>', '<EMAIL>']
self.log_in_user(org_one_users[0])
self.get('user_management_manage_single_organization_user',
reverse_kwargs={'args': [self.pk_from_email(org_one_users[1])]})
# User from another org fails
self.get('user_management_manage_single_organization_user',
reverse_kwargs={'args': [self.pk_from_email(org_two_users[0])]},
require_status_code=404)
# Repeat in reverse, to confirm we're
# getting 404s because of permission reasons, not because the
# test fixtures are broken.
self.log_in_user(org_two_users[1])
self.get('user_management_manage_single_organization_user',
reverse_kwargs={'args': [self.pk_from_email(org_one_users[1])]},
require_status_code=404)
# User from another org fails
self.get('user_management_manage_single_organization_user',
reverse_kwargs={'args': [self.pk_from_email(org_two_users[0])]})
# Actually try removing them
def test_can_remove_user_from_organization(self):
self.log_in_user(self.registrar_user)
self.submit_form('user_management_manage_single_organization_user_remove',
data={'org': self.organization.pk},
reverse_kwargs={'args': [self.organization_user.pk]},
success_url=reverse('user_management_manage_organization_user'))
self.assertFalse(self.organization_user.organizations.filter(pk=self.organization.pk).exists())
def test_registrar_cannot_remove_unrelated_user_from_organization(self):
self.log_in_user(self.registrar_user)
self.submit_form('user_management_manage_single_organization_user_remove',
data={'org': self.unrelated_organization.pk},
reverse_kwargs={'args': [self.unrelated_organization_user.pk]},
require_status_code=404)
def test_org_user_cannot_remove_unrelated_user_from_organization(self):
self.log_in_user(self.organization_user)
self.submit_form('user_management_manage_single_organization_user_remove',
data={'org': self.unrelated_organization.pk},
reverse_kwargs={'args': [self.unrelated_organization_user.pk]},
require_status_code=404)
def test_can_remove_self_from_organization(self):
self.log_in_user(self.organization_user)
self.submit_form('user_management_manage_single_organization_user_remove',
data={'org': self.organization.pk},
reverse_kwargs={'args': [self.organization_user.pk]},
success_url=reverse('create_link'))
self.assertFalse(self.organization_user.organizations.filter(pk=self.organization.pk).exists())
### ADDING NEW USERS TO REGISTRARS AS SPONSORED USERS ###
def test_admin_user_can_add_new_sponsored_user_to_registrar(self):
address = '<EMAIL>'
self.log_in_user(self.admin_user)
self.submit_form('user_management_sponsored_user_add_user',
data={'a-sponsoring_registrars': self.registrar.pk,
'a-first_name': 'First',
'a-last_name': 'Last',
'a-e-address': address},
query_params={'email': address},
success_url=reverse('user_management_manage_sponsored_user'))
# Check that everything is set up correctly (we'll do this once, here, and not repeat in other tests)
user = LinkUser.objects.get(email=address, sponsoring_registrars=self.registrar)
sponsorship = user.sponsorships.first()
sponsored_folder = sponsorship.folders.get()
self.assertEqual(sponsorship.status, 'active')
self.assertEqual(sponsored_folder.parent, user.sponsored_root_folder)
self.assertFalse(sponsored_folder.read_only)
# Try to add the same person again; should fail
response = self.submit_form('user_management_sponsored_user_add_user',
data={'a-sponsoring_registrars': self.registrar.pk,
'a-first_name': 'First',
'a-last_name': 'Last',
'a-e-address': address},
query_params={'email': address}).content
self.assertIn(bytes("Select a valid choice. That choice is not one of the available choices", 'utf-8'), response)
def test_registrar_user_can_add_new_sponsored_user_to_registrar(self):
address = '<EMAIL>'
self.log_in_user(self.registrar_user)
self.submit_form('user_management_sponsored_user_add_user',
data={'a-sponsoring_registrars': self.registrar.pk,
'a-first_name': 'First',
'a-last_name': 'Last',
'a-e-address': address},
query_params={'email': address},
success_url=reverse('user_management_manage_sponsored_user'),
success_query=LinkUser.objects.filter(email=address,
sponsoring_registrars=self.registrar).exists())
# Try to add the same person again; should fail
response = self.submit_form('user_management_sponsored_user_add_user',
data={'a-sponsoring_registrars': self.registrar.pk,
'a-first_name': 'First',
'a-last_name': 'Last',
'a-e-address': address},
query_params={'email': address}).content
self.assertIn(bytes("{} is already sponsored by your registrar.".format(address), 'utf-8'), response)
def test_registrar_user_cannot_add_sponsored_user_to_inaccessible_registrar(self):
self.log_in_user(self.registrar_user)
self.submit_form('user_management_sponsored_user_add_user',
data={'a-sponsoring_registrars': self.unrelated_registrar.pk,
'a-first_name': 'First',
'a-last_name': 'Last',
'a-e-address': '<EMAIL>'},
query_params={'email': '<EMAIL>'},
error_keys=['sponsoring_registrars'])
self.assertFalse(LinkUser.objects.filter(email='<EMAIL>',
sponsoring_registrars=self.unrelated_registrar).exists())
### ADDING EXISTING USERS TO REGISTRARS AS SPONSORED USERS ###
def test_admin_user_can_add_sponsorship_to_existing_user(self):
self.log_in_user(self.admin_user)
self.submit_form('user_management_sponsored_user_add_user',
data={'a-sponsoring_registrars': self.registrar.pk},
query_params={'email': self.regular_user.email},
success_url=reverse('user_management_manage_sponsored_user'),
success_query=LinkUser.objects.filter(pk=self.regular_user.pk, sponsoring_registrars=self.registrar))
def test_registrar_user_can_add_sponsorship_to_existing_user(self):
self.log_in_user(self.registrar_user)
self.submit_form('user_management_sponsored_user_add_user',
data={'a-sponsoring_registrars': self.registrar.pk},
query_params={'email': self.regular_user.email},
success_url=reverse('user_management_manage_sponsored_user'),
success_query=LinkUser.objects.filter(pk=self.regular_user.pk, sponsoring_registrars=self.registrar))
def test_registrar_user_cannot_add_sponsorship_for_other_registrar_to_existing_user(self):
self.log_in_user(self.registrar_user)
self.submit_form('user_management_sponsored_user_add_user',
data={'a-sponsoring_registrars': self.unrelated_registrar.pk},
query_params={'email': self.regular_user.email},
error_keys=['sponsoring_registrars'])
self.assertFalse(LinkUser.objects.filter(pk=self.regular_user.pk, sponsoring_registrars=self.unrelated_registrar).exists())
### TOGGLING THE STATUS OF SPONSORSHIPS ###
def test_admin_user_can_deactivate_active_sponsorship(self):
sponsorship = Sponsorship.objects.get(user=self.sponsored_user, registrar=self.registrar, status='active')
self.assertTrue(all(not folder.read_only for folder in sponsorship.folders))
self.log_in_user(self.admin_user)
self.submit_form('user_management_manage_single_sponsored_user_remove',
reverse_kwargs={'args': [self.sponsored_user.id, self.registrar.id]},
success_url=reverse('user_management_manage_single_sponsored_user', args=[self.sponsored_user.id]))
sponsorship.refresh_from_db()
self.assertEqual(sponsorship.status, 'inactive')
self.assertTrue(all(folder.read_only for folder in sponsorship.folders))
def test_admin_user_can_reactivate_inactive_sponsorship(self):
sponsorship = Sponsorship.objects.get(user=self.inactive_sponsored_user, registrar=self.registrar, status='inactive')
self.assertTrue(all(folder.read_only for folder in sponsorship.folders))
self.log_in_user(self.admin_user)
self.submit_form('user_management_manage_single_sponsored_user_readd',
reverse_kwargs={'args': [self.inactive_sponsored_user.id, self.registrar.id]},
success_url=reverse('user_management_manage_single_sponsored_user', args=[self.inactive_sponsored_user.id]))
sponsorship.refresh_from_db()
self.assertEqual(sponsorship.status, 'active')
self.assertTrue(all(not folder.read_only for folder in sponsorship.folders))
def test_registrar_user_can_deactivate_active_sponsorship(self):
sponsorship = Sponsorship.objects.get(user=self.sponsored_user, registrar=self.registrar, status='active')
self.assertTrue(all(not folder.read_only for folder in sponsorship.folders))
self.log_in_user(self.registrar_user)
self.submit_form('user_management_manage_single_sponsored_user_remove',
reverse_kwargs={'args': [self.sponsored_user.id, self.registrar.id]},
success_url=reverse('user_management_manage_single_sponsored_user', args=[self.sponsored_user.id]))
sponsorship.refresh_from_db()
self.assertEqual(sponsorship.status, 'inactive')
self.assertTrue(all(folder.read_only for folder in sponsorship.folders))
def test_registrar_user_cannot_deactivate_active_sponsorship_for_other_registrar(self):
self.assertTrue(self.unrelated_registrar in self.another_sponsored_user.sponsoring_registrars.all())
self.log_in_user(self.registrar_user)
self.submit_form('user_management_manage_single_sponsored_user_remove',
reverse_kwargs={'args': [self.another_sponsored_user.id, self.unrelated_registrar.id]},
require_status_code=404)
def test_registrar_user_can_reactivate_inactive_sponsorship(self):
sponsorship = Sponsorship.objects.get(user=self.inactive_sponsored_user, registrar=self.registrar, status='inactive')
self.assertTrue(all(folder.read_only for folder in sponsorship.folders))
self.log_in_user(self.registrar_user)
self.submit_form('user_management_manage_single_sponsored_user_readd',
reverse_kwargs={'args': [self.inactive_sponsored_user.id, self.registrar.id]},
success_url=reverse('user_management_manage_single_sponsored_user', args=[self.inactive_sponsored_user.id]))
sponsorship.refresh_from_db()
self.assertEqual(sponsorship.status, 'active')
self.assertTrue(all(not folder.read_only for folder in sponsorship.folders))
def test_registrar_user_cannot_reactivate_inactive_sponsorship_for_other_registrar(self):
sponsorship = Sponsorship.objects.get(user=self.another_inactive_sponsored_user, registrar=self.unrelated_registrar, status='inactive')
self.log_in_user(self.registrar_user)
self.submit_form('user_management_manage_single_sponsored_user_readd',
reverse_kwargs={'args': [self.another_inactive_sponsored_user.id, self.unrelated_registrar.id]},
require_status_code=404)
sponsorship.refresh_from_db()
self.assertEqual(sponsorship.status, 'inactive')
### ADDING NEW USERS TO REGISTRARS AS REGISTRAR USERS) ###
def test_admin_user_can_add_new_user_to_registrar(self):
address = '<EMAIL>'
self.log_in_user(self.admin_user)
self.submit_form('user_management_registrar_user_add_user',
data={'a-registrar': self.registrar.pk,
'a-first_name': 'First',
'a-last_name': 'Last',
'a-e-address': address},
query_params={'email': address},
success_url=reverse('user_management_manage_registrar_user'),
success_query=LinkUser.objects.filter(email=address,
registrar=self.registrar).exists())
def test_registrar_user_can_add_new_user_to_registrar(self):
address = '<EMAIL>'
self.log_in_user(self.registrar_user)
self.submit_form('user_management_registrar_user_add_user',
data={'a-registrar': self.registrar.pk,
'a-first_name': 'First',
'a-last_name': 'Last',
'a-e-address': address},
query_params={'email': address},
success_url=reverse('user_management_manage_registrar_user'),
success_query=LinkUser.objects.filter(email=address,
registrar=self.registrar).exists())
# Try to add the same person again; should fail
response = self.submit_form('user_management_registrar_user_add_user',
data={'a-registrar': self.registrar.pk,
'a-first_name': 'First',
'a-last_name': 'Last',
'a-e-address': address},
query_params={'email': address}).content
self.assertIn(bytes("{} is already a registrar user for your registrar.".format(address), 'utf-8'), response)
def test_registrar_user_cannot_add_new_user_to_inaccessible_registrar(self):
self.log_in_user(self.registrar_user)
self.submit_form('user_management_registrar_user_add_user',
data={'a-registrar': self.unrelated_registrar.pk,
'a-first_name': 'First',
'a-last_name': 'Last',
'a-e-address': '<EMAIL>'},
query_params={'email': '<EMAIL>'},
error_keys=['registrar'])
self.assertFalse(LinkUser.objects.filter(email='<EMAIL>',
registrar=self.unrelated_registrar).exists())
### ADDING EXISTING USERS TO REGISTRARS ###
def test_admin_user_can_add_existing_user_to_registrar(self):
self.log_in_user(self.admin_user)
self.submit_form('user_management_registrar_user_add_user',
data={'a-registrar': self.registrar.pk},
query_params={'email': self.regular_user.email},
success_url=reverse('user_management_manage_registrar_user'),
success_query=LinkUser.objects.filter(pk=self.regular_user.pk, registrar=self.registrar))
def test_registrar_user_can_add_existing_user_to_registrar(self):
self.log_in_user(self.registrar_user)
self.submit_form('user_management_registrar_user_add_user',
data={'a-registrar': self.registrar.pk},
query_params={'email': self.regular_user.email},
success_url=reverse('user_management_manage_registrar_user'),
success_query=LinkUser.objects.filter(pk=self.regular_user.pk, registrar=self.registrar))
def test_registrar_user_can_upgrade_org_user_to_registrar(self):
self.log_in_user(self.registrar_user)
self.submit_form('user_management_registrar_user_add_user',
data={'a-registrar': self.registrar.pk},
query_params={'email': self.organization_user.email},
success_url=reverse('user_management_manage_registrar_user'),
success_query=LinkUser.objects.filter(pk=self.organization_user.pk, registrar=self.registrar))
self.assertFalse(LinkUser.objects.filter(pk=self.organization_user.pk, organizations=self.organization).exists())
def test_registrar_user_cannot_upgrade_unrelated_org_user_to_registrar(self):
self.log_in_user(self.registrar_user)
resp = self.submit_form('user_management_registrar_user_add_user',
data={'a-registrar': self.registrar.pk},
query_params={'email': self.unrelated_organization_user.email})
self.assertIn(b"belongs to organizations that are not controlled by your registrar", resp.content)
self.assertFalse(LinkUser.objects.filter(pk=self.unrelated_organization_user.pk, registrar=self.registrar).exists())
def test_registrar_user_cannot_add_existing_user_to_inaccessible_registrar(self):
self.log_in_user(self.registrar_user)
self.submit_form('user_management_registrar_user_add_user',
data={'a-registrar': self.unrelated_registrar.pk},
query_params={'email': self.regular_user.email},
error_keys=['registrar'])
self.assertFalse(LinkUser.objects.filter(pk=self.regular_user.pk, registrar=self.unrelated_registrar).exists())
def test_cannot_add_admin_user_to_registrar(self):
self.log_in_user(self.registrar_user)
resp = self.submit_form('user_management_registrar_user_add_user',
data={'a-registrar': self.registrar.pk},
query_params={'email': self.admin_user.email})
self.assertIn(b"is an admin user", resp.content)
self.assertFalse(LinkUser.objects.filter(pk=self.admin_user.pk, registrar=self.registrar).exists())
def test_cannot_add_registrar_user_to_registrar(self):
self.log_in_user(self.registrar_user)
resp = self.submit_form('user_management_registrar_user_add_user',
data={'a-registrar': self.registrar.pk},
query_params={'email': self.unrelated_registrar_user.email})
self.assertIn(b"is already a member of another registrar", resp.content)
self.assertFalse(LinkUser.objects.filter(pk=self.unrelated_registrar_user.pk, registrar=self.registrar).exists())
### REMOVING REGISTRAR USERS FROM REGISTRARS ###
def test_can_remove_user_from_registrar(self):
self.log_in_user(self.registrar_user)
self.regular_user.registrar = self.registrar
self.regular_user.save()
self.submit_form('user_management_manage_single_registrar_user_remove',
reverse_kwargs={'args': [self.regular_user.pk]},
success_url=reverse('user_management_manage_registrar_user'))
self.assertFalse(LinkUser.objects.filter(pk=self.regular_user.pk, registrar=self.registrar).exists())
def test_registrar_cannot_remove_unrelated_user_from_registrar(self):
self.log_in_user(self.registrar_user)
self.submit_form('user_management_manage_single_registrar_user_remove',
reverse_kwargs={'args': [self.unrelated_registrar_user.pk]},
require_status_code=404)
def test_can_remove_self_from_registrar(self):
self.log_in_user(self.registrar_user)
self.submit_form('user_management_manage_single_registrar_user_remove',
reverse_kwargs={'args': [self.registrar_user.pk]},
success_url=reverse('create_link'))
self.assertFalse(LinkUser.objects.filter(pk=self.registrar_user.pk, registrar=self.registrar).exists())
### ADDING NEW USERS AS ADMINS ###
def test_admin_user_can_add_new_user_as_admin(self):
self.log_in_user(self.admin_user)
self.submit_form('user_management_admin_user_add_user',
data={'a-first_name': 'First',
'a-last_name': 'Last',
'a-e-address': '<EMAIL>'},
query_params={'email': '<EMAIL>'},
success_url=reverse('user_management_manage_admin_user'),
success_query=LinkUser.objects.filter(email='<EMAIL>',
is_staff=True).exists())
### ADDING EXISTING USERS AS ADMINS ###
def test_admin_user_can_add_existing_user_as_admin(self):
self.log_in_user(self.admin_user)
self.submit_form('user_management_admin_user_add_user',
query_params={'email': self.regular_user.email},
success_url=reverse('user_management_manage_admin_user'),
success_query=LinkUser.objects.filter(pk=self.regular_user.pk, is_staff=True))
### REMOVING USERS AS ADMINS ###
def test_can_remove_user_from_admin(self):
self.log_in_user(self.admin_user)
self.regular_user.is_staff = True
self.regular_user.save()
self.submit_form('user_management_manage_single_admin_user_remove',
reverse_kwargs={'args': [self.regular_user.pk]},
success_url=reverse('user_management_manage_admin_user'))
self.assertFalse(LinkUser.objects.filter(pk=self.regular_user.pk, is_staff=True).exists())
def test_can_remove_self_from_admin(self):
self.log_in_user(self.admin_user)
self.submit_form('user_management_manage_single_admin_user_remove',
reverse_kwargs={'args': [self.admin_user.pk]},
success_url=reverse('create_link'))
self.assertFalse(LinkUser.objects.filter(pk=self.admin_user.pk, is_staff=True).exists())
### SETTINGS ###
def test_user_can_change_own_settings(self):
self.submit_form('user_management_settings_profile',
user=self.admin_user,
data={
'a-first_name': 'Newfirst',
'a-last_name': 'Newlast',
'a-e-address': '<EMAIL>'
},
success_url=reverse('user_management_settings_profile'),
success_query=LinkUser.objects.filter(first_name='Newfirst'))
def test_user_can_request_deletion_once(self):
deletion_url = reverse('user_management_delete_account')
self.assertNotIn('Requested account deletion', self.regular_user.notes)
response1 = self.get('user_management_settings_profile',
user=self.regular_user).content
self.assertIn(bytes('<form method="post" action="{}"'.format(deletion_url), 'utf-8'), response1)
response2 = self.post('user_management_delete_account',
user=self.regular_user,
request_kwargs={"follow": True}).content
self.assertNotIn(bytes('<form method="post" action="{}"'.format(deletion_url), 'utf-8'), response2)
self.assertIn(b'Deletion Request Received', response2)
self.regular_user.refresh_from_db()
self.assertIn('Requested account deletion', self.regular_user.notes)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertEqual(message.subject, 'Perma.cc account deletion request')
def test_edit_org_privacy(self):
'''
Can an authorized user change the privacy setting of an org?
'''
# Toggle as an org user
response = self.get('user_management_settings_organizations_change_privacy', reverse_kwargs={'args':[1]},
user='<EMAIL>').content
self.assertIn(b"Your Perma Links are currently <strong>Public</strong> by default.", response)
self.submit_form('user_management_settings_organizations_change_privacy', reverse_kwargs={'args':[1]},
user='<EMAIL>',
data={},
success_url=reverse('user_management_settings_affiliations'))
response = self.get('user_management_settings_organizations_change_privacy', reverse_kwargs={'args':[1]},
user='<EMAIL>').content
self.assertIn(b"Your Perma Links are currently <strong>Private</strong> by default.", response)
# Toggle as a registrar user
self.submit_form('user_management_settings_organizations_change_privacy', reverse_kwargs={'args':[1]},
user='<EMAIL>',
data={},
success_url=reverse('user_management_manage_organization'))
response = self.get('user_management_settings_organizations_change_privacy', reverse_kwargs={'args':[1]},
user='<EMAIL>').content
self.assertIn(b"Your Perma Links are currently <strong>Public</strong> by default.", response)
# Toggle as a staff user
self.submit_form('user_management_settings_organizations_change_privacy', reverse_kwargs={'args':[1]},
user='<EMAIL>',
data={},
success_url=reverse('user_management_manage_organization'))
response = self.get('user_management_settings_organizations_change_privacy', reverse_kwargs={'args':[1]},
user='<EMAIL>').content
self.assertIn(b"Your Perma Links are currently <strong>Private</strong> by default.", response)
# As staff, try to access non-existent org
self.get('user_management_settings_organizations_change_privacy', reverse_kwargs={'args':[99999]},
user='<EMAIL>',
require_status_code=404)
# Subscription, Individuals (except registrar users)
def test_nonpaying_user_cannot_see_usage_plan_page(self):
u = LinkUser.objects.get(email='<EMAIL>')
assert not u.can_view_usage_plan()
self.get('user_management_settings_usage_plan',
user=u,
require_status_code=403)
def test_regular_user_can_see_usage_plan_page(self):
u = LinkUser.objects.get(email='<EMAIL>')
assert u.can_view_usage_plan()
self.get('user_management_settings_usage_plan',
user=u,
require_status_code=200)
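# The tests below patch prep_for_perma_payments (which encrypts form data for the
# payment processor) to return a sentinel value, so assertions can simply look for
# that sentinel in the rendered subscription forms.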
@patch('perma.models.prep_for_perma_payments', autospec=True)
@patch('perma.models.LinkUser.get_subscription', autospec=True)
@patch('perma.models.LinkUser.get_purchase_history', autospec=True)
def test_no_purchase_history_section_if_no_one_time_purchases(self, get_purchase_history, get_subscription, prepped):
u = LinkUser.objects.get(email='<EMAIL>')
get_subscription.return_value = None
get_purchase_history.return_value = {'purchases': [], 'total_links': 0}
prepped.return_value = bytes(str(sentinel.prepped), 'utf-8')
r = self.get('user_management_settings_usage_plan',
user=u)
self.assertNotIn(b'Purchase History', r.content)
get_subscription.assert_called_once_with(u)
get_purchase_history.assert_called_once_with(u)
@patch('perma.models.prep_for_perma_payments', autospec=True)
@patch('perma.models.LinkUser.get_subscription', autospec=True)
@patch('perma.models.LinkUser.get_purchase_history', autospec=True)
def test_purchase_history_present_if_one_time_purchases(self, get_purchase_history, get_subscription, prepped):
u = LinkUser.objects.get(email='<EMAIL>')
get_subscription.return_value = None
get_purchase_history.return_value = {
'purchases': [
{'link_quantity': 10, 'date': GENESIS},
{'link_quantity': 3, 'date': GENESIS}
],
'total_links': 13
}
prepped.return_value = bytes(str(sentinel.prepped), 'utf-8')
r = self.get('user_management_settings_usage_plan',
user=u)
self.assertIn(b'Purchase History', r.content)
self.assertIn(b'10 Links', r.content)
self.assertIn(b'3 Links', r.content)
self.assertIn(b'13 Links', r.content)
self.assertIn(b'January 1, 1970', r.content)
get_subscription.assert_called_once_with(u)
get_purchase_history.assert_called_once_with(u)
@patch('perma.models.prep_for_perma_payments', autospec=True)
@patch('perma.models.LinkUser.get_subscription', autospec=True)
@patch('perma.models.LinkUser.get_purchase_history', autospec=True)
def test_subscribe_form_if_no_standing_subscription(self, get_purchase_history, get_subscription, prepped):
u = LinkUser.objects.get(email='<EMAIL>')
get_subscription.return_value = None
get_purchase_history.return_value = {'purchases': [], 'total_links': 0}
prepped.return_value = bytes(str(sentinel.prepped), 'utf-8')
r = self.get('user_management_settings_usage_plan',
user=u)
individual_tier_count = len(settings.TIERS['Individual'])
bonus_package_count = len(settings.BONUS_PACKAGES)
self.assertIn(b'Get More Personal Links', r.content)
self.assertIn(b'Purchase a personal subscription', r.content)
self.assertIn(b'<form class="purchase-form', r.content, bonus_package_count)
self.assertIn(b'<form class="upgrade-form', r.content, individual_tier_count)
self.assertIn(b'<input type="hidden" name="encrypted_data"', r.content, individual_tier_count + bonus_package_count)
self.assertIn(prepped.return_value, r.content)
get_subscription.assert_called_once_with(u)
get_purchase_history.assert_called_once_with(u)
@patch('perma.models.prep_for_perma_payments', autospec=True)
@patch('perma.models.LinkUser.get_subscription', autospec=True)
@patch('perma.models.LinkUser.get_purchase_history', autospec=True)
def test_update_button_cancel_button_and_subscription_info_present_if_standing_subscription(self, get_purchase_history, get_subscription, prepped):
u = LinkUser.objects.get(email='<EMAIL>')
subscription = spoof_current_monthly_subscription()
get_subscription.return_value = subscription
get_purchase_history.return_value = {'purchases': [], 'total_links': 0}
prepped.return_value = bytes(str(sentinel.prepped), 'utf-8')
r = self.get('user_management_settings_usage_plan',
user=u)
self.assertIn(b'Rate', r.content)
self.assertIn(b'Paid Through', r.content)
self.assertIn(bytes(subscription['status'].lower(), 'utf-8'), r.content)
self.assertIn(b'Modify Subscription', r.content)
self.assertContains(r, '<input type="hidden" name="account_type"', 2)
self.assertIn(b'Cancel Subscription', r.content)
get_subscription.assert_called_once_with(u)
get_purchase_history.assert_called_once_with(u)
@patch('perma.models.LinkUser.get_subscription', autospec=True)
@patch('perma.models.LinkUser.get_purchase_history', autospec=True)
def test_help_present_if_subscription_on_hold(self, get_purchase_history, get_subscription):
u = LinkUser.objects.get(email='<EMAIL>')
subscription = spoof_on_hold_monthly_subscription()
get_subscription.return_value = subscription
get_purchase_history.return_value = {'purchases': [], 'total_links': 0}
r = self.get('user_management_settings_usage_plan',
user=u)
self.assertIn(b'problem with your credit card', r.content)
get_subscription.assert_called_once_with(u)
get_purchase_history.assert_called_once_with(u)
@patch('perma.models.LinkUser.get_subscription', autospec=True)
@patch('perma.models.LinkUser.get_purchase_history', autospec=True)
def test_cancellation_info_present_if_cancellation_requested(self, get_purchase_history, get_subscription):
u = LinkUser.objects.get(email='<EMAIL>')
subscription = spoof_cancellation_requested_subscription()
get_subscription.return_value = subscription
get_purchase_history.return_value = {'purchases': [], 'total_links': 0}
r = self.get('user_management_settings_usage_plan',
user=u)
bonus_package_count = len(settings.BONUS_PACKAGES)
self.assertIn(b'Get More Personal Links', r.content)
self.assertIn(b'<input type="hidden" name="encrypted_data"', r.content, bonus_package_count)
self.assertIn(b'received the request to cancel', r.content)
get_subscription.assert_called_once_with(u)
get_purchase_history.assert_called_once_with(u)
@patch('perma.models.LinkUser.get_subscription', autospec=True)
def test_apology_page_displayed_if_perma_payments_is_down(self, get_subscription):
u = LinkUser.objects.get(email='<EMAIL>')
get_subscription.side_effect = PermaPaymentsCommunicationException
r = self.get('user_management_settings_usage_plan',
user=u)
self.assertNotIn(b'<input type="hidden" name="encrypted_data"', r.content)
self.assertIn(b'is currently unavailable', r.content)
get_subscription.assert_called_once_with(u)
def test_unauthorized_user_cannot_see_cancellation_page(self):
u = LinkUser.objects.get(email='<EMAIL>')
assert not u.can_view_usage_plan()
self.post('user_management_settings_subscription_cancel',
user=u,
require_status_code=403)
def test_authorized_user_cant_use_get_for_cancellation_page(self):
u = LinkUser.objects.get(email='<EMAIL>')
assert u.can_view_usage_plan()
self.get('user_management_settings_subscription_cancel',
user=u,
require_status_code=405)
@patch('perma.views.user_management.prep_for_perma_payments', autospec=True)
@patch('perma.models.LinkUser.get_subscription', autospec=True)
def test_authorized_user_cancellation_confirm_form(self, get_subscription, prepped):
u = LinkUser.objects.get(email='<EMAIL>')
assert u.can_view_usage_plan()
subscription = spoof_current_monthly_subscription()
get_subscription.return_value = subscription
prepped.return_value = bytes(str(sentinel.prepped), 'utf-8')
r = self.post('user_management_settings_subscription_cancel',
user=u,
data={'account_type':'Individual'})
self.assertIn(b'<input type="hidden" name="encrypted_data"', r.content)
self.assertIn(prepped.return_value, r.content)
self.assertIn(b'Are you sure you want to cancel', r.content)
get_subscription.assert_called_once_with(u)
@patch('perma.models.LinkUser.get_subscription', autospec=True)
def test_update_page_if_no_standing_subscription(self, get_subscription):
u = LinkUser.objects.get(email='<EMAIL>')
get_subscription.return_value = None
self.post('user_management_settings_subscription_update',
user=u,
data={'account_type':'Individual'},
require_status_code=403)
@patch('perma.views.user_management.prep_for_perma_payments', autospec=True)
@patch('perma.models.prep_for_perma_payments', autospec=True)
@patch('perma.models.LinkUser.get_subscription', autospec=True)
def test_update_page_if_standing_subscription(self, get_subscription, prepped, prepped_v):
u = LinkUser.objects.get(email='<EMAIL>')
subscription = spoof_current_monthly_subscription()
get_subscription.return_value = subscription
prepped.return_value = bytes(str(sentinel.prepped), 'utf-8')
prepped_v.return_value = bytes(str(sentinel.prepped), 'utf-8')
r = self.post('user_management_settings_subscription_update',
user=u,
data={'account_type':'Individual'})
# Should be able to up/downgrade to all monthly individual tiers, except the current tier
available_tiers = len([tier for tier in settings.TIERS['Individual'] if tier['period'] == 'monthly']) - 1
self.assertContains(r, 'Update Credit Card Information')
self.assertContains(r, '<input type="hidden" name="encrypted_data"', 1)
self.assertContains(r, 'Change Plan')
self.assertNotContains(r, 'Cancel Scheduled Downgrade')
self.assertContains(r, '<input required type="radio" name="encrypted_data"', available_tiers)
self.assertContains(r, prepped.return_value, available_tiers + 1)
get_subscription.assert_called_once_with(u)
@patch('perma.views.user_management.prep_for_perma_payments', autospec=True)
@patch('perma.models.prep_for_perma_payments', autospec=True)
@patch('perma.models.LinkUser.get_subscription', autospec=True)
def test_update_page_if_downgrade_scheduled(self, get_subscription, prepped, prepped_v):
u = LinkUser.objects.get(email='<EMAIL>')
subscription = spoof_current_monthly_subscription_with_scheduled_downgrade()
get_subscription.return_value = subscription
prepped.return_value = bytes(str(sentinel.prepped), 'utf-8')
prepped_v.return_value = bytes(str(sentinel.prepped), 'utf-8')
r = self.post('user_management_settings_subscription_update',
user=u,
data={'account_type':'Individual'})
self.assertContains(r, 'Update Credit Card Information')
self.assertContains(r, 'Cancel Scheduled Downgrade')
self.assertContains(r, '<input type="hidden" name="encrypted_data"', 2)
self.assertNotContains(r, '<input required type="radio" name="encrypted_data"')
self.assertContains(r, prepped.return_value, 2)
get_subscription.assert_called_once_with(u)
@patch('perma.views.user_management.prep_for_perma_payments', autospec=True)
@patch('perma.models.LinkUser.get_subscription', autospec=True)
def test_update_page_if_subscription_on_hold(self, get_subscription, prepped):
u = LinkUser.objects.get(email='<EMAIL>')
subscription = spoof_on_hold_monthly_subscription()
get_subscription.return_value = subscription
prepped.return_value = bytes(str(sentinel.prepped), 'utf-8')
r = self.post('user_management_settings_subscription_update',
user=u,
data={'account_type':'Individual'})
self.assertContains(r, 'Update Credit Card Information')
self.assertContains(r, '<input type="hidden" name="encrypted_data"', 1)
self.assertContains(r, prepped.return_value, 1)
self.assertNotContains(r, 'Change Plan')
get_subscription.assert_called_once_with(u)
@patch('perma.views.user_management.prep_for_perma_payments', autospec=True)
@patch('perma.models.LinkUser.get_subscription', autospec=True)
def test_update_page_if_cancellation_requested(self, get_subscription, prepped):
u = LinkUser.objects.get(email='<EMAIL>')
subscription = spoof_cancellation_requested_subscription()
get_subscription.return_value = subscription
prepped.return_value = bytes(str(sentinel.prepped), 'utf-8')
r = self.post('user_management_settings_subscription_update',
user=u,
data={'account_type':'Individual'})
self.assertContains(r, 'Update Credit Card Information')
self.assertContains(r, '<input type="hidden" name="encrypted_data"', 1)
self.assertContains(r, prepped.return_value, 1)
self.assertNotContains(r, 'Change Plan')
get_subscription.assert_called_once_with(u)
# Subscription, Registrar Users
@patch('perma.models.prep_for_perma_payments', autospec=True)
@patch('perma.models.Registrar.get_subscription', autospec=True)
@patch('perma.models.LinkUser.get_subscription', autospec=True)
@patch('perma.models.LinkUser.get_purchase_history', autospec=True)
def test_registrar_user_nonpaying_registrar(self, get_purchase_history, get_subscription_u, get_subscription_r, prepped):
u = LinkUser.objects.get(email='<EMAIL>')
get_subscription_u.return_value = None
get_purchase_history.return_value = {'purchases': [], 'total_links': 0}
prepped.return_value = bytes(str(sentinel.prepped), 'utf-8')
r = self.get('user_management_settings_usage_plan',
user=u)
# Individual tiers should be available; no registrar section should be present
individual_tier_count = len(settings.TIERS['Individual'])
bonus_package_count = len(settings.BONUS_PACKAGES)
self.assertIn(b'Get More Personal Links', r.content)
self.assertIn(b'Purchase a personal subscription', r.content)
self.assertNotIn(b'Purchase a subscription for Test Firm', r.content)
self.assertContains(r, '<form class="purchase-form', bonus_package_count)
self.assertContains(r, '<form class="upgrade-form', individual_tier_count)
self.assertContains(r, '<input type="hidden" name="encrypted_data"', individual_tier_count + bonus_package_count)
self.assertContains(r, prepped.return_value, individual_tier_count + bonus_package_count)
get_subscription_u.assert_called_once_with(u)
self.assertEqual(get_subscription_r.call_count, 0)
get_purchase_history.assert_called_once_with(u)
@patch('perma.models.prep_for_perma_payments', autospec=True)
@patch('perma.models.Registrar.get_subscription', autospec=True)
@patch('perma.models.LinkUser.get_subscription', autospec=True)
@patch('perma.models.LinkUser.get_purchase_history', autospec=True)
def test_allpaying_registrar_user_sees_both_subscribe_forms(self, get_purchase_history, get_subscription_u, get_subscription_r, prepped):
u = LinkUser.objects.get(email='<EMAIL>')
get_subscription_u.return_value = None
get_subscription_r.return_value = None
get_purchase_history.return_value = {'purchases': [], 'total_links': 0}
prepped.return_value = bytes(str(sentinel.prepped), 'utf-8')
r = self.get('user_management_settings_usage_plan',
user=u)
# all tiers should be offered, both individual and registrar-level
tier_count = len(settings.TIERS['Individual']) + len(settings.TIERS['Registrar'])
bonus_package_count = len(settings.BONUS_PACKAGES)
self.assertIn(b'Get More Personal Links', r.content)
self.assertIn(b'Purchase a personal subscription', r.content)
self.assertIn(b'Purchase a subscription for Test Firm', r.content)
self.assertContains(r, '<form class="purchase-form', bonus_package_count)
self.assertContains(r, '<form class="upgrade-form', tier_count)
self.assertContains(r, '<input type="hidden" name="encrypted_data"', tier_count + bonus_package_count)
self.assertContains(r, prepped.return_value, tier_count + bonus_package_count)
get_subscription_u.assert_called_once_with(u)
get_subscription_r.assert_called_once_with(u.registrar)
get_purchase_history.assert_called_once_with(u)
@patch('perma.models.prep_for_perma_payments', autospec=True)
@patch('perma.models.Registrar.get_subscription', autospec=True)
@patch('perma.models.LinkUser.get_subscription', autospec=True)
@patch('perma.models.LinkUser.get_purchase_history', autospec=True)
def test_allpaying_registrar_user_sees_subscriptions_independently(self, get_purchase_history, get_subscription_u, get_subscription_r, prepped):
u = LinkUser.objects.get(email='<EMAIL>')
get_subscription_u.return_value = None
subscription = spoof_current_monthly_subscription()
get_subscription_r.return_value = subscription
get_purchase_history.return_value = {'purchases': [], 'total_links': 0}
prepped.return_value = bytes(str(sentinel.prepped), 'utf-8')
r = self.get('user_management_settings_usage_plan',
user=u)
# Individual tiers should be available; the registrar's subscription should be present
individual_tier_count = len(settings.TIERS['Individual'])
bonus_package_count = len(settings.BONUS_PACKAGES)
self.assertIn(b'Get More Personal Links', r.content)
self.assertIn(b'Purchase a personal subscription', r.content)
self.assertNotIn(b'Purchase a subscription for Test Firm', r.content)
self.assertContains(r, '<form class="purchase-form', bonus_package_count)
self.assertContains(r, '<form class="upgrade-form', individual_tier_count)
self.assertContains(r, '<input type="hidden" name="encrypted_data"', individual_tier_count + bonus_package_count)
self.assertContains(r, prepped.return_value, individual_tier_count + bonus_package_count)
self.assertIn(b'Rate', r.content)
self.assertIn(b'Paid Through', r.content)
self.assertIn(bytes(subscription['status'].lower(), 'utf-8'), r.content)
self.assertIn(b'Modify Subscription', r.content)
self.assertContains(r, '<input type="hidden" name="account_type"', 2)
self.assertIn(b'Cancel Subscription', r.content)
get_subscription_u.assert_called_once_with(u)
get_subscription_r.assert_called_once_with(u.registrar)
get_purchase_history.assert_called_once_with(u)
@patch('perma.views.user_management.prep_for_perma_payments', autospec=True)
@patch('perma.models.Registrar.get_subscription', autospec=True)
@patch('perma.models.LinkUser.get_subscription', autospec=True)
def test_allpaying_registrar_user_personal_cancellation_confirm_form(self, get_subscription_u, get_subscription_r, prepped):
u = LinkUser.objects.get(email='<EMAIL>')
assert u.can_view_usage_plan()
subscription = spoof_current_monthly_subscription()
get_subscription_u.return_value = subscription
prepped.return_value = bytes(str(sentinel.prepped), 'utf-8')
r = self.post('user_management_settings_subscription_cancel',
user=u,
data={'account_type':'Individual'})
self.assertIn(b'<input type="hidden" name="encrypted_data"', r.content)
self.assertIn(prepped.return_value, r.content)
self.assertIn(b'Are you sure you want to cancel', r.content)
self.assertNotIn(b'Test Firm', r.content)
self.assertIn(b'personal', r.content)
get_subscription_u.assert_called_once_with(u)
@patch('perma.views.user_management.prep_for_perma_payments', autospec=True)
@patch('perma.models.Registrar.get_subscription', autospec=True)
@patch('perma.models.LinkUser.get_subscription', autospec=True)
def test_allpaying_registrar_user_institutional_cancellation_confirm_form(self, get_subscription_u, get_subscription_r, prepped):
u = LinkUser.objects.get(email='<EMAIL>')
assert u.can_view_usage_plan()
subscription = spoof_current_monthly_subscription()
get_subscription_r.return_value = subscription
prepped.return_value = bytes(str(sentinel.prepped), 'utf-8')
r = self.post('user_management_settings_subscription_cancel',
user=u,
data={'account_type':'Registrar'})
self.assertIn(b'<input type="hidden" name="encrypted_data"', r.content)
self.assertIn(prepped.return_value, r.content)
self.assertIn(b'Are you sure you want to cancel', r.content)
self.assertIn(b'Test Firm', r.content)
self.assertNotIn(b'Personal', r.content)
get_subscription_r.assert_called_once_with(u.registrar)
@patch('perma.views.user_management.prep_for_perma_payments', autospec=True)
@patch('perma.models.LinkUser.get_subscription', autospec=True)
def test_allpaying_registrar_user_personal_update_form(self, get_subscription, prepped):
u = LinkUser.objects.get(email='<EMAIL>')
assert u.can_view_usage_plan()
subscription = spoof_current_monthly_subscription()
get_subscription.return_value = subscription
prepped.return_value = bytes(str(sentinel.prepped), 'utf-8')
r = self.post('user_management_settings_subscription_update',
user=u,
data={'account_type':'Individual'})
self.assertNotIn(b'Test Firm', r.content)
self.assertIn(b'Personal', r.content)
get_subscription.assert_called_once_with(u)
@patch('perma.views.user_management.prep_for_perma_payments', autospec=True)
@patch('perma.models.Registrar.get_subscription', autospec=True)
def test_allpaying_registrar_user_institutional_update_form(self, get_subscription, prepped):
u = LinkUser.objects.get(email='<EMAIL>')
assert u.can_view_usage_plan()
subscription = spoof_current_monthly_subscription()
get_subscription.return_value = subscription
prepped.return_value = bytes(str(sentinel.prepped), 'utf-8')
r = self.post('user_management_settings_subscription_update',
user=u,
data={'account_type':'Registrar'})
self.assertIn(b'Test Firm', r.content)
self.assertNotIn(b'personal', r.content)
get_subscription.assert_called_once_with(u.registrar)
# Tools
def test_api_key(self):
response = self.get('user_management_settings_tools',
user='<EMAIL>').content
self.assertNotIn(b'id="id_api_key"', response)
self.submit_form('api_key_create',
user='<EMAIL>',
data={},
success_url=reverse('user_management_settings_tools'))
response = self.get('user_management_settings_tools',
user='<EMAIL>').content
soup = BeautifulSoup(response, 'html.parser')
key = soup.find('input', {'id': 'id_api_key'})
val = key.get('value', '')
self.assertTrue(val)
# do it again, and make sure the key changes
self.submit_form('api_key_create',
user='<EMAIL>',
data={},
success_url=reverse('user_management_settings_tools'))
response = self.get('user_management_settings_tools',
user='<EMAIL>').content
soup = BeautifulSoup(response, 'html.parser')
key = soup.find('input', {'id': 'id_api_key'})
new_val = key.get('value', '')
self.assertTrue(new_val)
self.assertNotEqual(val, new_val)
# Affiliations
def test_affiliations(self):
'''
Does the expected information show up on the affiliations page?
(Tries not to be overly picky about the page design and markup.)
'''
# As an org user
response = self.get('user_management_settings_affiliations',
user='<EMAIL>').content
soup = BeautifulSoup(response, 'html.parser')
registrars = soup.select('h4 a')
self.assertEqual(len(registrars), 2)
for registrar in registrars:
self.assertTrue(registrar.text.strip())
orgs = soup.select('.settings-block p')
self.assertEqual(len(orgs), 4)
for org in orgs:
self.assertTrue(org.text.strip())
# As a registrar user
response = self.get('user_management_settings_affiliations',
user='test_registrar_<EMAIL>').content
soup = BeautifulSoup(response, 'html.parser')
registrars = soup.select('h4')
self.assertEqual(len(registrars), 1)
for registrar in registrars:
self.assertTrue(registrar.text.strip())
settings = soup.select('dt')
self.assertEqual(len(settings), 2)
for setting in settings:
self.assertTrue(setting.text.strip())
# As a pending registrar user
response = self.get('user_management_settings_affiliations',
user='<EMAIL>').content
self.assertIn(b'Pending Registrar', response)
self.assertIn(b'Thank you for requesting an account for your library. Perma.cc will review your request as soon as possible.', response)
soup = BeautifulSoup(response, 'html.parser')
registrars = soup.select('.sponsor-name')
self.assertEqual(len(registrars), 1)
for registrar in registrars:
self.assertTrue(registrar.text.strip())
settings = soup.select('dt')
self.assertEqual(len(settings), 2)
for setting in settings:
self.assertTrue(setting.text.strip())
###
### SIGNUP
###
### Libraries ###
def new_lib(self):
rand = random()
return { 'email': u'<EMAIL>'.format(rand),
'name': u'University Library {}'.format(rand),
'website': u'http://website{}.org'.format(rand),
'address': u'{} Main St., Boston MA 02144'.format(rand)}
def new_lib_user(self):
rand = random()
return { 'email': u'<EMAIL>'.format(rand),
'first': u'Joe',
'last': u'Yacobówski' }
def check_library_labels(self, soup):
name_label = soup.find('label', {'for': 'id_b-name'})
self.assertEqual(name_label.text, "Library name")
email_label = soup.find('label', {'for': 'id_b-email'})
self.assertEqual(email_label.text, "Library email")
website_label = soup.find('label', {'for': 'id_b-website'})
self.assertEqual(website_label.text, "Library website")
def check_lib_user_labels(self, soup):
email_label = soup.find('label', {'for': 'id_a-e-address'})
self.assertEqual(email_label.text, "Your email")
def check_lib_email(self, message, new_lib, user):
our_address = settings.DEFAULT_FROM_EMAIL
self.assertIn(new_lib['name'], message.body)
self.assertIn(new_lib['email'], message.body)
self.assertIn(user['email'], message.body)
id = Registrar.objects.get(email=new_lib['email']).id
approve_url = "http://testserver{}".format(reverse('user_management_approve_pending_registrar', args=[id]))
self.assertIn(approve_url, message.body)
self.assertEqual(message.subject, "Perma.cc new library registrar account request")
self.assertEqual(message.from_email, our_address)
self.assertEqual(message.recipients(), [our_address])
self.assertDictEqual(message.extra_headers, {'Reply-To': user['email']})
def test_new_library_render(self):
'''
Does the library signup form display as expected?
'''
# NOT LOGGED IN
# Registrar and user forms are displayed,
# inputs are blank, and labels are customized as expected
response = self.get('libraries').content
soup = BeautifulSoup(response, 'html.parser')
self.check_library_labels(soup)
self.check_lib_user_labels(soup)
inputs = soup.select('input')
self.assertEqual(len(inputs), 9)
for input in inputs:
if input['name'] in ['csrfmiddlewaretoken', 'telephone']:
self.assertTrue(input.get('value', ''))
else:
self.assertFalse(input.get('value', ''))
# If request_data is present in session, registrar form is prepopulated,
# and labels are still customized as expected
session = self.client.session
new_lib = self.new_lib()
new_lib_user = self.new_lib_user()
session['request_data'] = { u'b-email': new_lib['email'],
u'b-website': new_lib['website'],
u'b-name': new_lib['name'],
u'b-address': new_lib['address'],
u'a-e-address': new_lib_user['email'],
u'a-first_name': new_lib_user['first'],
u'a-last_name': new_lib_user['last'],
u'csrfmiddlewaretoken': u'11YY3S2DgOw2DHoWVEbBArnBMdEA2svu' }
session.save()
response = self.get('libraries').content
soup = BeautifulSoup(response, 'html.parser')
self.check_library_labels(soup)
self.check_lib_user_labels(soup)
inputs = soup.select('input')
self.assertEqual(len(inputs), 9)
for input in inputs:
if input['name'] in ['csrfmiddlewaretoken', 'telephone']:
self.assertTrue(input.get('value', ''))
elif input['name'][:2] == "b-":
self.assertTrue(input.get('value', ''))
else:
self.assertFalse(input.get('value', ''))
# If there's an unsuccessful submission, field labels are still as expected.
response = self.post('libraries').content
soup = BeautifulSoup(response, 'html.parser')
self.check_library_labels(soup)
self.check_lib_user_labels(soup)
# LOGGED IN
# Registrar form is displayed, but user form is not,
# inputs are blank, and labels are still customized as expected
response = self.get('libraries', user="<EMAIL>").content
soup = BeautifulSoup(response, 'html.parser')
self.check_library_labels(soup)
inputs = soup.select('input')
self.assertEqual(len(inputs), 6) # 6 because csrf is here and in the logout form
for input in inputs:
self.assertIn(input['name'],['csrfmiddlewaretoken', 'b-name', 'b-email', 'b-website', 'b-address'])
if input['name'] == 'csrfmiddlewaretoken':
self.assertTrue(input.get('value', ''))
else:
self.assertFalse(input.get('value', ''))
def test_new_library_submit_success(self):
'''
Does the library signup form submit as expected? Success cases.
'''
expected_emails_sent = 0
# Not logged in, submit all fields sans first and last name
new_lib = self.new_lib()
new_lib_user = self.new_lib_user()
self.submit_form('libraries',
data = { u'b-email': new_lib['email'],
u'b-website': new_lib['website'],
u'b-name': new_lib['name'],
u'a-e-address': new_lib_user['email'] },
success_url=reverse('register_library_instructions'))
expected_emails_sent += 2
self.assertEqual(len(mail.outbox), expected_emails_sent)
self.check_lib_email(mail.outbox[expected_emails_sent - 2], new_lib, new_lib_user)
self.check_new_activation_email(mail.outbox[expected_emails_sent - 1], new_lib_user['email'])
# Not logged in, submit all fields including first and last name
new_lib = self.new_lib()
new_lib_user = self.new_lib_user()
self.submit_form('libraries',
data = { u'b-email': new_lib['email'],
u'b-website': new_lib['website'],
u'b-name': new_lib['name'],
u'a-e-address': new_lib_user['email'],
u'a-first_name': new_lib_user['first'],
u'a-last_name': new_lib_user['last']},
success_url=reverse('register_library_instructions'))
expected_emails_sent += 2
self.assertEqual(len(mail.outbox), expected_emails_sent)
self.check_lib_email(mail.outbox[expected_emails_sent - 2], new_lib, new_lib_user)
self.check_new_activation_email(mail.outbox[expected_emails_sent - 1], new_lib_user['email'])
# Logged in
new_lib = self.new_lib()
existing_lib_user = { 'email': '<EMAIL>'}
self.submit_form('libraries',
data = { u'b-email': new_lib['email'],
u'b-website': new_lib['website'],
u'b-name': new_lib['name'] },
success_url=reverse('user_management_settings_affiliations'),
user=existing_lib_user['email'])
expected_emails_sent += 1
self.assertEqual(len(mail.outbox), expected_emails_sent)
self.check_lib_email(mail.outbox[expected_emails_sent - 1], new_lib, existing_lib_user)
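# The 'telephone' input is a honeypot field (presumably hidden in the UI): any
# submission that fills it is treated as a bot, so no email is sent and no
# account or registrar is created.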
def test_new_library_form_honeypot(self):
new_lib = self.new_lib()
new_lib_user = self.new_lib_user()
self.submit_form('libraries',
data = { u'b-email': new_lib['email'],
u'b-website': new_lib['website'],
u'b-name': new_lib['name'],
u'a-e-address': new_lib_user['email'],
u'a-first_name': new_lib_user['first'],
u'a-last_name': new_lib_user['last'],
u'a-telephone': "I'm a bot."},
success_url=reverse('register_library_instructions'))
self.assertEqual(len(mail.outbox), 0)
self.assertFalse(Registrar.objects.filter(name=new_lib['name']).exists())
def test_new_library_submit_failure(self):
'''
Does the library signup form submit as expected? Failures.
'''
new_lib = self.new_lib()
existing_lib_user = { 'email': '<EMAIL>'}
# Not logged in, blank submission reports correct fields required
# ('email' catches both registrar and user email errors, unavoidably,
# so test with just that missing separately)
self.submit_form('libraries',
data = {},
form_keys = ['registrar_form', 'user_form'],
error_keys = ['website', 'name', 'email'])
self.assertEqual(len(mail.outbox), 0)
# (checking user email missing separately)
self.submit_form('libraries',
data = {u'b-email': new_lib['email'],
u'b-website': new_lib['website'],
u'b-name': new_lib['name']},
form_keys = ['registrar_form', 'user_form'],
error_keys = ['email'])
self.assertEqual(len(mail.outbox), 0)
# Not logged in, user appears to have already registered
data = {u'b-email': new_lib['email'],
u'b-website': new_lib['website'],
u'b-name': new_lib['name'],
u'a-e-address': existing_lib_user['email']}
self.submit_form('libraries',
data = data,
form_keys = ['registrar_form', 'user_form'],
success_url = '/login?next=/libraries/')
self.assertDictEqual(self.client.session['request_data'], data)
self.assertEqual(len(mail.outbox), 0)
# Not logged in, registrar appears to exist already
# (actually, this doesn't currently fail)
# Logged in, blank submission reports all fields required
self.submit_form('libraries',
data = {},
user = existing_lib_user['email'],
error_keys = ['website', 'name', 'email'])
self.assertEqual(len(mail.outbox), 0)
# Logged in, registrar appears to exist already
# (actually, this doesn't currently fail)
### Courts ###
def new_court(self):
rand = random()
return { 'requested_account_note': u'Court {}'.format(rand) }
def new_court_user(self):
rand = random()
return { 'email': u'user{}<EMAIL>'.format(rand),
'first': u'Joe',
'last': u'Yacobówski' }
def check_court_email(self, message, court_email):
our_address = settings.DEFAULT_FROM_EMAIL
# Doesn't check email contents yet; too many variations possible presently
self.assertEqual(message.subject, "Perma.cc new library court account information request")
self.assertEqual(message.from_email, our_address)
self.assertEqual(message.recipients(), [our_address])
self.assertDictEqual(message.extra_headers, {'Reply-To': court_email})
def test_new_court_success(self):
'''
Does the court signup form submit as expected? Success cases.
'''
new_court = self.new_court()
new_user = self.new_court_user()
existing_user = { 'email': '<EMAIL>'}
another_existing_user = { 'email': '<EMAIL>'}
expected_emails_sent = 0
# NOT LOGGED IN
# Existing user's email address, no court info
# (currently succeeds, should probably fail; see issue 1746)
self.submit_form('sign_up_courts',
data = { 'e-address': existing_user['email']},
success_url = reverse('court_request_response'))
expected_emails_sent += 1
self.assertEqual(len(mail.outbox), expected_emails_sent)
self.check_court_email(mail.outbox[expected_emails_sent - 1], existing_user['email'])
# Existing user's email address + court info
self.submit_form('sign_up_courts',
data = { 'e-address': existing_user['email'],
'requested_account_note': new_court['requested_account_note']},
success_url = reverse('court_request_response'))
expected_emails_sent += 1
self.assertEqual(len(mail.outbox), expected_emails_sent)
self.check_court_email(mail.outbox[expected_emails_sent - 1], existing_user['email'])
# New user email address, don't create account
self.submit_form('sign_up_courts',
data = { 'e-address': new_user['email'],
'requested_account_note': new_court['requested_account_note']},
success_url = reverse('court_request_response'))
expected_emails_sent += 1
self.assertEqual(len(mail.outbox), expected_emails_sent)
self.check_court_email(mail.outbox[expected_emails_sent - 1], new_user['email'])
# New user email address, create account
self.submit_form('sign_up_courts',
data = { 'e-address': new_user['email'],
'requested_account_note': new_court['requested_account_note'],
'create_account': True },
success_url = reverse('register_email_instructions'))
expected_emails_sent += 2
self.assertEqual(len(mail.outbox), expected_emails_sent)
self.check_new_activation_email(mail.outbox[expected_emails_sent - 2], new_user['email'])
self.check_court_email(mail.outbox[expected_emails_sent - 1], new_user['email'])
# LOGGED IN
# New user email address
# (This succeeds and creates a new account; see issue 1749)
new_user = self.new_court_user()
self.submit_form('sign_up_courts',
data = { 'e-address': new_user['email'],
'requested_account_note': new_court['requested_account_note'],
'create_account': True },
user = existing_user['email'],
success_url = reverse('register_email_instructions'))
expected_emails_sent += 2
self.assertEqual(len(mail.outbox), expected_emails_sent)
self.check_new_activation_email(mail.outbox[expected_emails_sent - 2], new_user['email'])
self.check_court_email(mail.outbox[expected_emails_sent - 1], new_user['email'])
# Existing user's email address, not that of the user logged in.
# (This is odd; see issue 1749)
self.submit_form('sign_up_courts',
data = { 'e-address': existing_user['email'],
'requested_account_note': new_court['requested_account_note'],
'create_account': True },
user = another_existing_user['email'],
success_url = reverse('court_request_response'))
expected_emails_sent += 1
self.assertEqual(len(mail.outbox), expected_emails_sent)
self.check_court_email(mail.outbox[expected_emails_sent - 1], existing_user['email'])
def test_new_court_form_honeypot(self):
new_court = self.new_court()
new_user = self.new_court_user()
self.submit_form('sign_up_courts',
data = { 'e-address': new_user['email'],
'requested_account_note': new_court['requested_account_note'],
'create_account': True,
'telephone': "I'm a bot." },
success_url = reverse('register_email_instructions'))
self.assertEqual(len(mail.outbox), 0)
self.assertFalse(LinkUser.objects.filter(email=new_user['email']).exists())
def test_new_court_failure(self):
'''
Does the court signup form submit as expected? Failure cases.
'''
# Not logged in, blank submission reports correct fields required
self.submit_form('sign_up_courts',
data = {},
error_keys = ['email', 'requested_account_note'])
self.assertEqual(len(mail.outbox), 0)
# Logged in, blank submission reports same fields required
# (This is odd; see issue 1749)
self.submit_form('sign_up_courts',
data = {},
user = '<EMAIL>',
error_keys = ['email', 'requested_account_note'])
self.assertEqual(len(mail.outbox), 0)
### Firms ###
def new_firm(self):
rand = random()
return {'requested_account_note': u'Firm {}'.format(rand)}
def new_firm_user(self):
rand = random()
return {'email': u'<EMAIL>'.format(rand),
'first': u'Joe',
'last': u'Yacobówski'}
def check_firm_email(self, message, firm_email):
our_address = settings.DEFAULT_FROM_EMAIL
# Doesn't check email contents yet; too many variations possible presently
self.assertEqual(message.subject, "Perma.cc new law firm account information request")
self.assertEqual(message.from_email, our_address)
self.assertEqual(message.recipients(), [our_address])
self.assertDictEqual(message.extra_headers, {'Reply-To': firm_email})
def test_new_firm_success(self):
'''
Does the firm signup form submit as expected? Success cases.
'''
new_firm = self.new_firm()
new_user = self.new_firm_user()
existing_user = {'email': '<EMAIL>'}
another_existing_user = {'email': '<EMAIL>'}
expected_emails_sent = 0
# NOT LOGGED IN
# Existing user's email address, no court info
# (currently succeeds, should probably fail; see issue 1746)
self.submit_form('sign_up_firm',
data={'e-address': existing_user['email']},
success_url=reverse('firm_request_response'))
expected_emails_sent += 1
self.assertEqual(len(mail.outbox), expected_emails_sent)
self.check_firm_email(mail.outbox[expected_emails_sent - 1], existing_user['email'])
# Existing user's email address + firm info
self.submit_form('sign_up_firm',
data={'e-address': existing_user['email'],
'requested_account_note': new_firm['requested_account_note']},
success_url=reverse('firm_request_response'))
expected_emails_sent += 1
self.assertEqual(len(mail.outbox), expected_emails_sent)
self.check_firm_email(mail.outbox[expected_emails_sent - 1], existing_user['email'])
# New user email address, don't create account
self.submit_form('sign_up_firm',
data={'e-address': new_user['email'],
'requested_account_note': new_firm['requested_account_note']},
success_url=reverse('firm_request_response'))
expected_emails_sent += 1
self.assertEqual(len(mail.outbox), expected_emails_sent)
self.check_firm_email(mail.outbox[expected_emails_sent - 1], new_user['email'])
# New user email address, create account
self.submit_form('sign_up_firm',
data={'e-address': new_user['email'],
'requested_account_note': new_firm['requested_account_note'],
'create_account': True},
success_url=reverse('register_email_instructions'))
expected_emails_sent += 2
self.assertEqual(len(mail.outbox), expected_emails_sent)
self.check_new_activation_email(mail.outbox[expected_emails_sent - 2], new_user['email'])
self.check_firm_email(mail.outbox[expected_emails_sent - 1], new_user['email'])
# LOGGED IN
# New user email address
# (This succeeds and creates a new account; see issue 1749)
new_user = self.new_firm_user()
self.submit_form('sign_up_firm',
data={'e-address': new_user['email'],
'requested_account_note': new_firm['requested_account_note'],
'create_account': True},
user=existing_user['email'],
success_url=reverse('register_email_instructions'))
expected_emails_sent += 2
self.assertEqual(len(mail.outbox), expected_emails_sent)
self.check_new_activation_email(mail.outbox[expected_emails_sent - 2], new_user['email'])
self.check_firm_email(mail.outbox[expected_emails_sent - 1], new_user['email'])
# Existing user's email address, not that of the user logged in.
# (This is odd; see issue 1749)
self.submit_form('sign_up_firm',
data={'e-address': existing_user['email'],
'requested_account_note': new_firm['requested_account_note'],
'create_account': True},
user=another_existing_user['email'],
success_url=reverse('firm_request_response'))
expected_emails_sent += 1
self.assertEqual(len(mail.outbox), expected_emails_sent)
self.check_firm_email(mail.outbox[expected_emails_sent - 1], existing_user['email'])
def test_new_firm_form_honeypot(self):
new_firm = self.new_firm()
new_user = self.new_firm_user()
self.submit_form('sign_up_firm',
data = { 'e-address': new_user['email'],
'requested_account_note': new_firm['requested_account_note'],
'create_account': True,
'telephone': "I'm a bot." },
success_url = reverse('register_email_instructions'))
self.assertEqual(len(mail.outbox), 0)
self.assertFalse(LinkUser.objects.filter(email=new_user['email']).exists())
def test_new_firm_failure(self):
'''
Does the firm signup form submit as expected? Failure cases.
'''
# Not logged in, blank submission reports correct fields required
self.submit_form('sign_up_firm',
data={},
error_keys=['email', 'requested_account_note'])
self.assertEqual(len(mail.outbox), 0)
# Logged in, blank submission reports same fields required
# (This is odd; see issue 1749)
self.submit_form('sign_up_firm',
data={},
user='<EMAIL>',
error_keys=['email', 'requested_account_note'])
self.assertEqual(len(mail.outbox), 0)
### Journals ###
def new_journal(self):
rand = random()
return { 'requested_account_note': u'Journal {}'.format(rand)}
def new_journal_user(self):
rand = random()
return { 'email': u'<EMAIL>'.format(rand),
'first': u'Joe',
'last': u'Yacobówski' }
def test_new_journal_success(self):
'''
Does the journal signup form submit as expected? Success cases.
'''
new_journal = self.new_journal()
new_user = self.new_journal_user()
existing_user = {'email': '<EMAIL>'}
expected_emails_sent = 0
# NOT LOGGED IN
# New user email address + journal info
self.submit_form('sign_up_journals',
data = { 'e-address': new_user['email'],
'requested_account_note': new_journal['requested_account_note']},
success_url = reverse('register_email_instructions'))
expected_emails_sent += 1
self.assertEqual(len(mail.outbox), expected_emails_sent)
self.check_new_activation_email(mail.outbox[expected_emails_sent - 1], new_user['email'])
# LOGGED IN
# New user email address + journal info
# (This succeeds and creates a new account; see issue 1749)
new_user = self.new_journal_user()
self.submit_form('sign_up_journals',
data = { 'e-address': new_user['email'],
'requested_account_note': new_journal['requested_account_note']},
user = existing_user['email'],
success_url = reverse('register_email_instructions'))
expected_emails_sent += 1
self.assertEqual(len(mail.outbox), expected_emails_sent)
self.check_new_activation_email(mail.outbox[expected_emails_sent - 1], new_user['email'])
def test_new_journal_form_honeypot(self):
new_journal = self.new_journal()
new_user = self.new_journal_user()
self.submit_form('sign_up_journals',
data = { 'e-address': new_user['email'],
'requested_account_note': new_journal['requested_account_note'],
'telephone': "I'm a bot." },
success_url = reverse('register_email_instructions'))
self.assertEqual(len(mail.outbox), 0)
self.assertFalse(LinkUser.objects.filter(email=new_user['email']).exists())
def test_new_journal_failure(self):
'''
Does the journal signup form submit as expected? Failure cases.
'''
# NOT LOGGED IN
# Blank submission reports correct fields required
self.submit_form('sign_up_journals',
data = {},
error_keys = ['email', 'requested_account_note'])
self.assertEqual(len(mail.outbox), 0)
# If email address already belongs to an account, validation fails
self.submit_form('sign_up_journals',
data = { 'e-address': '<EMAIL>',
'requested_account_note': 'Here'},
error_keys = ['email'])
self.assertEqual(len(mail.outbox), 0)
# LOGGED IN
# (This is odd; see issue 1749)
# Blank submission reports correct fields required
self.submit_form('sign_up_journals',
data = {},
user = '<EMAIL>',
error_keys = ['email', 'requested_account_note'])
self.assertEqual(len(mail.outbox), 0)
# If email address already belongs to an account, validation fails
self.submit_form('sign_up_journals',
data = { 'e-address': '<EMAIL>',
'requested_account_note': 'Here'},
user = '<EMAIL>',
error_keys = ['email'])
self.assertEqual(len(mail.outbox), 0)
### Faculty ###
def new_faculty_user(self):
rand = random()
return { 'email': u'<EMAIL>'.format(rand),
'first': u'Joe',
'last': u'Yacobówski',
'requested_account_note': u'Journal {}'.format(rand) }
def test_new_faculty_success(self):
'''
Does the faculty signup form submit as expected? Success cases.
'''
new_user = self.new_faculty_user()
existing_user = {'email': '<EMAIL>'}
expected_emails_sent = 0
# NOT LOGGED IN
# New user email address + journal info
self.submit_form('sign_up_faculty',
data = { 'e-address': new_user['email'],
'requested_account_note': new_user['requested_account_note']},
success_url = reverse('register_email_instructions'))
expected_emails_sent += 1
self.assertEqual(len(mail.outbox), expected_emails_sent)
self.check_new_activation_email(mail.outbox[expected_emails_sent - 1], new_user['email'])
# LOGGED IN
# New user email address + journal info
# (This succeeds and creates a new account; see issue 1749)
new_user = self.new_faculty_user()
self.submit_form('sign_up_faculty',
data = { 'e-address': new_user['email'],
'requested_account_note': new_user['requested_account_note']},
user = existing_user['email'],
success_url = reverse('register_email_instructions'))
expected_emails_sent += 1
self.assertEqual(len(mail.outbox), expected_emails_sent)
self.check_new_activation_email(mail.outbox[expected_emails_sent - 1], new_user['email'])
def test_new_faculty_form_honeypot(self):
new_user = self.new_faculty_user()
self.submit_form('sign_up_faculty',
data = { 'e-address': new_user['email'],
'requested_account_note': new_user['requested_account_note'],
'telephone': "I'm a bot." },
success_url = reverse('register_email_instructions'))
self.assertEqual(len(mail.outbox), 0)
self.assertFalse(LinkUser.objects.filter(email=new_user['email']).exists())
def test_new_faculty_failure(self):
'''
Does the faculty signup form submit as expected? Failure cases.
'''
# NOT LOGGED IN
# Blank submission reports correct fields required
self.submit_form('sign_up_faculty',
data = {},
error_keys = ['email', 'requested_account_note'])
self.assertEqual(len(mail.outbox), 0)
# If email address already belongs to an account, validation fails
self.submit_form('sign_up_faculty',
data = { 'e-address': '<EMAIL>',
'requested_account_note': 'Here'},
error_keys = ['email'])
self.assertEqual(len(mail.outbox), 0)
# LOGGED IN
# (This is odd; see issue 1749)
# Blank submission reports correct fields required
self.submit_form('sign_up_faculty',
data = {},
user = '<EMAIL>',
error_keys = ['email', 'requested_account_note'])
self.assertEqual(len(mail.outbox), 0)
# If email address already belongs to an account, validation fails
self.submit_form('sign_up_faculty',
data = { 'e-address': '<EMAIL>',
'requested_account_note': 'Here'},
user = '<EMAIL>',
error_keys = ['email'])
self.assertEqual(len(mail.outbox), 0)
### Individual Users ###
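# Helper: asserts the basics of an activation email (subject, sender, recipient)
# and returns the activation link extracted from the message body.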
def check_new_activation_email(self, message, user_email):
self.assertEqual(message.subject, "A Perma.cc account has been created for you")
self.assertEqual(message.from_email, settings.DEFAULT_FROM_EMAIL)
self.assertEqual(message.recipients(), [user_email])
activation_url = next(line for line in message.body.rstrip().split("\n") if line.startswith('http'))
return activation_url
def test_account_creation_views(self):
# user registration
new_user_email = "<EMAIL>"
self.submit_form('sign_up', {'e-address': new_user_email, 'first_name': 'Test', 'last_name': 'Test'},
success_url=reverse('register_email_instructions'),
success_query=LinkUser.objects.filter(email=new_user_email))
# email sent
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
activation_url = self.check_new_activation_email(message, new_user_email)
# the new user is created, but is unactivated
user = LinkUser.objects.get(email=new_user_email)
self.assertFalse(user.is_active)
self.assertFalse(user.is_confirmed)
# if you tamper with the code, it is rejected
response = self.client.get(activation_url[:-1]+'wrong/')
self.assertContains(response, 'This activation/reset link is invalid')
# reg confirm - non-matching passwords
response = self.client.get(activation_url, follow=True)
post_url = response.redirect_chain[0][0]
self.assertTemplateUsed(response, 'registration/password_reset_confirm.html')
response = self.client.post(post_url, {'new_password1': '<PASSWORD>', 'new_password2': '<PASSWORD>'}, follow=True)
self.assertNotContains(response, 'Your password has been set')
self.assertContains(response, "The two password fields didn't match")
# reg confirm - correct
response = self.client.post(post_url, {'new_password1': '<PASSWORD>', 'new_password2': '<PASSWORD>'}, follow=True)
self.assertContains(response, 'Your password has been set')
# Doesn't work twice.
response = self.client.post(post_url, {'new_password1': '<PASSWORD>', 'new_password2': '<PASSWORD>'}, follow=True)
self.assertContains(response, 'This activation/reset link is invalid')
# the new user is now activated and can log in
user.refresh_from_db()
self.assertTrue(user.is_active)
self.assertTrue(user.is_confirmed)
response = self.client.post(reverse('user_management_limited_login'), {'username': new_user_email, 'password': '<PASSWORD>'}, follow=True)
self.assertContains(response, 'Enter any URL to preserve it forever')
def test_signup_with_existing_email_rejected(self):
self.submit_form('sign_up',
{'e-address': self.registrar_user.email, 'first_name': 'Test', 'last_name': 'Test'},
error_keys=['email'])
def test_new_user_form_honeypot(self):
new_user_email = "<EMAIL>"
self.submit_form('sign_up',
data = { 'e-address': new_user_email,
'telephone': "I'm a bot." },
success_url = reverse('register_email_instructions'))
self.assertEqual(len(mail.outbox), 0)
self.assertFalse(LinkUser.objects.filter(email=new_user_email).exists())
def test_get_new_activation_code(self):
self.submit_form('user_management_not_active',
user = '<EMAIL>',
data = {},
success_url=reverse('user_management_limited_login'))
self.assertEqual(len(mail.outbox), 1)
self.check_new_activation_email(mail.outbox[0], '<EMAIL>')
### RESENDING ACTIVATION EMAILS ###
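# Helpers: `user` requests that an activation email be resent to `other_user`;
# the first asserts success (one activation email sent), the second asserts a 403
# and that no email was sent.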
def check_activation_resent(self, user, other_user):
self.get('user_management_resend_activation',
reverse_kwargs={'args':[LinkUser.objects.get(email=other_user).id]},
user = user)
self.assertEqual(len(mail.outbox), 1)
self.check_new_activation_email(mail.outbox[0], other_user)
def check_activation_not_resent(self, user, other_user):
self.get('user_management_resend_activation',
reverse_kwargs={'args':[LinkUser.objects.get(email=other_user).id]},
user = user,
require_status_code = 403)
self.assertEqual(len(mail.outbox), 0)
# Registrar Users
def test_registrar_can_resend_activation_to_org_user(self):
self.check_activation_resent('<EMAIL>','<EMAIL>')
def test_registrar_can_resend_activation_to_registrar_user(self):
self.check_activation_resent('<EMAIL>','<EMAIL>')
def test_registrar_cannot_resend_activation_to_unrelated_org_user(self):
self.check_activation_not_resent('<EMAIL>','<EMAIL>')
def test_registrar_cannot_resend_activation_to_regular_user(self):
self.check_activation_not_resent('<EMAIL>','<EMAIL>')
def test_registrar_cannot_resend_activation_to_unrelated_registrar_user(self):
self.check_activation_not_resent('<EMAIL>','<EMAIL>')
# Org Users
def test_org_user_can_resend_activation_to_org_user(self):
self.check_activation_resent('<EMAIL>','<EMAIL>')
def test_org_user_cannot_resend_activation_to_unrelated_org_user(self):
self.check_activation_not_resent('<EMAIL>','<EMAIL>')
def test_org_user_cannot_resend_activation_to_regular_user(self):
self.check_activation_not_resent('<EMAIL>','<EMAIL>')
def test_org_user_cannot_resend_activation_to_registrar_user(self):
self.check_activation_not_resent('<EMAIL>','<EMAIL>')
# Admin Users
def test_admin_can_resend_activation_to_regular_user(self):
self.check_activation_resent('<EMAIL>','<EMAIL>')
def test_admin_can_resend_activation_to_org_user(self):
self.check_activation_resent('<EMAIL>','<EMAIL>')
def test_admin_can_resend_activation_to_registrar_user(self):
self.check_activation_resent('<EMAIL>','<EMAIL>')
### ADMIN STATS ###
def test_admin_stats(self):
self.log_in_user(self.admin_user)
self.get('user_management_stats', reverse_kwargs={'args':['days']})
self.get('user_management_stats', reverse_kwargs={'args':['celery']})
self.get('user_management_stats', reverse_kwargs={'args':['random']})
self.get('user_management_stats', reverse_kwargs={'args':['emails']})
self.get('user_management_stats', reverse_kwargs={'args':['job_queue']})
|
src/lib/memory_efficient.py | shah0lin/data | 316 | 12710740 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import sys
import json
import shutil
import warnings
import traceback
from pathlib import Path
from typing import Dict, Iterable, List, Optional
from .io import read_lines, read_table
# Any CSV file under 50 MB can use the fast JSON converter
JSON_FAST_CONVERTER_SIZE_BYTES = 50 * 1000 * 1000
# Any CSV file above 150 MB should not be converted to JSON
JSON_MAX_SIZE_BYTES = 150 * 1000 * 1000
def get_table_columns(table_path: Path) -> List[str]:
"""
Memory-efficient method used to extract the columns of a table without reading the entire
file into memory.
Arguments:
table_path: Path to the table
Returns:
List[str]: Column names from the table header
"""
with open(table_path, "r") as fd:
reader = csv.reader(fd)
return next(reader)
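# Usage sketch (illustrative; the path below is an assumption, not part of
# this module): reading just the header of a large CSV costs constant memory.
#   columns = get_table_columns(Path("v2/epidemiology.csv"))
#   # -> e.g. ["date", "key", "total_confirmed", ...]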
def table_sort(table_path: Path, output_path: Path) -> None:
"""
    Performs a lexical sort of all the data rows of this table, keeping the header as the
    first line of the output. Note that the data rows are held in memory during the sort.
Arguments:
table_path: Path of the table to be sorted.
output_path: Output location for the sorted table.
"""
with open(table_path, "r") as fd_in:
header = next(fd_in)
with open(output_path, "w") as fd_out:
fd_out.write(f"{header}")
records = []
for record in fd_in:
records.append(record)
for record in sorted(records):
fd_out.write(f"{record}")
def table_join(left: Path, right: Path, on: List[str], output: Path, how: str = "outer") -> None:
"""
    Performs a memory-efficient join between two CSV files. The records of the right table
    are held in memory, so it is more efficient to pass the bigger table as `left` and the
    smaller one as `right`.
Arguments:
left: Left table to join. Only rows present in this table will be present in the output.
right: Right table to join. All of its columns will be added to those of `left`.
on: Column names to perform the join.
output: Path to write the joined table to.
how: Either "inner" or "outer" indicating whether records present only in the `left` table
should be dropped or not.
"""
    def compute_join_indices(columns: Dict[str, int]) -> List[int]:
        assert all(
            name in columns.keys() for name in on
        ), f"Column provided in `on` not present in table. Expected {on} but found {columns}"
        join_indices = [columns[name] for name in on]
        return join_indices
records_right = {}
with open(right, "r") as fd:
reader = csv.reader(fd)
columns_right = {name: idx for idx, name in enumerate(next(reader))}
join_indices = compute_join_indices(columns_right)
# Only save the data which is not part of the join, which will be added by the left table
columns_right_output = {
name: idx for name, idx in columns_right.items() if idx not in join_indices
}
for record in reader:
key = tuple([record[idx] for idx in join_indices])
data = [record[idx] for idx in columns_right_output.values()]
records_right[key] = data
with open(output, "w") as fd_out:
writer = csv.writer(fd_out)
with open(left, "r") as fd_in:
reader = csv.reader(fd_in)
columns_left = {name: idx for idx, name in enumerate(next(reader))}
join_indices = compute_join_indices(columns_left)
# Write the output columns as a header
columns_output = list(columns_left.keys()) + list(columns_right_output.keys())
writer.writerow(columns_output)
for record_left in reader:
key = tuple([record_left[idx] for idx in join_indices])
data_left = [record_left[idx] for idx in columns_left.values()]
# If this is an inner join and the key is not in the right table, drop it
if how == "inner" and not key in records_right:
continue
# Get the data from the right table and write to output
data_right = records_right.get(key, [None] * len(columns_right_output))
writer.writerow(data_left + data_right)
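def _example_table_join() -> None:
    # Illustrative helper, not part of the original module: joins two small
    # tables on "key". Column values and file names are assumptions.
    from tempfile import TemporaryDirectory
    with TemporaryDirectory() as workdir:
        left, right = Path(workdir) / "left.csv", Path(workdir) / "right.csv"
        out = Path(workdir) / "joined.csv"
        left.write_text("key,total_confirmed\nUS,100\nAR,20\n")
        right.write_text("key,population\nUS,330000000\n")
        table_join(left, right, on=["key"], output=out, how="outer")
        # AR has no match in `right`, so its population field is left empty
        print(out.read_text())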
def skip_head_reader(path: Path, n: int = 1, **read_opts) -> Iterable[str]:
    """ Iterates over the lines of a file, skipping the first `n` lines (the header by default). """
    fd = read_lines(path, **read_opts)
    for _ in range(n):
        next(fd)
    yield from fd
def table_cross_product(left: Path, right: Path, output: Path) -> None:
"""
    Memory-efficient method to perform the cross product of all columns in two tables. Columns
    which are present in both tables will be duplicated in the output.
    Arguments:
        left: Left table. All columns from this table will be present in the output.
        right: Right table. All columns from this table will be present in the output.
        output: Path to write the output table to.
"""
columns_left = get_table_columns(left)
columns_right = get_table_columns(right)
with open(output, "w") as fd:
writer = csv.writer(fd)
writer.writerow(columns_left + columns_right)
reader_left = csv.reader(skip_head_reader(left))
for record_left in reader_left:
reader_right = csv.reader(skip_head_reader(right))
for record_right in reader_right:
writer.writerow(record_left + record_right)
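# Shape of the cross product at a glance (illustrative rows): N data rows on
# the left and M on the right yield N * M output rows, e.g.
#   left: (a,) (b,)   right: (1,) (2,)   output: (a,1) (a,2) (b,1) (b,2)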
def table_group_tail(table: Path, output: Path) -> None:
""" Outputs latest data for each key, assumes records are indexed by <key, date> """
reader = csv.reader(read_lines(table))
columns = {name: idx for idx, name in enumerate(next(reader))}
if not "date" in columns.keys():
# Degenerate case: this table has no date
shutil.copyfile(table, output)
else:
has_epi = "total_confirmed" in columns
# To stay memory-efficient, do the latest subset "by hand" instead of using pandas grouping
# This assumes that the CSV file is sorted in ascending order, which should always be true
latest_date: Dict[str, str] = {}
records: Dict[str, List[str]] = {}
for record in reader:
try:
key = record[columns["key"]]
date = record[columns["date"]]
                total_confirmed = record[columns["total_confirmed"]] if has_epi else True
                # CSV fields are read as strings, so a missing value is "" rather than None
                latest_seen = latest_date.get(key, date) < date and total_confirmed != ""
if key not in records or latest_seen:
latest_date[key] = date
records[key] = record
except Exception as exc:
print(f"Error parsing record {record} in table {table}: {exc}", file=sys.stderr)
traceback.print_exc()
with open(output, "w") as fd_out:
writer = csv.writer(fd_out)
writer.writerow(columns.keys())
for key, record in records.items():
writer.writerow(record)
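# Contract sketch for the grouping above (illustrative rows, ascending dates):
#   key,date            key,date
#   US,2020-03-01   ->  US,2020-03-02
#   US,2020-03-02
# Only the latest record per key survives in the output.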
def convert_csv_to_json_records(
schema: Dict[str, type],
csv_file: Path,
output_file: Path,
    skip_size_threshold: Optional[int] = None,
    fast_size_threshold: Optional[int] = None,
) -> None:
if skip_size_threshold is None:
skip_size_threshold = JSON_MAX_SIZE_BYTES
if fast_size_threshold is None:
fast_size_threshold = JSON_FAST_CONVERTER_SIZE_BYTES
file_size = csv_file.stat().st_size
    json_converter_method = _convert_csv_to_json_records_fast
    if skip_size_threshold > 0 and file_size > skip_size_threshold:
        raise ValueError(f"Size of {csv_file} too large for conversion: {file_size // 1E6} MB")
    if fast_size_threshold > 0 and file_size > fast_size_threshold:
        warnings.warn(f"Size of {csv_file} too large for fast method: {file_size // 1E6} MB")
        json_converter_method = _convert_csv_to_json_records_slow
    json_converter_method(schema, csv_file, output_file)
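# Threshold behavior at a glance, derived from the constants above (the schema
# and paths in a real call are caller-supplied):
#   size <= 50 MB          -> fast, memory-intensive converter
#   50 MB < size <= 150 MB -> slow, memory-efficient converter (with a warning)
#   size > 150 MB          -> ValueError, no conversion attempted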
def _convert_csv_to_json_records_slow(schema: Dict[str, type], csv_file: Path, output_file: Path) -> None:
"""
Slow but memory efficient method to convert the provided CSV file to a record-like JSON format
"""
with output_file.open("w") as fd_out:
# Write the header first
columns = get_table_columns(csv_file)
columns_str = ",".join([f'"{col}"' for col in columns])
fd_out.write(f'{{"columns":[{columns_str}],"data":[')
# Read the CSV file in chunks but keep only the values
first_record = True
for chunk in read_table(csv_file, schema=schema, chunksize=256):
if first_record:
first_record = False
else:
fd_out.write(",")
fd_out.write(chunk.to_json(orient="values")[1:-1])
fd_out.write("]}")
def _convert_csv_to_json_records_fast(
schema: Dict[str, type], csv_file: Path, output_file: Path
) -> None:
"""
Fast but memory intensive method to convert the provided CSV file to a record-like JSON format
"""
table = read_table(csv_file, schema=schema)
json_dict = json.loads(table.to_json(orient="split"))
del json_dict["index"]
with open(output_file, "w") as fd:
json.dump(json_dict, fd)
|
scripts/predict.py | sreesxlnc/kaggle-right-whale | 200 | 12710751 | import argparse
import importlib
from time import strftime
import numpy as np
import pandas as pd
import cPickle as pickle
def load_data(fname):
n = 6925
size = int(fname.split('_')[0])
X_fname = 'cache/X_test_%s.npy' % fname
X_shape = (n, 3, size, size)
X = np.memmap(X_fname, dtype=np.float32, mode='r', shape=X_shape)
return X
def load_model(fname):
model = importlib.import_module('model_definitions.%s' % fname)
return model
def load_encoder(fname='models/encoder.pkl'):
    encoder = pickle.load(open(fname, 'rb'))
return encoder
def get_current_datetime():
return strftime('%Y%m%d_%H%M%S')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data', required=True)
parser.add_argument('--model', required=True)
args = parser.parse_args()
output_fname = 'submissions/%s.csv' % get_current_datetime()
print 'Will write output to %s' % output_fname
print
print 'Loading sample submission'
sample_df = pd.read_csv('data/sample_submission.csv')
print
print 'Loading encoder'
encoder = load_encoder()
classes = map(lambda x: 'whale_%05d' % x, encoder.classes_)
print 'Loading model: %s' % args.model
model = load_model(args.model)
net = model.net
net.initialize()
print 'Loading model weights from %s' % model.model_fname
net.load_weights_from(model.model_fname)
print
print 'Loading data: %s' % args.data
X = load_data(args.data)
print
print 'Predicting...'
y_test_pred_proba = net.predict_proba(X)
print
print 'Assembling final dataframe'
fnames = sample_df[['Image']].values
values = np.hstack([fnames, y_test_pred_proba])
submission_df = pd.DataFrame(values, columns=['Image'] + classes)
print submission_df.head(1)
print
print len(submission_df.columns)
print
submission_df.to_csv(output_fname, index=False)
|
third_party/ibis/ibis_oracle/tests/conftest.py | ajw0100/professional-services-data-validator | 167 | 12710754 | # Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import ibis
import third_party.ibis.ibis_oracle.api
OL_HOST = os.environ.get(
    'IBIS_TEST_ORACLE_HOST', os.environ.get('OLHOST', 'host')
)
OL_PORT = os.environ.get(
    'IBIS_TEST_ORACLE_PORT', os.environ.get('OLPORT', 'port')
)
OL_USER = os.environ.get(
'IBIS_TEST_ORACLE_USER', os.environ.get('OLUSER', 'username')
)
OL_PASS = os.environ.get(
'IBIS_TEST_ORACLE_PASSWORD', os.environ.get('OLPASSWORD', 'password')
)
IBIS_TEST_ORACLE_DB = os.environ.get(
'IBIS_TEST_ORACLE_DATABASE', os.environ.get('OLDATABASE', 'database_name')
)
IBIS_TEST_ORACLE_PROTOCOL = os.environ.get(
    'IBIS_TEST_ORACLE_PROTOCOL', os.environ.get('OLPROTOCOL', 'protocol')
)
def _random_identifier(suffix):
return '__ibis_test_{}_{}'.format(suffix, ibis.util.guid())
@pytest.fixture(scope='session')
def con():
return third_party.ibis.ibis_oracle.api.connect(
host=OL_HOST,
port=OL_PORT,
user=OL_USER,
password=<PASSWORD>,
database=IBIS_TEST_ORACLE_DB,
        protocol=IBIS_TEST_ORACLE_PROTOCOL,
)
@pytest.fixture(scope='module')
def db(con):
return con.database()
@pytest.fixture(scope='module')
def alltypes(db):
return db.functional_alltypes
@pytest.fixture(scope='module')
def df(alltypes):
return alltypes.execute()
@pytest.fixture(scope='module')
def at(alltypes):
return alltypes.op().sqla_table
@pytest.fixture(scope='module')
def intervals(con):
return con.table("intervals")
@pytest.fixture
def translate():
from third_party.ibis.ibis_oracle.compiler import OracleDialect
dialect = OracleDialect()
context = dialect.make_context()
return lambda expr: dialect.translator(expr, context).get_result()
@pytest.fixture
def temp_table(con) -> str:
"""
Return a temporary table name.
Parameters
----------
    con : Oracle backend connection, as provided by the `con` fixture
    Yields
    ------
    name : string
        Random table name for temporary usage.
"""
name = _random_identifier('table')
try:
yield name
finally:
con.drop_table(name, force=True)
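# Illustrative sketch, not part of the original suite: tests consume these
# fixtures by declaring them as arguments. The table name comes from the
# `alltypes` fixture; `list_tables` follows the standard ibis client API.
#   def test_list_tables(con):
#       assert 'functional_alltypes' in con.list_tables()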
|
examples/05_vision/05_gesture.py | yukaryote/RoboMaster-SDK | 204 | 12710810 | <filename>examples/05_vision/05_gesture.py
# -*-coding:utf-8-*-
# Copyright (c) 2020 DJI.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cv2
from robomaster import robot
import threading
class GestureInfo:
def __init__(self, x, y, w, h, info):
self._x = x
self._y = y
self._w = w
self._h = h
self._info = info
@property
def pt1(self):
return int((self._x - self._w / 2) * 1280), int((self._y - self._h / 2) * 720)
@property
def pt2(self):
return int((self._x + self._w / 2) * 1280), int((self._y + self._h / 2) * 720)
@property
def center(self):
return int(self._x * 1280), int(self._y * 720)
@property
def text(self):
return str(self._info)
gestures = []
# Guards concurrent access to `gestures` from the detection callback thread
value_lock = threading.Lock()
def on_detect_gesture(gesture_info):
number = len(gesture_info)
value_lock.acquire()
gestures.clear()
for i in range(0, number):
x, y, w, h, info = gesture_info[i]
gestures.append(GestureInfo(x, y, w, h, info))
print("gesture: info:{0}, x:{1}, y:{2}, w:{3}, h:{4}".format(info, x, y, w, h))
value_lock.release()
if __name__ == '__main__':
ep_robot = robot.Robot()
ep_robot.initialize(conn_type="rndis")
ep_vision = ep_robot.vision
ep_camera = ep_robot.camera
ep_camera.start_video_stream(False)
result = ep_vision.sub_detect_info(name="gesture", callback=on_detect_person)
value_lock = threading.Lock()
    for i in range(0, 500):
        img = ep_camera.read_cv2_image(strategy="newest", timeout=1.5)
        # Snapshot the detections under the lock, then draw without holding it
        value_lock.acquire()
        current_gestures = list(gestures)
        value_lock.release()
        for gesture in current_gestures:
            cv2.rectangle(img, gesture.pt1, gesture.pt2, (255, 255, 255))
            cv2.putText(img, gesture.text, gesture.center, cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255, 255, 255), 3)
        cv2.imshow("Gestures", img)
        cv2.waitKey(1)
    cv2.destroyAllWindows()
    result = ep_vision.unsub_detect_info("gesture")
ep_camera.stop_video_stream()
ep_robot.close()
|
codegen/python/fixtures/sanic/server/congo/deliveries_if.py | mrpotes/go-raml | 142 | 12710839 | # DO NOT EDIT THIS FILE. This file will be overwritten when re-running go-raml.
from sanic import Blueprint
from sanic.views import HTTPMethodView
from sanic.response import text
from . import deliveries_api
deliveries_if = Blueprint('deliveries_if')
class deliveriesView(HTTPMethodView):
async def get(self, request):
return await deliveries_api.deliveries_get(request)
async def post(self, request):
return await deliveries_api.deliveries_post(request)
deliveries_if.add_route(deliveriesView.as_view(), '/deliveries')
class deliveries_bydeliveryIdView(HTTPMethodView):
async def get(self, request, deliveryId):
return await deliveries_api.deliveries_byDeliveryId_get(request, deliveryId)
async def patch(self, request, deliveryId):
return await deliveries_api.deliveries_byDeliveryId_patch(request, deliveryId)
async def delete(self, request, deliveryId):
return await deliveries_api.deliveries_byDeliveryId_delete(request, deliveryId)
deliveries_if.add_route(deliveries_bydeliveryIdView.as_view(), '/deliveries/<deliveryId>')
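# Example wiring (sketch; the app name is an assumption): register this
# blueprint on a Sanic app so the routes above become live.
#   from sanic import Sanic
#   app = Sanic('congo')
#   app.blueprint(deliveries_if)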
|
Packs/ApiModules/Scripts/IAMApiModule/IAMApiModule_test.py | diCagri/content | 799 | 12710851 | from IAMApiModule import *
APP_USER_OUTPUT = {
"user_id": "mock_id",
"user_name": "mock_user_name",
"first_name": "mock_first_name",
"last_name": "mock_last_name",
"active": "true",
"email": "<EMAIL>"
}
USER_APP_DATA = IAMUserAppData("mock_id", "mock_user_name", is_active=True, app_data=APP_USER_OUTPUT)
APP_DISABLED_USER_OUTPUT = {
"user_id": "mock_id",
"user_name": "mock_user_name",
"first_name": "mock_first_name",
"last_name": "mock_last_name",
"active": "false",
"email": "<EMAIL>"
}
DISABLED_USER_APP_DATA = IAMUserAppData("mock_id", "mock_user_name", is_active=False, app_data=APP_DISABLED_USER_OUTPUT)
class MockClient:
def get_user(self):
return None
def create_user(self):
return None
def update_user(self):
return None
def enable_user(self):
return None
def disable_user(self):
return None
def get_outputs_from_user_profile(user_profile):
entry_context = user_profile.to_entry()
outputs = entry_context.get('Contents')
return outputs
def test_get_user_command__existing_user(mocker):
"""
Given:
- An app client object
- A user-profile argument that contains an email of a user
When:
- The user exists in the application
- Calling function get_user_command
Then:
- Ensure the resulted User Profile object holds the correct user details
"""
    client = MockClient()
args = {'user-profile': {'email': '<EMAIL>'}}
mocker.patch.object(client, 'get_user', return_value=USER_APP_DATA)
mocker.patch.object(IAMUserProfile, 'update_with_app_data', return_value={})
user_profile = IAMCommand().get_user(client, args)
outputs = get_outputs_from_user_profile(user_profile)
assert outputs.get('action') == IAMActions.GET_USER
assert outputs.get('success') is True
assert outputs.get('active') is True
assert outputs.get('id') == 'mock_id'
assert outputs.get('username') == 'mock_user_name'
assert outputs.get('details', {}).get('first_name') == 'mock_first_name'
assert outputs.get('details', {}).get('last_name') == 'mock_last_name'
def test_get_user_command__non_existing_user(mocker):
"""
Given:
- An app client object
        - A user-profile argument that contains an email of a user
When:
- The user does not exist in the application
- Calling function get_user_command
Then:
- Ensure the resulted User Profile object holds information about an unsuccessful result.
"""
    client = MockClient()
args = {'user-profile': {'email': '<EMAIL>'}}
mocker.patch.object(client, 'get_user', return_value=None)
user_profile = IAMCommand().get_user(client, args)
outputs = get_outputs_from_user_profile(user_profile)
assert outputs.get('action') == IAMActions.GET_USER
assert outputs.get('success') is False
assert outputs.get('errorCode') == IAMErrors.USER_DOES_NOT_EXIST[0]
assert outputs.get('errorMessage') == IAMErrors.USER_DOES_NOT_EXIST[1]
def test_create_user_command__success(mocker):
"""
Given:
- An app client object
- A user-profile argument that contains an email of a non-existing user in the application
When:
- Calling function create_user_command
Then:
- Ensure a User Profile object with the user data is returned
"""
    client = MockClient()
args = {'user-profile': {'email': '<EMAIL>'}}
mocker.patch.object(client, 'get_user', return_value=None)
mocker.patch.object(client, 'create_user', return_value=USER_APP_DATA)
user_profile = IAMCommand(get_user_iam_attrs=['email']).create_user(client, args)
outputs = get_outputs_from_user_profile(user_profile)
assert outputs.get('action') == IAMActions.CREATE_USER
assert outputs.get('success') is True
assert outputs.get('active') is True
assert outputs.get('id') == 'mock_id'
assert outputs.get('username') == 'mock_user_name'
assert outputs.get('details', {}).get('first_name') == 'mock_first_name'
assert outputs.get('details', {}).get('last_name') == 'mock_last_name'
def test_create_user_command__user_already_exists(mocker):
"""
Given:
- An app client object
- A user-profile argument that contains an email of a user
When:
- The user already exists in the application and disabled
- allow-enable argument is false
- Calling function create_user_command
Then:
- Ensure the command is considered successful and the user is still disabled
"""
    client = MockClient()
args = {'user-profile': {'email': '<EMAIL>'}, 'allow-enable': 'false'}
mocker.patch.object(client, 'get_user', return_value=DISABLED_USER_APP_DATA)
mocker.patch.object(client, 'update_user', return_value=DISABLED_USER_APP_DATA)
user_profile = IAMCommand().create_user(client, args)
outputs = get_outputs_from_user_profile(user_profile)
assert outputs.get('action') == IAMActions.UPDATE_USER
assert outputs.get('success') is True
assert outputs.get('active') is False
assert outputs.get('id') == 'mock_id'
assert outputs.get('username') == 'mock_user_name'
assert outputs.get('details', {}).get('first_name') == 'mock_first_name'
assert outputs.get('details', {}).get('last_name') == 'mock_last_name'
def test_update_user_command__non_existing_user(mocker):
"""
Given:
- An app client object
- A user-profile argument that contains user data
When:
- The user does not exist in the application
- create-if-not-exists parameter is checked
- Create User command is enabled
- Calling function update_user_command
Then:
- Ensure the create action is executed
- Ensure a User Profile object with the user data is returned
"""
    client = MockClient()
args = {'user-profile': {'email': '<EMAIL>', 'givenname': 'mock_first_name'}}
mocker.patch.object(client, 'get_user', return_value=None)
mocker.patch.object(client, 'create_user', return_value=USER_APP_DATA)
user_profile = IAMCommand(create_if_not_exists=True).update_user(client, args)
outputs = get_outputs_from_user_profile(user_profile)
assert outputs.get('action') == IAMActions.CREATE_USER
assert outputs.get('success') is True
assert outputs.get('active') is True
assert outputs.get('id') == 'mock_id'
assert outputs.get('username') == 'mock_user_name'
assert outputs.get('details', {}).get('first_name') == 'mock_first_name'
assert outputs.get('details', {}).get('last_name') == 'mock_last_name'
def test_update_user_command__command_is_disabled(mocker):
"""
Given:
- An app client object
- A user-profile argument that contains user data
When:
- Update User command is disabled
- Calling function update_user_command
Then:
- Ensure the command is considered successful and skipped
"""
    client = MockClient()
args = {'user-profile': {'email': '<EMAIL>', 'givenname': 'mock_first_name'}}
mocker.patch.object(client, 'get_user', return_value=None)
mocker.patch.object(client, 'update_user', return_value=USER_APP_DATA)
user_profile = IAMCommand(is_update_enabled=False).update_user(client, args)
outputs = get_outputs_from_user_profile(user_profile)
assert outputs.get('action') == IAMActions.UPDATE_USER
assert outputs.get('success') is True
assert outputs.get('skipped') is True
assert outputs.get('reason') == 'Command is disabled.'
def test_disable_user_command__non_existing_user(mocker):
"""
Given:
- An app client object
- A user-profile argument that contains an email of a user
When:
- create-if-not-exists parameter is unchecked
- The user does not exist in the application
- Calling function disable_user_command
Then:
- Ensure the command is considered successful and skipped
"""
    client = MockClient()
args = {'user-profile': {'email': '<EMAIL>'}}
mocker.patch.object(client, 'get_user', return_value=None)
user_profile = IAMCommand().disable_user(client, args)
outputs = get_outputs_from_user_profile(user_profile)
assert outputs.get('action') == IAMActions.DISABLE_USER
assert outputs.get('success') is True
assert outputs.get('skipped') is True
assert outputs.get('reason') == IAMErrors.USER_DOES_NOT_EXIST[1]
|
embark/uploader/templatetags/filters.py | YulianaPoliakova/embark | 149 | 12710871 | from django import template
from django.forms.fields import CheckboxInput
register = template.Library()
@register.filter(name='is_checkbox')
def is_checkbox(value):
return isinstance(value, CheckboxInput)
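# Template usage sketch: because the filter inspects the value itself, pass
# the widget rather than the bound field (library name assumed to be
# `filters`).
#   {% load filters %}
#   {% if field.field.widget|is_checkbox %} ...render as checkbox... {% endif %}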
|
templates.py | tommccoy1/hans | 109 | 12710898 | <reponame>tommccoy1/hans
import random
import numpy as np
def despace(string):
new_string = string.replace(" ", " ")
if new_string == string:
return string
else:
return despace(new_string)
def remove_terminals(tree):
words = tree.split()
new_words = []
for word in words:
if word[0] == "(":
new_words.append("(")
else:
new_words.append(word)
new_tree = " ".join(new_words)
new_tree = new_tree.replace("(", " ( ").replace(")", " ) ")
return despace(new_tree.strip())
def binarize_helper(tree):
prev_word = ""
for index, word in enumerate(tree):
if word != "(" and word != ")":
if prev_word == "(":
if index < len(tree) - 1:
if tree[index + 1] == ")":
return binarize_helper(tree[:index - 1] + [tree[index]] + tree[index+2:])
prev_word = word
for index, word in enumerate(tree):
if word != "(" and word != ")":
if prev_word == "(":
if index < len(tree) - 2:
if tree[index + 1] != ")" and tree[index + 1] != "(" and tree[index + 2] == ")":
return binarize_helper(tree[:index-1] + [" ".join(tree[index-1:index+3])] + tree[index+3:])
prev_word = word
for index, word in enumerate(tree):
if word == ")":
if index > 2:
if tree[index - 1] != "(" and tree[index - 1] != ")" and tree[index - 2] != "(" and tree[index - 2] != ")" and tree[index - 3] != "(" and tree[index - 3] != ")":
return binarize_helper(tree[:index-2] + ["( " + tree[index-2] + " " + tree[index-1] + " )"] + tree[index:])
return tree
def valid_tree(binary_tree):
if len(binary_tree) > 1:
return False
tree = binary_tree[0]
parts = tree.split()
count_brackets = 0
for part in parts:
if part == "(":
count_brackets += 1
elif part == ")":
count_brackets -= 1
if count_brackets < 0:
return False
if count_brackets == 0:
return True
def binarize_tree(tree):
unterminaled = remove_terminals(tree)
words = unterminaled.split()
tree = binarize_helper(words)
if not valid_tree(tree):
print("WRONG TREE")
    return tree[0]
def generate_vp():
verb = random.choice(verbs)
if verb in intransitive_verbs:
return verb, "(VP (VBD " + verb + "))"
else:
obj = random.choice(nouns)
return verb + " the " + obj, "(VP (VBD " + verb +") (NP (DT the) (NN " + obj + ")))"
def parse_vp(vp):
words = vp.split()
if len(words) == 1:
return "(VP (VBD " + words[0] + "))"
else:
return "(VP (VBD " + words[0] +") (NP (DT the) (NN " + words[2] + ")))"
def parse_pp(pp):
words = pp.split()
if words[:2] == ["next","to"]:
return "(ADVP (JJ next) (PP (TO to) (NP (DT the) (" + noun_tag(words[-1]) + " " + words[-1] + "))))"
elif words[:3] == ["in", "front", "of"]:
return "(PP (IN in) (NP (NP (NN front)) (PP (IN of) (NP (NP (DT the) (" + noun_tag(words[-1]) + " " + words[-1] + "))))))"
else:
return "(PP (IN " + words[0] + ") (NP (DT the) (" + noun_tag(words[-1]) + " " + words[-1] + ")))"
def generate_rc():
rel = random.choice(rels)
verb = random.choice(verbs)
if verb in intransitive_verbs:
return rel + " " + verb
else:
arg = random.choice(nouns)
if random.randint(0, 1) == 0:
return rel + " the " + arg + " " + verb
else:
return rel + " " + verb + " the " + arg
def noun_tag(noun):
if noun in nouns_sg or noun in told_objects or noun in food_words or noun in location_nouns or noun in location_nouns_b or noun in won_objects or noun in read_wrote_objects:
return "NN"
elif noun in nouns_pl:
return "NNS"
else:
print "INVALID WORD", noun
def parse_rc(rc):
words = rc.split()
if words[0] == "that":
if len(words) == 2:
return "(SBAR (WHNP (WDT that)) (S (VP (VBD " + words[1] + "))))"
else:
if words[1] == "the":
return "(SBAR (WHNP (WDT that)) (S (NP (DT the) (" + noun_tag(words[2]) + " " + words[2] + ")) (VP (VBD " + words[3] + "))))"
else:
return "(SBAR (WHNP (WDT that)) (S (VP (VBD " + words[1] + ") (NP (DT the) (" + noun_tag(words[3]) + " " + words[3] + ")))))"
elif words[0] == "who":
if len(words) == 2:
return "(SBAR (WHNP (WP who)) (S (VP (VBD " + words[1] + "))))"
else:
if words[1] == "the":
return "(SBAR (WHNP (WP who)) (S (NP (DT the) (" + noun_tag(words[2]) + " " + words[2] + ")) (VP (VBD " + words[3] + "))))"
else:
return "(SBAR (WHNP (WP who)) (S (VP (VBD " + words[1] + ") (NP (DT the) (" + noun_tag(words[3]) + " " + words[3] + ")))))"
else:
print("INVALID RELATIVIZER")
def template_filler(template_list):
probs = []
templates = []
for template_pair in template_list:
probs.append(template_pair[0])
templates.append(template_pair[1])
template_index = np.random.choice(range(len(templates)), p=probs)
template_tuple = templates[template_index]
template = template_tuple[0]
hypothesis_template = template_tuple[1]
template_tag = template_tuple[2]
premise_tree_template = template_tuple[3]
hypothesis_tree_template = template_tuple[4]
premise_list = []
index_dict = {}
for (index, element) in template:
if element == "VP":
vp, vp_parse = generate_vp()
premise_list.append(vp)
index_dict[index] = vp
elif element == "RC":
rc = generate_rc()
premise_list.append(rc)
index_dict[index] = rc
elif "vobj" in element:
obj = random.choice(object_dict[index_dict[int(element[-1])]])
premise_list.append(obj)
index_dict[index] = obj
elif isinstance(element, str):
premise_list.append(element)
index_dict[index] = element
else:
word = random.choice(element)
premise_list.append(word)
index_dict[index] = word
hypothesis_list = [index_dict[ind] for ind in hypothesis_template]
premise_tree_list = []
hypothesis_tree_list = []
for elt in premise_tree_template:
if isinstance(elt,int):
premise_tree_list.append(index_dict[elt])
elif elt[:3] == "prc":
comma_split = elt.split(",")
start_ind = int(comma_split[1])
end_ind = int(comma_split[2])
rc_tree = ""
for i in range(start_ind, end_ind + 1):
rc_tree += index_dict[i] + " "
premise_tree_list.append(parse_rc(rc_tree.strip()))
elif elt[:2] == "nn":
comma_split = elt.split(",")
this_ind = int(comma_split[1])
premise_tree_list.append(noun_tag(index_dict[this_ind]))
elif elt[:3] == "pvp":
comma_split = elt.split(",")
this_ind = int(comma_split[1])
premise_tree_list.append(parse_vp(index_dict[this_ind]))
elif elt[:3] == "ppp":
comma_split = elt.split(",")
start_ind = int(comma_split[1])
end_ind = int(comma_split[2])
pp_tree = ""
for i in range(start_ind, end_ind + 1):
pp_tree += index_dict[i] + " "
premise_tree_list.append(parse_pp(pp_tree.strip()))
elif elt[:3] == "cap":
comma_split = elt.split(",")
this_ind = int(comma_split[1])
this_word = index_dict[this_ind]
premise_tree_list.append(this_word[0].upper() + this_word[1:])
else:
premise_tree_list.append(elt)
for elt in hypothesis_tree_template:
if isinstance(elt,int):
hypothesis_tree_list.append(index_dict[elt])
elif elt[:3] == "prc":
comma_split = elt.split(",")
start_ind = int(comma_split[1])
end_ind = int(comma_split[2])
rc_tree = ""
for i in range(start_ind, end_ind + 1):
rc_tree += index_dict[i] + " "
hypothesis_tree_list.append(parse_rc(rc_tree.strip()))
elif elt[:2] == "nn":
comma_split = elt.split(",")
this_ind = int(comma_split[1])
hypothesis_tree_list.append(noun_tag(index_dict[this_ind]))
elif elt[:3] == "pvp":
comma_split = elt.split(",")
this_ind = int(comma_split[1])
hypothesis_tree_list.append(parse_vp(index_dict[this_ind]))
elif elt[:3] == "cap":
comma_split = elt.split(",")
this_ind = int(comma_split[1])
this_word = index_dict[this_ind]
hypothesis_tree_list.append(this_word[0].upper() + this_word[1:])
else:
hypothesis_tree_list.append(elt)
premise_tree = "".join(premise_tree_list)
hypothesis_tree = "".join(hypothesis_tree_list)
return postprocess(" ".join(premise_list)), postprocess(" ".join(hypothesis_list)), template_tag, premise_tree, hypothesis_tree, binarize_tree(premise_tree), binarize_tree(hypothesis_tree)
def postprocess(sentence):
sentence = sentence[0].upper() + sentence[1:]
return sentence
nouns_sg = ["professor", "student", "president","judge","senator","secretary","doctor","lawyer","scientist","banker","tourist","manager","artist","author","actor","athlete"]
nouns_pl = ["professors", "students", "presidents","judges","senators","secretaries","doctors","lawyers","scientists","bankers","tourists","managers","artists","authors","actors","athletes"]
nouns = nouns_sg + nouns_pl
transitive_verbs = ["recommended", "called", "helped","supported","contacted","believed","avoided","advised","saw","stopped","introduced","mentioned","encouraged","thanked","recognized","admired"]
passive_verbs = ["recommended","helped","supported","contacted","believed","avoided","advised","stopped","introduced","mentioned","encouraged","thanked","recognized","admired"]
intransitive_verbs = ["slept", "danced", "ran","shouted","resigned","waited", "arrived", "performed"]
verbs = transitive_verbs + intransitive_verbs
nps_verbs = ["believed", "knew", "heard"]
#"forgot", "preferred", "claimed", "wanted", "needed", "found", "suggested", "expected"] # These all appear at least 100 times with both NP and S arguments
npz_verbs = ["hid", "moved", "presented", "paid","studied","stopped"]
npz_verbs_plural = ["fought"] #"paid", "changed", "studied", "answered", "stopped", "grew", "moved", "returned", "left","improved", "lost", "visited", "ate", "played"] # All appear at least 100 times with both transitive and intransitive frames
understood_argument_verbs = ["paid", "explored", "won", "wrote", "left", "read", "ate"] # All appear at least 100 times with both transitive and intransitive frames
nonentailing_quot_vebs = ["hoped", "claimed", "thought", "believed", "said", "assumed"]
question_embedding_verbs = ["wondered", "understood", "knew", "asked", "explained", "realized"]
preps = ["near", "behind", "by", "in front of", "next to"] # Each appears at least 100 times in MNLI
conjs = ["while", "after", "before", "when", "although", "because", "since"]
past_participles = ["studied", "paid", "helped","investigated", "presented"]
called_objects = ["coward", "liar", "hero", "fool"]
told_objects = ["story", "lie", "truth", "secret"]
food_words = ["fruit", "salad", "broccoli", "sandwich", "rice", "corn", "ice cream"]
location_nouns = ["neighborhood", "region", "country", "town", "valley", "forest", "garden", "museum", "desert", "island", "town"]
location_nouns_b = ["museum", "school", "library", "office","laboratory"]
won_objects = ["race", "contest", "war", "prize", "competition", "election", "battle", "award", "tournament"]
read_wrote_objects = ["book", "column", "report", "poem", "letter", "novel", "story", "play", "speech"]
adjs = ["important", "popular", "famous", "young", "happy", "helpful", "serious", "angry"] # All at least 100 times in MNLI
adj_comp_nonent = ["afraid", "sure", "certain"]
adj_comp_ent = ["sorry", "aware", "glad"]
advs = ["quickly", "slowly", "happily", "easily", "quietly", "thoughtfully"] # All at least 100 times in MNLI
const_adv = ["after", "before", "because", "although", "though", "since", "while"]
const_quot_entailed = ["forgot", "learned", "remembered", "knew"]
advs_nonentailed = ["supposedly", "probably", "maybe", "hopefully"]
advs_entailed = ["certainly", "definitely", "clearly", "obviously", "suddenly"]
rels = ["who", "that"]
quest = ["why", "how"]
nonent_complement_nouns = ["feeling", "evidence", "idea", "belief"]
ent_complement_nouns = ["fact", "reason", "news", "time"]
object_dict = {}
object_dict["called"] = called_objects
object_dict["told"] = told_objects
object_dict["brought"] = food_words
object_dict["made"] = food_words
object_dict["saved"] = food_words
object_dict["offered"] = food_words
object_dict["paid"] = nouns
object_dict["explored"] = location_nouns
object_dict["won"] = won_objects
object_dict["wrote"] = read_wrote_objects
object_dict["left"] = location_nouns
object_dict["read"] = read_wrote_objects
object_dict["ate"] = food_words
advs_embed_not_entailed = ["if","unless"]
advs_embed_entailed = ["after", "before", "because", "although", "though", "since", "while"]
advs_outside_not_entailed = ["if", "unless"]
advs_outside_entailed = ["after", "before", "because", "although", "though", "since", "while"]
# Lexical Overlap: Simple sentence
lex_simple_templates = [(1.0, ([(0,"the"), (1,nouns), (2,transitive_verbs), (3,"the"), (4,nouns), (5,".")], [3,4,2,0,1,5],"temp1",["(ROOT (S (NP (DT The) (", "nn,1", " ", 1,")) (VP (VBD ", 2, ") (NP (DT the) (", "nn,4", " ",4,"))) (. .)))"], ["(ROOT (S (NP (DT The) (","nn,4"," ", 4,")) (VP (VBD ", 2, ") (NP (DT the) (","nn,1"," ",1,"))) (. .)))"]))]
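# Minimal usage sketch (illustrative, not part of the original script): draw a
# single premise/hypothesis pair plus parses from the templates above.
#   premise, hypothesis, tag, p_tree, h_tree, p_bin, h_bin = \
#       template_filler(lex_simple_templates)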
# Lexical Overlap: Preposition on subject
lex_prep_templates = [
(1.0/6, ([(0,"the"), (1,nouns), (2,preps), (3,"the"), (4,nouns), (5,transitive_verbs), (6,"the"), (7,nouns), (8,".")], [3,4,5,0,1,8],"temp2", ["(ROOT (S (NP (NP (DT The) (","nn,1"," ",1,")) ","ppp,2,4",") (VP (VBD ",5,") (NP (DT the) (","nn,7"," ",7,"))) (. .)))"], ["(ROOT (S (NP (DT The) (","nn,4"," ", 4,")) (VP (VBD ", 5, ") (NP (DT the) (","nn,1"," ",1,"))) (. .)))"])),
(1.0/6,([(0,"the"), (1,nouns), (2,preps), (3,"the"), (4,nouns), (5,transitive_verbs), (6,"the"), (7,nouns), (8,".")], [6,7,5,0,1,8],"temp3", ["(ROOT (S (NP (NP (DT The) (","nn,1"," ",1,")) ","ppp,2,4",") (VP (VBD ",5,") (NP (DT the) (","nn,7"," ",7,"))) (. .)))"], ["(ROOT (S (NP (DT The) (","nn,7"," ", 7,")) (VP (VBD ", 5, ") (NP (DT the) (","nn,1"," ",1,"))) (. .)))"])),
(1.0/6,([(0,"the"), (1,nouns), (2,preps), (3,"the"), (4,nouns), (5,transitive_verbs), (6,"the"), (7,nouns), (8,".")], [6,7,5,3,4,8],"temp4",["(ROOT (S (NP (NP (DT The) (","nn,1"," ",1,")) ","ppp,2,4",") (VP (VBD ",5,") (NP (DT the) (","nn,7"," ",7,"))) (. .)))"], ["(ROOT (S (NP (DT The) (","nn,7"," ", 7,")) (VP (VBD ", 5, ") (NP (DT the) (","nn,4"," ",4,"))) (. .)))"])),
(1.0/6, ([(0, "the"), (1, nouns), (2, transitive_verbs), (3, "the"), (4,nouns), (5,preps), (6, "the"), (7, nouns), (8, ".")], [3,4,2,0,1,8],"temp5", ["(ROOT (S (NP (DT The) (","nn,1"," ",1,")) (VP (VBD ",2,") (NP (NP (DT the) (","nn,4"," ",4,")) ","ppp,5,7",")) (. .)))"], ["(ROOT (S (NP (DT The) (","nn,4"," ", 4,")) (VP (VBD ", 2, ") (NP (DT the) (","nn,1"," ",1,"))) (. .)))"])),
(1.0/6, ([(0, "the"), (1, nouns), (2, transitive_verbs), (3, "the"), (4,nouns), (5,preps), (6, "the"), (7, nouns), (8, ".")], [3,4,2,6,7,8],"temp6", ["(ROOT (S (NP (DT The) (","nn,1"," ",1,")) (VP (VBD ",2,") (NP (NP (DT the) (","nn,4"," ",4,")) ","ppp,5,7",")) (. .)))"], ["(ROOT (S (NP (DT The) (","nn,4"," ", 4,")) (VP (VBD ", 2, ") (NP (DT the) (","nn,7"," ",7,"))) (. .)))"])),
(1.0/6, ([(0, "the"), (1, nouns), (2, transitive_verbs), (3, "the"), (4,nouns), (5,preps), (6, "the"), (7, nouns), (8, ".")], [6,7,2,0,1,8],"temp7", ["(ROOT (S (NP (DT The) (","nn,1"," ",1,")) (VP (VBD ",2,") (NP (NP (DT the) (","nn,4"," ",4,")) ","ppp,5,7",")) (. .)))"], ["(ROOT (S (NP (DT The) (","nn,7"," ", 7,")) (VP (VBD ", 2, ") (NP (DT the) (","nn,1"," ",1,"))) (. .)))"]))
]
# Lexical Overlap: Relative clause on subject
lex_rc_templates = [
(1.0/12,([(0,"the"), (1,nouns), (2,rels), (3,"the"), (4,nouns), (5,transitive_verbs), (6,transitive_verbs), (7,"the"), (8,nouns), (9,".")], [3,4,6,0,1,9],"temp8", ["(ROOT (S (NP (NP (DT The) (","nn,1"," ", 1,")) ", "prc,2,5", ") (VP (VBD ", 6, ") (NP (DT the) (","nn,8"," ", 8, "))) (. .)))"], ["(ROOT (S (NP (DT The) (","nn,4"," ", 4,")) (VP (VBD ", 6, ") (NP (DT the) (", "nn,1"," ",1,"))) (. .)))"])),
(1.0/12,([(0,"the"), (1,nouns), (2,rels), (3,"the"), (4,nouns), (5,transitive_verbs), (6,transitive_verbs), (7,"the"), (8,nouns), (9,".")], [7,8,6,0,1,9],"temp9", ["(ROOT (S (NP (NP (DT The) (","nn,1"," ", 1,")) ", "prc,2,5", ") (VP (VBD ", 6, ") (NP (DT the) (","nn,8"," ", 8, "))) (. .)))"], ["(ROOT (S (NP (DT The) (","nn,8"," ", 8,")) (VP (VBD ", 6, ") (NP (DT the) (","nn,1"," ",1,"))) (. .)))"])),
(1.0/12,([(0,"the"), (1,nouns), (2,rels), (3,"the"), (4,nouns), (5,transitive_verbs), (6,transitive_verbs), (7,"the"), (8,nouns), (9,".")], [7,8,6,3,4,9],"temp10", ["(ROOT (S (NP (NP (DT The) (","nn,1"," ", 1,")) ", "prc,2,5", ") (VP (VBD ", 6, ") (NP (DT the) (","nn,8", " ", 8, "))) (. .)))"], ["(ROOT (S (NP (DT The) (","nn,8", " ", 8,")) (VP (VBD ", 6, ") (NP (DT the) (", "nn,4", " ",4,"))) (. .)))"])),
(1.0/12,([(0,"the"), (1,nouns), (2,rels), (3,transitive_verbs), (4,"the"), (5,nouns), (6,transitive_verbs), (7,"the"), (8,nouns), (9,".")], [4,5,6,0,1,9],"temp11", ["(ROOT (S (NP (NP (DT The) (", "nn,1"," ", 1,")) ", "prc,2,5", ") (VP (VBD ", 6, ") (NP (DT the) (", "nn,8"," ", 8, "))) (. .)))"], ["(ROOT (S (NP (DT The) (", "nn,5", " ", 5,")) (VP (VBD ", 6, ") (NP (DT the) (", "nn,1"," ",1,"))) (. .)))"])),
(1.0/12,([(0,"the"), (1,nouns), (2,rels), (3,transitive_verbs), (4,"the"), (5,nouns), (6,transitive_verbs), (7,"the"), (8,nouns), (9,".")], [7,8,6,0,1,9],"temp12", ["(ROOT (S (NP (NP (DT The) (","nn,1"," ", 1,")) ", "prc,2,5", ") (VP (VBD ", 6, ") (NP (DT the) (", "nn,8", " ", 8, "))) (. .)))"], ["(ROOT (S (NP (DT The) (", "nn,8"," ", 8,")) (VP (VBD ", 6, ") (NP (DT the) (", "nn,1"," ",1,"))) (. .)))"])),
(1.0/12,([(0,"the"), (1,nouns), (2,rels), (3,transitive_verbs), (4,"the"), (5,nouns), (6,transitive_verbs), (7,"the"), (8,nouns), (9,".")], [7,8,6,4,5,9],"temp13",["(ROOT (S (NP (NP (DT The) (", "nn,1", " ", 1,")) ", "prc,2,5", ") (VP (VBD ", 6, ") (NP (DT the) (", "nn,8", " ", 8, "))) (. .)))"], ["(ROOT (S (NP (DT The) (", "nn,8"," ", 8,")) (VP (VBD ", 6, ") (NP (DT the) (", "nn,5", " ",5,"))) (. .)))"])),
(1.0/12,([(0,"the"), (1,nouns), (2,transitive_verbs), (3, "the"), (4, nouns), (5,rels), (6, transitive_verbs), (7, "the"), (8,nouns), (9,".")], [3,4,2,0,1,9],"temp14", ["(ROOT (S (NP (DT The) (", "nn,1", " ", 1, ")) (VP (VBD ", 2, ") (NP (NP (DT the) (","nn,4"," ", 4, ")) ", "prc,5,8", ")) (. .)))"], ["(ROOT (S (NP (DT The) (","nn,4"," ", 4,")) (VP (VBD ", 2, ") (NP (DT the) (","nn,1"," ",1,"))) (. .)))"])),
(1.0/12,([(0,"the"), (1,nouns), (2,transitive_verbs), (3, "the"), (4, nouns), (5,rels), (6, transitive_verbs), (7, "the"), (8,nouns), (9,".")], [3,4,2,0,7,8,9],"temp15", ["(ROOT (S (NP (DT The) (","nn,1"," ", 1, ")) (VP (VBD ", 2, ") (NP (NP (DT the) (", "nn,4"," ", 4, ")) ", "prc,5,8", ")) (. .)))"], ["(ROOT (S (NP (DT The) (","nn,4"," ", 4,")) (VP (VBD ", 2, ") (NP (DT the) (","nn,8"," ",8,"))) (. .)))"])),
(1.0/12,([(0,"the"), (1,nouns), (2,transitive_verbs), (3, "the"), (4, nouns), (5,rels), (6, transitive_verbs), (7, "the"), (8,nouns), (9,".")], [7,8,2,0,1,9],"temp16", ["(ROOT (S (NP (DT The) (","nn,1"," ", 1, ")) (VP (VBD ", 2, ") (NP (NP (DT the) (","nn,4"," ", 4, ")) ", "prc,5,8", ")) (. .)))"], ["(ROOT (S (NP (DT The) (","nn,8"," ", 8,")) (VP (VBD ", 2, ") (NP (DT the) (","nn,1"," ",1,"))) (. .)))"])),
(1.0/12,([(0,"the"), (1,nouns), (2,transitive_verbs), (3, "the"), (4, nouns), (5,rels), (6, "the"), (7,nouns), (8,transitive_verbs), (9,".")], [3,4,2,0,1,9],"temp17", ["(ROOT (S (NP (DT The) (","nn,1"," ", 1, ")) (VP (VBD ", 2, ") (NP (NP (DT the) (","nn,4"," ", 4, ")) ", "prc,5,8", ")) (. .)))"], ["(ROOT (S (NP (DT The) (","nn,4"," ", 4,")) (VP (VBD ", 2, ") (NP (DT the) (","nn,1"," ",1,"))) (. .)))"])),
(1.0/12,([(0,"the"), (1,nouns), (2,transitive_verbs), (3, "the"), (4, nouns), (5,rels), (6, "the"), (7,nouns), (8,transitive_verbs), (9,".")], [3,4,2,6,7,9],"temp18", ["(ROOT (S (NP (DT The) (","nn,1"," ", 1, ")) (VP (VBD ", 2, ") (NP (NP (DT the) (","nn,4"," ", 4, ")) ", "prc,5,8", ")) (. .)))"], ["(ROOT (S (NP (DT The) (","nn,4"," ", 4,")) (VP (VBD ", 2, ") (NP (DT the) (","nn,7"," ",7,"))) (. .)))"])),
(1.0/12,([(0,"the"), (1,nouns), (2,transitive_verbs), (3, "the"), (4, nouns), (5,rels), (6, "the"), (7,nouns), (8,transitive_verbs), (9,".")], [6,7,2,0,1,9],"temp19", ["(ROOT (S (NP (DT The) (","nn,1"," ", 1, ")) (VP (VBD ", 2, ") (NP (NP (DT the) (","nn,4"," ", 4, ")) ", "prc,5,8", ")) (. .)))"], ["(ROOT (S (NP (DT The) (","nn,7"," ", 7,")) (VP (VBD ", 2, ") (NP (DT the) (","nn,1"," ",1,"))) (. .)))"]))
]
# Lexical Overlap: Passive incorrect
lex_pass_templates = [
(0.5,([(0,"the"), (1,nouns_sg), (2,"was"), (3,passive_verbs), (4,"by"), (5,"the"), (6,nouns), (7,".")], [0,1,3,5,6,7],"temp20", ["(ROOT (S (NP (DT The) (NN ",1,")) (VP (VBD was) (VP (VBN ",3,") (PP (IN by) (NP (DT the) (","nn,6"," ", 6,"))))) (. .)))"], ["(ROOT (S (NP (DT The) (","nn,1"," ", 1,")) (VP (VBD ", 3, ") (NP (DT the) (","nn,6"," ",6,"))) (. .)))"])),
(0.5,([(0,"the"), (1,nouns_pl), (2,"were"), (3,passive_verbs), (4,"by"), (5,"the"), (6,nouns), (7,".")], [0,1,3,5,6,7],"temp21", ["(ROOT (S (NP (DT The) (NNS ",1,")) (VP (VBD were) (VP (VBN ",3,") (PP (IN by) (NP (DT the) (","nn,6"," ", 6,"))))) (. .)))"], ["(ROOT (S (NP (DT The) (","nn,1"," ", 1,")) (VP (VBD ", 3, ") (NP (DT the) (","nn,6"," ",6,"))) (. .)))"]))
]
# Lexical Overlap: Conjunctions
lex_conj_templates = [
(0.25, ([(0,"the"), (1,nouns), (2,"and"), (3,"the"), (4,nouns), (5,transitive_verbs), (6,"the"), (7,nouns), (8,".")], [0,1,5,3,4,8],"temp22", ["(ROOT (S (NP (NP (DT The) (", "nn,1"," ", 1,")) (CC and) (NP (DT the) (","nn,4"," ",4,"))) (VP (VBD ",5,") (NP (DT the) (","nn,7"," ", 7,"))) (. .)))"], ["(ROOT (S (NP (DT The) (","nn,1"," ", 1,")) (VP (VBD ", 5, ") (NP (DT the) (","nn,4"," ",4,"))) (. .)))"] )),
(0.25, ([(0,"the"), (1,nouns), (2,"and"), (3,"the"), (4,nouns), (5,transitive_verbs), (6,"the"), (7,nouns), (8,".")], [3,4,5,0,1,8],"temp23", ["(ROOT (S (NP (NP (DT The) (", "nn,1"," ", 1,")) (CC and) (NP (DT the) (","nn,4"," ",4,"))) (VP (VBD ",5,") (NP (DT the) (","nn,7"," ", 7,"))) (. .)))"], ["(ROOT (S (NP (DT The) (","nn,4"," ", 4,")) (VP (VBD ", 5, ") (NP (DT the) (","nn,1"," ",1,"))) (. .)))"])),
(0.25, ([(0,"the"), (1,nouns), (2,transitive_verbs), (3,"the"), (4,nouns), (5, "and"), (6,"the"), (7,nouns), (8,".")], [3,4,2,6,7,8],"temp24", ["(ROOT (S (NP (DT The) (","nn,1", " ",1,")) (VP (VBD ", 2,") (NP (NP (DT the) (","nn,4", " ",4,")) (CC and) (NP (DT the) (","nn,7"," ",7,")))) (. .)))"], ["(ROOT (S (NP (DT The) (","nn,4"," ", 4,")) (VP (VBD ", 2, ") (NP (DT the) (","nn,7"," ",7,"))) (. .)))"])),
(0.25, ([(0,"the"), (1,nouns), (2,transitive_verbs), (3,"the"), (4,nouns), (5, "and"), (6,"the"), (7,nouns), (8,".")], [6,7,2,3,4,8],"temp25", ["(ROOT (S (NP (DT The) (","nn,1", " ",1,")) (VP (VBD ", 2,") (NP (NP (DT the) (","nn,4", " ",4,")) (CC and) (NP (DT the) (","nn,7"," ",7,")))) (. .)))"], ["(ROOT (S (NP (DT The) (","nn,7"," ", 7,")) (VP (VBD ", 2, ") (NP (DT the) (","nn,4"," ",4,"))) (. .)))"]))
]
# Lexical Overlap: Relative clause
lex_rc_ent_templates = [
(0.25, ([(0,"the"), (1,nouns), (2,rels), (3,"the"), (4,nouns), (5,transitive_verbs), (6,transitive_verbs), (7,"the"), (8,nouns), (9,".")], [3,4,5,0,1,9],"temp26", ["(ROOT (S (NP (NP (DT The) (","nn,1"," ", 1,")) ", "prc,2,5", ") (VP (VBD ", 6, ") (NP (DT the) (","nn,8"," ", 8, "))) (. .)))"], ["(ROOT (S (NP (DT The) (","nn,4"," ", 4,")) (VP (VBD ", 5, ") (NP (DT the) (", "nn,1"," ",1,"))) (. .)))"])),
(0.25, ([(0,"the"), (1,nouns), (2,rels), (3,transitive_verbs), (4,"the"), (5,nouns), (6,transitive_verbs), (7,"the"), (8,nouns), (9,".")], [0,1,3,4,5,9],"temp27", ["(ROOT (S (NP (NP (DT The) (","nn,1"," ", 1,")) ", "prc,2,5", ") (VP (VBD ", 6, ") (NP (DT the) (","nn,8"," ", 8, "))) (. .)))"], ["(ROOT (S (NP (DT The) (","nn,1"," ", 1,")) (VP (VBD ", 3, ") (NP (DT the) (", "nn,5"," ",5,"))) (. .)))"])),
(0.25, ([(0,"the"), (1,nouns), (2,transitive_verbs), (3,"the"), (4,nouns), (5,rels), (6,transitive_verbs), (7,"the"), (8,nouns), (9,".")], [3,4,6,7,8,9],"temp28", ["(ROOT (S (NP (DT The) (","nn,1"," ", 1, ")) (VP (VBD ", 2, ") (NP (NP (DT the) (","nn,4"," ", 4, ")) ", "prc,5,8", ")) (. .)))"], ["(ROOT (S (NP (DT The) (","nn,4"," ", 4,")) (VP (VBD ", 6, ") (NP (DT the) (","nn,8"," ",8,"))) (. .)))"])),
(0.25, ([(0,"the"), (1,nouns), (2,transitive_verbs), (3,"the"), (4,nouns), (5,rels), (6,"the"), (7,nouns), (8,transitive_verbs),(9,".")], [6,7,8,3,4,9],"temp29", ["(ROOT (S (NP (DT The) (","nn,1"," ", 1, ")) (VP (VBD ", 2, ") (NP (NP (DT the) (","nn,4"," ", 4, ")) ", "prc,5,8", ")) (. .)))"], ["(ROOT (S (NP (DT The) (","nn,7"," ", 7,")) (VP (VBD ", 8, ") (NP (DT the) (","nn,4"," ",4,"))) (. .)))"]))
]
# Lexical Overlap: Across PP
lex_cross_pp_ent_templates = [(1.0,([(0,"the"), (1,nouns), (2,preps), (3,"the"), (4,nouns), (5,transitive_verbs), (6, "the"), (7,nouns), (8,".")], [0,1,5,6,7,8],"temp30", ["(ROOT (S (NP (NP (DT The) (","nn,1"," ",1,")) ","ppp,2,4",") (VP (VBD ",5,") (NP (DT the) (","nn,7"," ",7,"))) (. .)))"], ["(ROOT (S (NP (DT The) (","nn,1"," ", 1,")) (VP (VBD ", 5, ") (NP (DT the) (","nn,7"," ",7,"))) (. .)))"]))]
# Lexical Overlap: Across relative clause
lex_cross_rc_ent_templates = [(1.0,([(0,"the"), (1,nouns), (2,"RC"), (3,transitive_verbs), (4,"the"), (5, nouns), (6,".")], [0,1,3,4,5,6],"temp31", ["(ROOT (S (NP (NP (DT The) (","nn,1"," ", 1,")) ", "prc,2,2", ") (VP (VBD ", 3, ") (NP (DT the) (","nn,5"," ", 5, "))) (. .)))"], ["(ROOT (S (NP (DT The) (","nn,1"," ", 1,")) (VP (VBD ", 3, ") (NP (DT the) (", "nn,5"," ",5,"))) (. .)))"]))]
# Lexical Overlap: Conjunctions
lex_ent_conj_templates = [
(0.5, ([(0,"the"), (1,nouns), (2,"and"), (3,"the"), (4,nouns), (5,transitive_verbs), (6,"the"), (7,nouns), (8,".")], [0,1,5,6,7,8],"temp32", ["(ROOT (S (NP (NP (DT The) (", "nn,1"," ", 1,")) (CC and) (NP (DT the) (","nn,4"," ",4,"))) (VP (VBD ",5,") (NP (DT the) (","nn,7"," ", 7,"))) (. .)))"], ["(ROOT (S (NP (DT The) (","nn,1"," ", 1,")) (VP (VBD ", 5, ") (NP (DT the) (","nn,7"," ",7,"))) (. .)))"])),
(0.5, ([(0,"the"), (1,nouns), (2,transitive_verbs), (3,"the"), (4,nouns), (5, "and"), (6,"the"), (7,nouns), (8,".")], [0,1,2,6,7,8],"temp33", ["(ROOT (S (NP (DT The) (","nn,1", " ",1,")) (VP (VBD ", 2,") (NP (NP (DT the) (","nn,4", " ",4,")) (CC and) (NP (DT the) (","nn,7"," ",7,")))) (. .)))"], ["(ROOT (S (NP (DT The) (","nn,1"," ", 1,")) (VP (VBD ", 2, ") (NP (DT the) (","nn,7"," ",7,"))) (. .)))"]))
]
# Lexical Overlap: Across adverb
lex_cross_adv_ent_templates = [(1.0,([(0,"the"), (1,nouns), (2,advs), (3,verbs), (4,".")], [0,1,3,4],"temp34"))]
# Lexical Overlap: Passive
lex_ent_pass_templates = [
(0.5,([(0,"the"), (1,nouns_sg), (2,"was"), (3,passive_verbs), (4,"by"), (5,"the"), (6,nouns), (7,".")], [5,6,3,0,1,7],"temp35", ["(ROOT (S (NP (DT The) (NN ",1,")) (VP (VBD was) (VP (VBN ",3,") (PP (IN by) (NP (DT the) (","nn,6"," ", 6,"))))) (. .)))"], ["(ROOT (S (NP (DT The) (","nn,6"," ", 6,")) (VP (VBD ", 3, ") (NP (DT the) (","nn,1"," ",1,"))) (. .)))"])),
(0.5,([(0,"the"), (1,nouns_pl), (2,"were"), (3,passive_verbs), (4,"by"), (5,"the"), (6,nouns), (7,".")], [5,6,3,0,1,7],"temp36", ["(ROOT (S (NP (DT The) (NNS ",1,")) (VP (VBD were) (VP (VBN ",3,") (PP (IN by) (NP (DT the) (","nn,6"," ", 6,"))))) (. .)))"], ["(ROOT (S (NP (DT The) (","nn,6"," ", 6,")) (VP (VBD ", 3, ") (NP (DT the) (","nn,1"," ",1,"))) (. .)))"]))
]
# Subsequence: NPS
subseq_nps_templates = [(1.0, ([(0,"the"), (1,nouns),(2,nps_verbs), (3,"the"), (4,nouns), (5,"VP"), (6, ".")], [0,1,2,3,4,6],"temp37", ["(ROOT (S (NP (DT The) (","nn,1", " ",1,")) (VP (VBD ",2,") (SBAR (S (NP (DT the) (","nn,4"," ",4,")) ", "pvp,5","))) (. .)))"], ["(ROOT (S (NP (DT The) (","nn,1"," ", 1,")) (VP (VBD ", 2, ") (NP (DT the) (","nn,4"," ",4,"))) (. .)))"]))]
# Subsequence: PP on subject
subseq_pp_on_subj_templates = [(1.0, ([(0,"the"), (1,nouns), (2,preps), (3,"the"), (4,nouns), (5,"VP"), (6, ".")], [3,4,5,6],"temp38",["(ROOT (S (NP (NP (DT The) (","nn,1"," ",1,")) ","ppp,2,4",") ","pvp,5"," (. .)))"], ["(ROOT (S (NP (DT The) (","nn,4"," ", 4,")) ","pvp,5"," (. .)))"]))]
# Subsequence: Rel clause on subject
subseq_rel_on_subj_templates = [(1.0, ([(0,"the"), (1,nouns), (2,rels), (3,transitive_verbs), (4,"the"), (5,nouns), (6,"VP"), (7,".")], [4,5,6,7],"temp39", ["(ROOT (S (NP (NP (DT The) (","nn,1"," ", 1,")) ", "prc,2,5", ") ","pvp,6"," (. .)))"], ["(ROOT (S (NP (DT The) (","nn,5"," ", 5,")) ","pvp,6"," (. .)))"]))]
# Subsequence: Past participles
subseq_past_participle_templates = [
((1.0 * len(intransitive_verbs) + len(transitive_verbs)) / (len(intransitive_verbs) + 2*len(transitive_verbs)), ([(0,"the"), (1,nouns), (2,past_participles), (3,"in"), (4,"the"), (5,location_nouns_b),(6,"VP"),(7,".")],[0,1,2,3,4,5,7],"temp40", ["(ROOT (S (NP (NP (DT The) (","nn,1"," ",1, ")) (VP (VBN ",2,") (PP (IN in) (NP (DT the) (NN ",5,"))))) ","pvp,6"," (. .)))"], ["(ROOT (S (NP (DT The) (","nn,1"," ",1,")) (VP (VBD ",2,") (PP (IN in) (NP (DT the) (NN ",5,")))) (. .)))"])),
((1.0 * len(transitive_verbs)) / (len(intransitive_verbs) + 2*len(transitive_verbs)), ([(0,"the"), (1,nouns), (2,transitive_verbs),(3,"the"),(4,nouns), (5,past_participles), (6,"in"), (7,"the"), (8,location_nouns_b),(9,".")],[3,4,5,6,7,8,9],"temp41", ["(ROOT (S (NP (DT The) (","nn,1"," ", 1,")) (VP (VBD ",2,") (NP (NP (DT the) (","nn,4"," ",4,")) (VP (VBN ",5,") (PP (IN in) (NP (DT the) (NN ",8,")))))) (. .)))"], ["(ROOT (S (NP (DT The) (","nn,4"," ",4,")) (VP (VBD ",5,") (PP (IN in) (NP (DT the) (NN ",8,")))) (. .)))"]))
]
# Subsequence: NP/Z
subseq_npz_templates = [
((1.0 * len(npz_verbs))/(len(npz_verbs) + len(npz_verbs_plural)), ([(0,conjs), (1,"the"), (2,nouns), (3,npz_verbs), (4,"the"), (5,nouns), (6,"VP"), (7, ".")], [1,2,3,4,5,7],"temp42", ["(ROOT (S (SBAR (IN ", "cap,0",") (S (NP (DT the) (","nn,2"," ", 2,")) (VP (VBD ",3,")))) (NP (DT the) (","nn,5"," ", 5,")) ","pvp,6"," (. .)))"], ["(ROOT (S (NP (DT The) (","nn,2"," ", 2,")) (VP (VBD ", 3, ") (NP (DT the) (","nn,5"," ",5,"))) (. .)))"])),
((1.0 * len(npz_verbs_plural))/(len(npz_verbs) + len(npz_verbs_plural)),([(0,conjs), (1,"the"), (2,nouns_pl), (3,npz_verbs_plural), (4,"the"), (5,nouns), (6,"VP"), (7, ".")], [1,2,3,4,5,7],"temp43", ["(ROOT (S (SBAR (IN ", "cap,0",") (S (NP (DT the) (","nn,2"," ", 2,")) (VP (VBD ",3,")))) (NP (DT the) (","nn,5"," ", 5,")) ","pvp,6"," (. .)))"], ["(ROOT (S (NP (DT The) (","nn,2"," ", 2,")) (VP (VBD ", 3, ") (NP (DT the) (","nn,5"," ",5,"))) (. .)))"]))
]
# Subsequence: Conjoined subject
subseq_conj_templates = [
((1.0 * len(intransitive_verbs) + len(transitive_verbs)) / (len(intransitive_verbs) + 2*len(transitive_verbs)),([(0, "the"), (1,nouns), (2,"and"), (3,"the"), (4,nouns), (5,"VP"), (6, ".")], [3,4,5,6],"temp44",["(ROOT (S (NP (NP (DT The) (", "nn,1"," ", 1,")) (CC and) (NP (DT the) (","nn,4"," ",4,"))) ","pvp,5"," (. .)))"], ["(ROOT (S (NP (DT The) (","nn,4"," ", 4,")) ","pvp,5"," (. .)))"])),
((1.0 * len(transitive_verbs)) / (len(intransitive_verbs) + 2*len(transitive_verbs)),([(0, "the"), (1,nouns),(2, transitive_verbs), (3, "the"), (4, nouns), (5, "and"), (6, "the"), (7, nouns), (8, ".")], [0,1,2,3,4,8],"temp45",["(ROOT (S (NP (DT The) (","nn,1", " ",1,")) (VP (VBD ", 2,") (NP (NP (DT the) (","nn,4", " ",4,")) (CC and) (NP (DT the) (","nn,7"," ",7,")))) (. .)))"], ["(ROOT (S (NP (DT The) (","nn,1"," ", 1,")) (VP (VBD ", 2, ") (NP (DT the) (","nn,4"," ",4,"))) (. .)))"]))
]
# Subsequence: Modified plural subject
subseq_adj_templates = [(1.0, ([(0, adjs), (1,nouns_pl), (2,"VP"), (3, ".")], [1,2,3],"temp46", ["(ROOT (S (NP (JJ ","cap,0",") (NNS ",1,")) ","pvp,2"," (. .)))"], ["(ROOT (S (NP (NNS ", "cap,1",")) ","pvp,2"," (. .)))"]))]
# Subsequence: Adverb
subseq_adv_templates = [(1.0, ([(0, "the"), (1,nouns), (2,"VP"), (3, advs), (4,".")], [0,1,2]))]
# Subsequence: Understood argument
subseq_understood_templates = [(1.0, ([(0,"the"), (1,nouns), (2,understood_argument_verbs), (3, "the"), (4,"vobj:2"), (5, ".")], [0,1,2,5],"temp47", ["(ROOT (S (NP (DT The) (","nn,1"," ", 1,")) (VP (VBD ", 2, ") (NP (DT the) (","nn,4"," ",4,"))) (. .)))"], ["(ROOT (S (NP (DT The) (","nn,1"," ", 1,")) (VP (VBD ", 2,")) (. .)))"]))]
# Subsequence: Relative clause
subseq_rel_on_obj_templates = [(1.0, ([(0, "the"), (1,nouns), (2,transitive_verbs), (3,"the"), (4,nouns), (5,"RC"), (6,".")], [0,1,2,3,4,6],"temp48",["(ROOT (S (NP (DT The) (","nn,1"," ", 1, ")) (VP (VBD ", 2, ") (NP (NP (DT the) (","nn,4"," ", 4, ")) ", "prc,5,5", ")) (. .)))"], ["(ROOT (S (NP (DT The) (","nn,1"," ", 1,")) (VP (VBD ", 2, ") (NP (DT the) (","nn,4"," ",4,"))) (. .)))"]))]
# Subsequence: PP
subseq_pp_on_obj_templates = [(1.0, ([(0, "the"), (1,nouns), (2,transitive_verbs), (3,"the"), (4,nouns), (5,preps), (6,"the"), (7,nouns), (8,".")], [0,1,2,3,4,8],"temp49", ["(ROOT (S (NP (DT The) (","nn,1"," ",1,")) (VP (VBD ",2,") (NP (NP (DT the) (","nn,4"," ",4,")) ","ppp,5,7",")) (. .)))"], ["(ROOT (S (NP (DT The) (","nn,1"," ", 1,")) (VP (VBD ", 2, ") (NP (DT the) (","nn,4"," ",4,"))) (. .)))"]))]
# Constituent: If
const_under_if_templates = [
(0.5, ([(0,advs_embed_not_entailed), (1,"the"), (2,nouns), (3,"VP"), (4, ","), (5,"the"), (6,nouns), (7,"VP"), (8,".")], [1,2,3,8],"temp50", ["(ROOT (S (SBAR (IN ","cap,0",") (S (NP (DT the) (","nn,2"," ", 2,")) ","pvp,3",")) (, ,) (S (NP (DT the) (","nn,6"," ",6,")) ","pvp,7",") (. .)))"], ["(ROOT (S (NP (DT The) (","nn,2"," ", 2,")) ","pvp,3"," (. .)))"] )),
(0.25, ([(0,"whether or not"), (1,"the"), (2,nouns), (3,"VP"), (4, ","), (5,"the"), (6,nouns), (7,"VP"), (8,".")], [1,2,3,8],"temp50", ["(ROOT (S (SBAR (IN Whether) (CC or) (RB not) (S (NP (DT the) (","nn,2"," ", 2,")) ","pvp,3",")) (, ,) (S (NP (DT the) (","nn,6"," ",6,")) ","pvp,7",") (. .)))"], ["(ROOT (S (NP (DT The) (","nn,2"," ", 2,")) ","pvp,3"," (. .)))"] )),
(0.25, ([(0,"in case"), (1,"the"), (2,nouns), (3,"VP"), (4, ","), (5,"the"), (6,nouns), (7,"VP"), (8,".")], [1,2,3,8],"temp50", ["(ROOT (S (SBAR (PP (IN In) (NP (NN case))) (S (NP (DT the) (","nn,2"," ", 2,")) ","pvp,3",")) (, ,) (S (NP (DT the) (","nn,6"," ",6,")) ","pvp,7",") (. .)))"], ["(ROOT (S (NP (DT The) (","nn,2"," ", 2,")) ","pvp,3"," (. .)))"] ))
]
const_outside_if_templates = [(1.0, ([(0,advs_outside_not_entailed), (1,"the"), (2,nouns), (3,"VP"), (4,","), (5,"the"), (6,nouns), (7,"VP"), (8,".")], [5,6,7,8],"temp51",["(ROOT (S (SBAR (IN ","cap,0",") (S (NP (DT the) (","nn,2"," ", 2,")) ","pvp,3",")) (, ,) (S (NP (DT the) (","nn,6"," ",6,")) ","pvp,7",") (. .)))"], ["(ROOT (S (NP (DT The) (","nn,6"," ", 6,")) ","pvp,7"," (. .)))"]))]
# Constituent: Said
const_quot_templates = [
(1.0, ([(0, "the"), (1,nouns), (2,nonentailing_quot_vebs), (3,"that"), (4,"the"), (5,nouns), (6,"VP"), (7,".")], [4,5,6,7],"temp52", ["(ROOT (S (NP (DT The) (","nn,1"," ",1,")) (VP (VBD ",2,") (SBAR (IN that) (S (NP (DT the) (","nn,5", " ",5,")) ","pvp,6","))) (. .)))"], ["(ROOT (S (NP (DT The) (","nn,5"," ", 5,")) ","pvp,6"," (. .)))"]))
]
# All appear at least 100 times with S complements: "seemed", "appeared", "told", "reported"
# Constituent: Disjunction
const_disj_templates = [
(0.5, ([(0,"the"), (1,nouns), (2,"VP"), (3,","), (4,"or"), (5,"the"), (6,nouns), (7,"VP"), (8,".")], [0, 1, 2, 8], "temp53", ["(ROOT (S (S (NP (DT The) (","nn,1"," ",1, ")) ","pvp,2",") (, ,) (CC or) (S (NP (DT the) (","nn,6", " ",6,")) ","pvp,7",") (. .)))"], ["(ROOT (S (NP (DT The) (","nn,1"," ", 1,")) ","pvp,2"," (. .)))"])),
(0.5, ([(0,"the"), (1,nouns), (2,"VP"), (3,","), (4,"or"), (5,"the"), (6,nouns), (7,"VP"), (8,".")], [5, 6, 7, 8],"temp54", ["(ROOT (S (S (NP (DT The) (","nn,1"," ",1, ")) ","pvp,2",") (, ,) (CC or) (S (NP (DT the) (","nn,6", " ",6,")) ","pvp,7",") (. .)))"], ["(ROOT (S (NP (DT The) (","nn,6"," ", 6,")) ","pvp,7"," (. .)))"]))
]
# Constituent: Noun complements
const_noun_comp_nonent_templates = [(1.0, ([(0,"the"), (1,nouns), (2,"had"), (3,"the"), (4,nonent_complement_nouns), (5,"that"), (6, "the"), (7,nouns), (8,"VP"), (9,".")], [6,7,8,9],"temp55"))]
# All appear at least 100 times with S complements, e.g. "story"
# Constituent: Adjective complements
const_adj_comp_nonent_templates = [(0.5, ([(0,"the"), (1,nouns_sg), (2,"was"), (3,adj_comp_nonent), (4,"that"), (5, "the"), (6,nouns), (7,"VP"), (8, ".")], [5,6,7,8],"temp56")), (0.5, ([(0,"the"), (1,nouns_pl), (2,"were"), (3,adj_comp_nonent), (4,"that"), (5, "the"), (6,nouns), (7,"VP"), (8, ".")], [5,6,7,8],"temp57"))]
# All appear at least 100 times with S complements
# Constituent: Probably, supposedly, ...
const_advs_nonent_templates = [(1.0,([(0,advs_nonentailed), (1,"the"), (2,nouns), (3,"VP"), (4,".")], [1,2,3,4],"temp58",["(ROOT (S (ADVP (RB ","cap,0",")) (S (NP (DT the) (","nn,2"," ",2,")) ","pvp,3",") (. .)))"], ["(ROOT (S (NP (DT The) (","nn,2"," ",2,")) ","pvp,3"," (. .)))"]))]
# Constituent: Since
const_adv_embed_templates = [(1.0, ([(0,advs_embed_entailed), (1,"the"), (2,nouns), (3,"VP"), (4,","), (5,"the"), (6,nouns), (7,"VP"), (8,".")], [1,2,3,8],"temp59", ["(ROOT (S (SBAR (IN ","cap,0",") (S (NP (DT the) (","nn,2"," ", 2,")) ","pvp,3",")) (, ,) (S (NP (DT the) (","nn,6"," ",6,")) ","pvp,7",") (. .)))"], ["(ROOT (S (NP (DT The) (","nn,2"," ", 2,")) ","pvp,3"," (. .)))"]))]
const_adv_outside_templates = [
(7.0/9, ([(0,advs_outside_entailed), (1,"the"), (2,nouns), (3,"VP"), (4,","), (5,"the"), (6,nouns), (7,"VP"), (8,".")], [5,6,7,8],"temp60", ["(ROOT (S (SBAR (IN ","cap,0",") (S (NP (DT the) (","nn,2"," ", 2,")) ","pvp,3",")) (, ,) (S (NP (DT the) (","nn,6"," ",6,")) ","pvp,7",") (. .)))"], ["(ROOT (S (NP (DT The) (","nn,6"," ", 6,")) ","pvp,7"," (. .)))"])),
(1.0/9, ([(0,"in case"), (1,"the"), (2,nouns), (3,"VP"), (4,","), (5,"the"), (6,nouns), (7,"VP"), (8,".")], [5,6,7,8],"temp60", ["(ROOT (S (SBAR (PP (IN In) (NP (NN case))) (S (NP (DT the) (","nn,2"," ", 2,")) ","pvp,3",")) (, ,) (S (NP (DT the) (","nn,6"," ",6,")) ","pvp,7",") (. .)))"], ["(ROOT (S (NP (DT The) (","nn,6"," ", 6,")) ","pvp,7"," (. .)))"])),
(1.0/9, ([(0,"whether or not"), (1,"the"), (2,nouns), (3,"VP"), (4,","), (5,"the"), (6,nouns), (7,"VP"), (8,".")], [5,6,7,8],"temp60", ["(ROOT (S (SBAR (IN Whether) (CC or) (RB not) (S (NP (DT the) (","nn,2"," ", 2,")) ","pvp,3",")) (, ,) (S (NP (DT the) (","nn,6"," ",6,")) ","pvp,7",") (. .)))"], ["(ROOT (S (NP (DT The) (","nn,6"," ", 6,")) ","pvp,7"," (. .)))"]))
]
# Constituent: Knew
const_quot_ent_templates = [(1.0, ([(0, "the"), (1,nouns), (2,const_quot_entailed), (3,"that"), (4,"the"), (5,nouns), (6,"VP"), (7,".")], [4,5,6,7],"temp61", ["(ROOT (S (NP (DT The) (","nn,1"," ",1,")) (VP (VBD ",2,") (SBAR (IN that) (S (NP (DT the) (","nn,5", " ",5,")) ","pvp,6","))) (. .)))"], ["(ROOT (S (NP (DT The) (","nn,5"," ", 5,")) ","pvp,6"," (. .)))"]))]
# Constituent: Conjunction
const_conj_templates = [
(0.5, ([(0,"the"), (1,nouns), (2,"VP"), (3,","), (4,"and"), (5,"the"), (6,nouns), (7,"VP"), (8,".")], [0, 1, 2, 8],"temp62", ["(ROOT (S (S (NP (DT The) (","nn,1"," ",1, ")) ","pvp,2",") (, ,) (CC and) (S (NP (DT the) (","nn,6", " ",6,")) ","pvp,7",") (. .)))"], ["(ROOT (S (NP (DT The) (","nn,1"," ", 1,")) ","pvp,2"," (. .)))"])),
(0.5, ([(0,"the"), (1,nouns), (2,"VP"), (3,","), (4,"and"), (5,"the"), (6,nouns), (7,"VP"), (8,".")], [5, 6, 7, 8],"temp63", ["(ROOT (S (S (NP (DT The) (","nn,1"," ",1, ")) ","pvp,2",") (, ,) (CC and) (S (NP (DT the) (","nn,6", " ",6,")) ","pvp,7",") (. .)))"], ["(ROOT (S (NP (DT The) (","nn,6"," ", 6,")) ","pvp,7"," (. .)))"]))
]
# Constituent: Embedded question
const_embed_quest = [(1.0, ([(0,"the"), (1,nouns), (2,question_embedding_verbs), (3,quest), (4,"the"), (5,nouns), (6,"VP"), (7,".")], [4,5,6,7],"temp64"))]
# Constituent: Noun complements
const_noun_comp_ent_templates = [(1.0, ([(0,"the"), (1,nouns), (2,"had"), (3,"the"), (4,ent_complement_nouns), (5,"that"), (6,"the"), (7,nouns), (8,"VP"), (9,".")], [6,7,8,9],"temp65"))]
# Constituent: Adjective complements
const_adj_comp_ent_templates = [(0.5, ([(0,"the"), (1,nouns_sg), (2,"was"), (3,adj_comp_ent), (4,"that"), (5, "the"), (6,nouns), (7,"VP"), (8, ".")], [5,6,7,8],"temp66")), (0.5, ([(0,"the"), (1,nouns_pl), (2,"were"), (3,adj_comp_ent), (4,"that"), (5, "the"), (6,nouns), (7,"VP"), (8, ".")], [5,6,7,8],"temp67"))]
# Constituent: Sentential adverbs
const_advs_ent_templates = [
(5.0/7,([(0,advs_entailed), (1,"the"), (2,nouns), (3,"VP"), (4,".")], [1,2,3,4],"temp68",["(ROOT (S (ADVP (RB ","cap,0",")) (S (NP (DT the) (","nn,2"," ",2,")) ","pvp,3",") (. .)))"], ["(ROOT (S (NP (DT The) (","nn,2"," ",2,")) ","pvp,3"," (. .)))"])),
(1.0/7,([(0,"without a doubt"), (1,"the"), (2,nouns), (3,"VP"), (4,".")], [1,2,3,4],"temp68",["(ROOT (S (PP (IN Without) (NP (DT a) (NN doubt))) (S (NP (DT the) (","nn,2"," ",2,")) ","pvp,3",") (. .)))"], ["(ROOT (S (NP (DT The) (","nn,2"," ",2,")) ","pvp,3"," (. .)))"])),
(1.0/7,([(0,"of course"), (1,"the"), (2,nouns), (3,"VP"), (4,".")], [1,2,3,4],"temp68",["(ROOT (S (PP (IN Of) (NP (NN course))) (S (NP (DT the) (","nn,2"," ",2,")) ","pvp,3",") (. .)))"], ["(ROOT (S (NP (DT The) (","nn,2"," ",2,")) ","pvp,3"," (. .)))"]))
]
if __name__ == "__main__":
print("Lexical Overlap: Not entailed")
print(template_filler(lex_simple_templates))
print(template_filler(lex_prep_templates))
print(template_filler(lex_rc_templates))
print(template_filler(lex_pass_templates))
print(template_filler(lex_conj_templates))
print("")
print("Lexical Overlap: Entailed")
print(template_filler(lex_rc_ent_templates))
print(template_filler(lex_cross_pp_ent_templates))
print(template_filler(lex_cross_rc_ent_templates))
print(template_filler(lex_ent_conj_templates))
#print(template_filler(lex_cross_adv_ent_templates))
print(template_filler(lex_ent_pass_templates))
print("")
print("Subsequence: Not entailed")
print(template_filler(subseq_nps_templates))
print(template_filler(subseq_pp_on_subj_templates))
print(template_filler(subseq_rel_on_subj_templates))
print(template_filler(subseq_past_participle_templates))
print(template_filler(subseq_npz_templates))
print("")
print("Subsequence: Entailed")
print(template_filler(subseq_conj_templates))
print(template_filler(subseq_adj_templates))
#print(template_filler(subseq_adv_templates))
print(template_filler(subseq_understood_templates))
print(template_filler(subseq_rel_on_obj_templates))
print(template_filler(subseq_pp_on_obj_templates))
print("")
print("Constituent: Not entailed")
print(template_filler(const_under_if_templates))
print(template_filler(const_outside_if_templates))
print(template_filler(const_quot_templates))
print(template_filler(const_disj_templates))
#print(template_filler(const_noun_comp_nonent_templates))
#print(template_filler(const_adj_comp_nonent_templates))
print(template_filler(const_advs_nonent_templates))
print("")
print("Constituent: Entailed")
print(template_filler(const_adv_embed_templates))
print(template_filler(const_adv_outside_templates))
print(template_filler(const_quot_ent_templates))
print(template_filler(const_conj_templates))
#print(template_filler(const_embed_quest))
#print(template_filler(const_noun_comp_ent_templates))
#print(template_filler(const_adj_comp_ent_templates))
print(template_filler(const_advs_ent_templates))
|
bip_utils/bip/bip84/__init__.py | MIPPLTeam/bip_utils | 149 | 12710901 |
from bip_utils.bip.bip84.bip84 import Bip84
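# Re-export so callers can simply write: from bip_utils.bip.bip84 import Bip84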
|
pyrtl/rtllib/prngs.py | ryoon/PyRTL | 159 | 12710922 | """
``Example``::
``csprng_trivium``
load, req = pyrtl.Input(1, 'load'), pyrtl.Input(1, 'req')
ready, rand = pyrtl.Output(1, 'ready'), pyrtl.Output(128, 'rand')
ready_out, rand_out = prngs.csprng_trivium(128, load, req)
ready <<= ready_out
rand <<= rand_out
sim_trace = pyrtl.SimulationTrace()
sim = pyrtl.Simulation(tracer=sim_trace)
# seed once at the beginning
sim.step({'load': 1, 'req': 0})
while sim.value[ready] == 0: # or loop 19 cycles
sim.step({'load': 0, 'req': 0})
sim.step({'load': 0, 'req': 1})
while sim.value[ready] == 0: # or loop 2 cycles
sim.step({'load': 0, 'req': 0})
print(sim.inspect(rand))
sim_trace.render_trace(symbol_len=45, segment_size=5)
``prng_xoroshiro128``
load, req = pyrtl.Input(1, 'load'), pyrtl.Input(1, 'req')
ready, rand = pyrtl.Output(1, 'ready'), pyrtl.Output(128, 'rand')
ready_out, rand_out = prngs.prng_xoroshiro128(128, load, req)
ready <<= ready_out
rand <<= rand_out
sim_trace = pyrtl.SimulationTrace()
sim = pyrtl.Simulation(tracer=sim_trace)
sim.step({'load': 1, 'req': 0}) # seed once at the beginning
sim.step({'load': 0, 'req': 1})
while sim.value[ready] == 0: # or loop 2 cycles
sim.step({'load': 0, 'req': 0})
print(sim.inspect(rand))
sim_trace.render_trace(symbol_len=40, segment_size=1)
``prng_lfsr``
load, req = pyrtl.Input(1, 'load'), pyrtl.Input(1, 'req')
rand = pyrtl.Output(64, 'rand')
rand <<= prngs.prng_lfsr(64, load, req)
sim_trace = pyrtl.SimulationTrace()
sim = pyrtl.Simulation(tracer=sim_trace)
sim.step({'load': 1, 'req': 0}) # seed once at the beginning
sim.step({'load': 0, 'req': 1})
sim.step({'load': 0, 'req': 0})
print(sim.inspect(rand))
sim_trace.render_trace(symbol_len=40, segment_size=1)
``explicit seeding``
seed = pyrtl.Input(127, 'seed')
load, req = pyrtl.Input(1, 'load'), pyrtl.Input(1, 'req')
rand = pyrtl.Output(32, 'rand')
rand <<= prngs.prng_lfsr(32, load, req, seed)
sim_trace = pyrtl.SimulationTrace()
sim = pyrtl.Simulation(tracer=sim_trace)
sim.step({'load': 1, 'req': 0, 'seed': 0x102030405060708090a0b0c0d0e0f010})
sim.step({'load': 0, 'req': 1, 'seed': 0x102030405060708090a0b0c0d0e0f010})
sim.step({'load': 0, 'req': 0, 'seed': 0x102030405060708090a0b0c0d0e0f010})
print(sim.inspect(rand))
sim_trace.render_trace(symbol_len=40, segment_size=1)
"""
from __future__ import absolute_import
import pyrtl
def prng_lfsr(bitwidth, load, req, seed=None):
""" Builds a single-cycle PRNG using a 127 bits Fibonacci LFSR.
:param bitwidth: the desired bitwidth of the random number
:param load: one bit signal to load the seed into the prng
:param req: one bit signal to request a random number
:param seed: 127 bits WireVector, defaults to None (self-seeding),
refrain from self-seeding if reseeding at run time is required
:return: register containing the random number with the given bitwidth
A very fast and compact PRNG that generates a random number using only one clock cycle.
Has a period of 2**127 - 1. Its linearity makes it a bit statistically weak, but should be
good enough for any noncryptographic purpose like test pattern generation.
"""
    # 127 bits is chosen because 127 is a Mersenne prime, which makes the period of the
    # LFSR maximized at 2**127 - 1 for any requested bitwidth
if seed is None:
import random
cryptogen = random.SystemRandom()
seed = cryptogen.randrange(1, 2**127) # seed itself if no seed signal is given
lfsr = pyrtl.Register(127 if bitwidth < 127 else bitwidth)
# leap ahead by shifting the LFSR bitwidth times
leap_ahead = lfsr
for i in range(bitwidth):
leap_ahead = pyrtl.concat(leap_ahead, leap_ahead[125] ^ leap_ahead[126])
with pyrtl.conditional_assignment:
with load:
lfsr.next |= seed
with req:
lfsr.next |= leap_ahead
return lfsr[:bitwidth]
def prng_xoroshiro128(bitwidth, load, req, seed=None):
""" Builds a PRNG using the Xoroshiro128+ algorithm in hardware.
:param bitwidth: the desired bitwidth of the random number
:param load: one bit signal to load the seed into the prng
:param req: one bit signal to request a random number
:param seed: 128 bits WireVector, defaults to None (self-seeding),
refrain from self-seeding if reseeding at run time is required
:return ready, rand: ready is a one bit signal showing the random number has been
produced, rand is a register containing the random number with the given bitwidth
An efficient noncryptographic PRNG, has much smaller area than Trivium.
But it does require a 64-bit adder to compute the output, so it is a bit slower.
Has a period of 2**128 - 1. Passes most statistical tests. Outputs a 64-bit random
word each cycle, takes multiple cycles if more than 64 bits are requested, and MSBs
of the random words are returned if the bitwidth is not a multiple of 64.
See also http://xoroshiro.di.unimi.it/
"""
from math import ceil, log
from pyrtl.rtllib import adders
from pyrtl.rtllib.libutils import _shifted_reg_next as shift # for readability
if seed is None:
import random
cryptogen = random.SystemRandom()
seed = cryptogen.randrange(1, 2**128) # seed itself if no seed signal is given
seed = pyrtl.as_wires(seed, 128)
s0, s1 = (pyrtl.Register(64) for i in range(2))
output = pyrtl.WireVector(64)
# update internal states by xoring, rotating, and shifting
_s1 = s0 ^ s1
s0_next = (shift(s0, 'l', 55) | shift(s0, 'r', 9)) ^ shift(_s1, 'l', 14) ^ _s1
s1_next = shift(_s1, 'l', 36) | shift(_s1, 'r', 28)
output <<= adders.kogge_stone(s0, s1)
gen_cycles = int(ceil(bitwidth / 64))
counter_bitwidth = int(ceil(log(gen_cycles, 2))) if gen_cycles > 1 else 1
rand = pyrtl.Register(gen_cycles * 64)
counter = pyrtl.Register(counter_bitwidth, 'counter')
gen_done = counter == gen_cycles - 1
state = pyrtl.Register(1)
WAIT, GEN = (pyrtl.Const(x) for x in range(2))
with pyrtl.conditional_assignment:
with load:
s0.next |= seed[:64]
s1.next |= seed[64:]
state.next |= WAIT
with req:
counter.next |= 0
s0.next |= s0_next
s1.next |= s1_next
rand.next |= pyrtl.concat(rand, output)
state.next |= GEN
with state == GEN:
with ~gen_done:
counter.next |= counter + 1
s0.next |= s0_next
s1.next |= s1_next
rand.next |= pyrtl.concat(rand, output)
ready = ~load & ~req & (state == GEN) & gen_done
return ready, rand[-bitwidth:] # return MSBs because LSBs are less random
def csprng_trivium(bitwidth, load, req, seed=None, bits_per_cycle=64):
""" Builds a cyptographically secure PRNG using the Trivium stream cipher.
:param bitwidth: the desired bitwidth of the random number
:param load: one bit signal to load the seed into the prng
:param req: one bit signal to request a random number
:param seed: 160 bits WireVector (80 bits key + 80 bits IV), defaults to None (self-seeding),
refrain from self-seeding if reseeding at run time is needed
:param bits_per_cycle: the number of output bits to generate in parallel each cycle,
up to 64 bits, must be a power of two: either 1, 2, 4, 8, 16, 32, or 64
:return ready, rand: ready is a one bit signal showing either the random number has
been produced or the seed has been initialized, rand is a register containing the
random number with the given bitwidth
This prng uses Trivium's key stream as its random bits output.
Both seed and key stream are MSB first (the earliest bit is stored at the MSB).
Trivium has a seed initialization stage that discards the first weak 1152 output bits
after each loading. Generation stage can take multiple cycles as well depending on the
given bitwidth and bits_per_cycle.
Has smaller gate area and faster speed than AES-CTR and any other stream cipher.
Passes all known statistical tests. Can be used to generate encryption keys or IVs.
Designed to securely generate up to 2**64 bits. If more than 2**64 bits is needed,
must reseed after each generation of 2**64 bits.
Trivium specifications:
http://www.ecrypt.eu.org/stream/ciphers/trivium/trivium.pdf
See also the eSTREAM portfolio page:
http://www.ecrypt.eu.org/stream/e2-trivium.html
"""
from math import ceil, log
if (64 // bits_per_cycle) * bits_per_cycle != 64:
raise pyrtl.PyrtlError('bits_per_cycle is invalid')
if seed is None:
import random
cryptogen = random.SystemRandom()
seed = cryptogen.randrange(2**160) # seed itself if no seed signal is given
seed = pyrtl.as_wires(seed, 160)
key = seed[80:]
iv = seed[:80]
a = pyrtl.Register(93)
b = pyrtl.Register(84)
c = pyrtl.Register(111)
feedback_a, feedback_b, feedback_c, output = ([] for i in range(4))
for i in range(bits_per_cycle):
t1 = a[65 - i] ^ a[92 - i]
t2 = b[68 - i] ^ b[83 - i]
t3 = c[65 - i] ^ c[110 - i]
feedback_a.append(t3 ^ c[108 - i] & c[109 - i] ^ a[68 - i])
feedback_b.append(t1 ^ a[90 - i] & a[91 - i] ^ b[77 - i])
feedback_c.append(t2 ^ b[81 - i] & b[82 - i] ^ c[86 - i])
output.append(t1 ^ t2 ^ t3)
# update internal states by shifting bits_per_cycle times
a_next = pyrtl.concat(a, *feedback_a)
b_next = pyrtl.concat(b, *feedback_b)
c_next = pyrtl.concat(c, *feedback_c)
init_cycles = 1152 // bits_per_cycle
gen_cycles = int(ceil(bitwidth / bits_per_cycle))
counter_bitwidth = int(ceil(log(max(init_cycles + 1, gen_cycles), 2)))
rand = pyrtl.Register(bitwidth)
counter = pyrtl.Register(counter_bitwidth, 'counter')
init_done = counter == init_cycles
gen_done = counter == gen_cycles - 1
state = pyrtl.Register(2)
WAIT, INIT, GEN = (pyrtl.Const(x) for x in range(3))
with pyrtl.conditional_assignment:
with load:
counter.next |= 0
a.next |= key
b.next |= iv
c.next |= pyrtl.concat(pyrtl.Const("3'b111"), pyrtl.Const(0, 108))
state.next |= INIT
with req:
counter.next |= 0
a.next |= a_next
b.next |= b_next
c.next |= c_next
rand.next |= pyrtl.concat(rand, *output)
state.next |= GEN
with state == INIT:
with ~init_done:
counter.next |= counter + 1
a.next |= a_next
b.next |= b_next
c.next |= c_next
with state == GEN:
with ~gen_done:
counter.next |= counter + 1
a.next |= a_next
b.next |= b_next
c.next |= c_next
rand.next |= pyrtl.concat(rand, *output)
ready = ~load & ~req & ((state == INIT) & init_done | (state == GEN) & gen_done)
return ready, rand
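# Illustrative variant (added example, not part of the original module): a
# 256-bit request generating 32 key stream bits per cycle; the wire names are
# assumptions.
if __name__ == '__main__':
    trivium_load = pyrtl.Input(1, 'trivium_load')
    trivium_req = pyrtl.Input(1, 'trivium_req')
    trivium_ready, trivium_rand = csprng_trivium(
        256, trivium_load, trivium_req, bits_per_cycle=32)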
|
apps/batch/urls_api.py | crazypenguin/devops | 300 | 12710947 |
from django.urls import path
from . import views_api
app_name = "batch"
urlpatterns = [
path('get/hosts/', views_api.get_hosts, name='get_hosts'),
path('upload/', views_api.upload, name='upload'),
path('logs/', views_api.logs, name='logs'),
]
|
lib/symbioticpy/symbiotic/targets/ikos.py | paldebjit/symbiotic | 235 | 12710989 | # prepare for Python 3
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import subprocess
import sys
import os
import re
try:
import benchexec.util as util
import benchexec.result as result
from benchexec.tools.template import BaseTool
except ImportError:
# fall-back solution (at least for now)
import symbiotic.benchexec.util as util
import symbiotic.benchexec.result as result
from symbiotic.benchexec.tools.template import BaseTool
from . tool import SymbioticBaseTool
SOFTTIMELIMIT = 'timelimit'
class SymbioticTool(BaseTool, SymbioticBaseTool):
"""
    Tool info for IKOS, an abstract-interpretation-based static analyzer.
    This wrapper builds the IKOS command line for the requested property
    (assertions, memory safety, or signed overflow) and interprets the
    analyzer's output to produce a verification result.
"""
def __init__(self, opts):
SymbioticBaseTool.__init__(self, opts)
def executable(self):
return util.find_executable('ikos')
def version(self, executable):
stdout = self._version_from_tool(executable, '--version')
line = next(l for l in stdout.splitlines() if l.startswith('ikos'))
line = line.replace('ikos' , '')
return line.strip()
def name(self):
return 'ikos'
def cmdline(self, executable, options, tasks, propertyfile=None, rlimits={}):
opts = ['-d=dbm']
if self._options.property.assertions():
opts.append('-a=prover')
elif self._options.property.memsafety():
opts.append('-a=boa')
opts.append('-a=nullity')
opts.append('-a=dfa')
elif self._options.property.signedoverflow():
opts.append('-a=sio')
return [executable] + options + opts + tasks
def determine_result(self, returncode, returnsignal, output, isTimeout):
# TODO: fixme for memsafety
for line in output:
if 'error: double free' in line:
return result.RESULT_FALSE_FREE
elif 'error: buffer overflow' in line:
return result.RESULT_FALSE_DEREF
elif 'error: assertion never holds' in line:
return result.RESULT_FALSE_REACH
elif 'The program is SAFE' in line:
return result.RESULT_TRUE_PROP
elif 'The program is potentially UNSAFE' in line:
return result.RESULT_UNKNOWN
return result.RESULT_ERROR
def llvm_version(self):
"""
Return required version of LLVM
"""
return '7.0.1'
|
migrations/versions/5d5340d8c969_.py | frostming/Flog | 202 | 12711007 | """empty message
Revision ID: 5d5340d8c969
Revises:
Create Date: 2021-06-17 11:12:46.834659
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "5d5340d8c969"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"category",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("text", sa.String(length=50), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("text"),
)
op.create_table(
"integration",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("name", sa.String(length=50), nullable=True),
sa.Column("settings", sa.Text(), nullable=True),
sa.Column("enabled", sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("name"),
)
op.create_table(
"page",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("slug", sa.String(length=100), nullable=False),
sa.Column("title", sa.String(length=50), nullable=False),
sa.Column("display", sa.Boolean(), nullable=True),
sa.Column("ptype", sa.String(length=20), nullable=True),
sa.Column("content", sa.Text(), nullable=True),
sa.Column("html", sa.Text(), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("slug"),
sa.UniqueConstraint("title"),
)
op.create_table(
"tag",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("text", sa.String(length=50), nullable=True),
sa.Column("url", sa.String(length=100), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("text"),
)
op.create_table(
"user",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("username", sa.String(length=64), nullable=True),
sa.Column("name", sa.String(length=100), nullable=True),
sa.Column("email", sa.String(length=100), nullable=True),
sa.Column("password", sa.String(length=200), nullable=True),
sa.Column("settings", sa.Text(), nullable=True),
sa.Column("is_admin", sa.Boolean(), nullable=True),
sa.Column("link", sa.String(length=128), nullable=True),
sa.Column("picture", sa.String(length=512), nullable=True),
sa.Column("type", sa.String(length=16), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("username", "email", name="_username_email"),
)
op.create_table(
"o_auth2_token",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("name", sa.String(length=40), nullable=True),
sa.Column("token_type", sa.String(length=40), nullable=True),
sa.Column("access_token", sa.String(length=200), nullable=True),
sa.Column("refresh_token", sa.String(length=200), nullable=True),
sa.Column("expires_at", sa.Integer(), nullable=True),
sa.Column("user_id", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
["user_id"],
["user.id"],
),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"post",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("title", sa.String(length=200), nullable=False),
sa.Column("date", sa.DateTime(), nullable=True),
sa.Column("last_modified", sa.DateTime(), nullable=True),
sa.Column("image", sa.String(length=400), nullable=True),
sa.Column("image_caption", sa.String(length=400), nullable=True),
sa.Column("lang", sa.String(length=20), nullable=True),
sa.Column("content", sa.Text(), nullable=True),
sa.Column("html", sa.Text(), nullable=True),
sa.Column("toc", sa.Text(), nullable=True),
sa.Column("url", sa.String(length=80), nullable=True),
sa.Column("comment", sa.Boolean(), nullable=True),
sa.Column("description", sa.String(length=400), nullable=True),
sa.Column("author", sa.String(length=50), nullable=True),
sa.Column("slug", sa.String(length=100), nullable=True),
sa.Column("is_draft", sa.Boolean(), nullable=True),
sa.Column("category_id", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
["category_id"],
["category.id"],
),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"comment",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("post_id", sa.Integer(), nullable=True),
sa.Column("author_id", sa.Integer(), nullable=True),
sa.Column("floor", sa.Integer(), nullable=True),
sa.Column("content", sa.Text(), nullable=True),
sa.Column("html", sa.Text(), nullable=True),
sa.Column("create_at", sa.DateTime(), nullable=True),
sa.Column("parent_id", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
["author_id"],
["user.id"],
),
sa.ForeignKeyConstraint(
["parent_id"],
["comment.id"],
),
sa.ForeignKeyConstraint(
["post_id"],
["post.id"],
),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("post_id", "floor", name="_post_floor"),
)
op.create_table(
"tags",
sa.Column("tag_id", sa.Integer(), nullable=True),
sa.Column("post_id", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
["post_id"],
["post.id"],
),
sa.ForeignKeyConstraint(
["tag_id"],
["tag.id"],
),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("tags")
op.drop_table("comment")
op.drop_table("post")
op.drop_table("o_auth2_token")
op.drop_table("user")
op.drop_table("tag")
op.drop_table("page")
op.drop_table("integration")
op.drop_table("category")
# ### end Alembic commands ###
|
dm_alchemy/symbolic_alchemy_trackers.py | locross93/dm_alchemy | 182 | 12711010 |
# Lint as: python3
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Trackers running on symbolic alchemy."""
import abc
import collections
import copy
import itertools
from typing import Any, Callable, Dict, Optional, TypeVar
from dm_alchemy import event_tracker
from dm_alchemy.ideal_observer import ideal_observer
from dm_alchemy.ideal_observer import precomputed_maps
from dm_alchemy.types import graphs
from dm_alchemy.types import stones_and_potions
from dm_alchemy.types import utils
import numpy as np
Graph = graphs.Graph
GameState = event_tracker.GameState
NO_OUTCOME = event_tracker.NO_OUTCOME
PerceivedStone = stones_and_potions.PerceivedStone
PerceivedPotion = stones_and_potions.PerceivedPotion
AlignedStoneIndex = stones_and_potions.AlignedStoneIndex
PerceivedPotionIndex = stones_and_potions.PerceivedPotionIndex
StoneMap = stones_and_potions.StoneMap
PotionMap = stones_and_potions.PotionMap
CAULDRON = stones_and_potions.CAULDRON
RewardWeights = stones_and_potions.RewardWeights
PrecomputedMaps = precomputed_maps.PrecomputedMaps
# For typing
symbolic_alchemy = Any
ActionInfo = collections.namedtuple(
'ActionInfo', 'original_action has_stone has_potion')
# Create a type which can refer to anything derived from SymbolicAlchemyTracker
BaseOrDerivedTracker = TypeVar(
'BaseOrDerivedTracker', bound='SymbolicAlchemyTracker')
class SequenceStatsTracker:
"""Tracks how a statistic changes throughout an episode."""
def __init__(
self, tracker: BaseOrDerivedTracker,
get_stat: Callable[[BaseOrDerivedTracker], Any],
default_stat: Any = 0):
self._get_stat = get_stat
self._tracker = tracker
self.stats = []
self.default_stat = default_stat
def track(self) -> None:
self.stats.append(self._get_stat(self._tracker))
def reset(self) -> None:
self.stats = []
class SymbolicAlchemyTracker:
"""Object which has functions called for each action in symbolic alchemy."""
@property
@abc.abstractmethod
def name(self) -> str:
pass
@property
def per_action_trackers(self) -> Dict[str, SequenceStatsTracker]:
return {}
@property
def per_trial_trackers(self) -> Dict[str, SequenceStatsTracker]:
return {}
def episode_start(self, unused_chemistry: utils.Chemistry) -> None:
del unused_chemistry
for tracker in itertools.chain(
self.per_trial_trackers.values(), self.per_action_trackers.values()):
tracker.reset()
def trial_start(self, unused_game_state: GameState) -> None:
del unused_game_state
for tracker in self.per_action_trackers.values():
tracker.track()
def action_and_outcome(
self, unused_action: utils.TypeBasedAction,
unused_outcome: Optional[PerceivedStone], unused_action_info: ActionInfo
) -> None:
del unused_action, unused_outcome, unused_action_info
for tracker in self.per_action_trackers.values():
tracker.track()
def trial_end(self) -> None:
for tracker in self.per_trial_trackers.values():
tracker.track()
def episode_returns(self) -> Any:
return {k: tuple(tracker.stats)
for k, tracker in itertools.chain(self.per_trial_trackers.items(),
self.per_action_trackers.items())}
def default_returns(
self, num_trials: int, num_actions_per_trial: int
) -> Any:
"""Returns some default values for the tracker."""
per_trial = zip(
self.per_trial_trackers.items(), itertools.repeat(num_trials))
num_actions = num_trials * (num_actions_per_trial + 1)
per_action = zip(
self.per_action_trackers.items(), itertools.repeat(num_actions))
return {k: tuple(tracker.default_stat for _ in range(expected_length))
for (k, tracker), expected_length in itertools.chain(
per_trial, per_action)}
StatTrackerOrDerived = TypeVar('StatTrackerOrDerived', bound='StatTracker')
GetStat = Callable[[StatTrackerOrDerived, utils.TypeBasedAction,
Optional[PerceivedStone], ActionInfo], Any]
Condition = Callable[[utils.TypeBasedAction, Optional[PerceivedStone],
ActionInfo], bool]
class StatTracker(SymbolicAlchemyTracker):
"""Tracks a statistic each time an action occurs."""
def __init__(self, get_stat: GetStat, init_step_stat: Any = 0):
self._get_stat = get_stat
self.cumul_action_occurred = copy.deepcopy(init_step_stat)
self.last_step_stat = copy.deepcopy(init_step_stat)
self._init_step_stat = init_step_stat
self.per_action_tracker = SequenceStatsTracker(
self, lambda tracker: tracker.last_step_stat,
copy.deepcopy(self._init_step_stat))
self.per_trial_tracker = SequenceStatsTracker(
self, lambda tracker: tracker.cumul_action_occurred,
copy.deepcopy(self._init_step_stat))
@property
def per_action_trackers(self) -> Dict[str, SequenceStatsTracker]:
return {'per_action': self.per_action_tracker}
@property
def per_trial_trackers(self) -> Dict[str, SequenceStatsTracker]:
return {'per_trial': self.per_trial_tracker}
def action_and_outcome(
self, action: utils.TypeBasedAction,
outcome: Optional[PerceivedStone], action_info: ActionInfo
) -> None:
self.last_step_stat = self._get_stat(self, action, outcome, action_info)
self.cumul_action_occurred += self.last_step_stat
super().action_and_outcome(action, outcome, action_info)
def trial_end(self) -> None:
super().trial_end()
self.cumul_action_occurred = copy.deepcopy(self._init_step_stat)
class SpecificActionTracker(StatTracker):
"""Counts number of actions which satisfy some condition."""
def __init__(self, condition: Condition):
def get_stat(
unused_tracker: StatTracker, action: utils.TypeBasedAction,
outcome: Optional[PerceivedStone], action_info: ActionInfo) -> int:
return 1 if condition(action, outcome, action_info) else 0
super().__init__(get_stat=get_stat)
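# Illustrative sketch (added example, not part of the original module): a
# minimal tracker built on SpecificActionTracker that counts how many actions
# use a potion. The class name is hypothetical.
class ExamplePotionUseTracker(SpecificActionTracker):
  """Counts number of actions which use a potion (example only)."""
  NAME = 'example_potion_use'
  @property
  def name(self) -> str:
    return self.NAME
  def __init__(self):
    super().__init__(
        condition=lambda action, outcome, action_info: action.using_potion)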
class NoChangeActionTracker(SpecificActionTracker):
"""Counts number of actions which do not cause stone to change."""
NAME = 'no_change'
@property
def name(self) -> str:
return self.NAME
def __init__(self):
def condition(
action: utils.TypeBasedAction, outcome: Optional[PerceivedStone],
unused_action_info: ActionInfo) -> bool:
del unused_action_info
return (all(stone is not None
for stone in [outcome, action.perceived_stone]) and
action.perceived_stone == outcome)
super().__init__(condition=condition)
class NegStoneCashedTracker(SpecificActionTracker):
"""Counts number of times a negative stone is put in the cauldron."""
NAME = 'neg_stone'
@property
def name(self) -> str:
return self.NAME
def __init__(self):
def condition(
action: utils.TypeBasedAction, unused_outcome: Optional[PerceivedStone],
unused_action_info: ActionInfo
) -> bool:
del unused_outcome, unused_action_info
return (action.cauldron and action.perceived_stone is not None and
action.perceived_stone.reward < 0)
super().__init__(condition=condition)
class CashedStoneValueTracker(SymbolicAlchemyTracker):
"""Counts average value of cashed stone."""
NAME = 'cashed_stone_value'
@property
def name(self) -> str:
return self.NAME
def __init__(
self, reward_weights: RewardWeights, stone_map: StoneMap,
rotation: np.ndarray):
self._stone_map = stone_map
self._rotation = rotation
self.average_stone_value = 0.0
self._num_stones_cashed = 0
self._reward_weights = reward_weights
self.per_trial_tracker = SequenceStatsTracker(
self, lambda tracker: tracker.average_stone_value, 0.0)
@property
def per_trial_trackers(self) -> Dict[str, SequenceStatsTracker]:
return {'per_trial': self.per_trial_tracker}
def action_and_outcome(
self, action: utils.TypeBasedAction, outcome: Optional[PerceivedStone],
action_info: ActionInfo
) -> None:
if action.cauldron and action.using_stone:
aligned_stone = stones_and_potions.align(
action.perceived_stone, self._rotation)
latent_stone = self._stone_map.apply(aligned_stone)
self.average_stone_value += self._reward_weights(
latent_stone.latent_coords)
self._num_stones_cashed += 1
super().action_and_outcome(action, outcome, action_info)
def trial_end(self) -> None:
if self._num_stones_cashed > 0:
self.average_stone_value /= self._num_stones_cashed
super().trial_end()
self.average_stone_value = 0.0
self._num_stones_cashed = 0
class ChangeGoldstoneTracker(SpecificActionTracker):
"""Counts number of times a goldstone is changed to something else."""
NAME = 'gold_changed'
@property
def name(self) -> str:
return self.NAME
def __init__(self, threshold: int = 2):
def condition(
action: utils.TypeBasedAction, outcome: Optional[PerceivedStone],
unused_action_info: ActionInfo) -> bool:
del unused_action_info
if not action.using_stone or not action.using_potion:
return False
stone_reward = (action.perceived_stone.reward
if action.perceived_stone else 0)
return outcome is not None and stone_reward > threshold > outcome.reward
super().__init__(condition=condition)
def pos_stone_not_cashed_tracker_name(
lb: int = 0, ub: Optional[int] = None
) -> str:
if lb == 0 and ub is None:
return 'pos_stone_not_cashed'
elif ub is None:
return 'stone_above_' + str(lb) + '_not_cashed'
return 'stone_between_' + str(lb) + '_and_' + str(ub) + '_not_cashed'
class PosStoneNotCashedTracker(SymbolicAlchemyTracker):
"""Counts number of times a stone with specified reward is not cashed."""
def __init__(
self, reward_weights: RewardWeights, lb: int = 0,
ub: Optional[int] = None):
self.pos_stones_at_end = 0
self._condition = lambda r: lb < r < ub if ub is not None else lb < r
self._game_state = None
self._reward_weights = reward_weights
self.lb = lb
self.ub = ub
self.per_trial_tracker = SequenceStatsTracker(
self, lambda tracker: tracker.pos_stones_at_end)
@property
def per_trial_trackers(self) -> Dict[str, SequenceStatsTracker]:
return {'per_trial': self.per_trial_tracker}
@property
def name(self) -> str:
return pos_stone_not_cashed_tracker_name(self.lb, self.ub)
def trial_start(self, game_state: GameState) -> None:
self._game_state = game_state
super().trial_start(game_state)
def trial_end(self) -> None:
self.pos_stones_at_end = len(
[s for s in self._game_state.existing_stones()
if self._condition(self._reward_weights(s.latent))])
super().trial_end()
class StoneImprovementTracker(SymbolicAlchemyTracker):
"""Counts number of times a goldstone is changed to something else."""
# pylint: disable=protected-access
  # TODO(b/173784755): avoid protected access by using an event tracker to
  # track the latest slot-based action.
NAME = 'stone_improvement'
@property
def name(self) -> str:
return self.NAME
def __init__(
self, reward_weights: RewardWeights, stone_map: StoneMap,
rotation: np.ndarray):
self._stone_map = stone_map
self._rotation = rotation
self.average_stone_improvement = 0.0
self._reward_weights = reward_weights
self._game_state = None
self._start_rewards = {}
self._end_rewards = {}
self._prev_existing_stones = set()
self.per_trial_tracker = SequenceStatsTracker(
self, lambda tracker: tracker.average_stone_improvement, 0.0)
@property
def per_trial_trackers(self) -> Dict[str, SequenceStatsTracker]:
return {'per_trial': self.per_trial_tracker}
def action_and_outcome(
self, action: utils.TypeBasedAction, outcome: Optional[PerceivedStone],
action_info: ActionInfo
) -> None:
if action.cauldron:
# We can't get the stone ind as it has already been removed from the game
# state, so instead just see what stone ind is missing.
missing_stones = self._prev_existing_stones.difference(
self._game_state._existing_stones)
assert len(missing_stones) == 1, (
'Should be 1 missing stone when stone is used.')
aligned_stone = stones_and_potions.align(
action.perceived_stone, self._rotation)
latent_stone = self._stone_map.apply(aligned_stone)
for ind in missing_stones:
self._end_rewards[ind] = self._reward_weights(
latent_stone.latent_coords)
self._prev_existing_stones = copy.deepcopy(
self._game_state._existing_stones)
super().action_and_outcome(action, outcome, action_info)
def trial_start(self, game_state: GameState) -> None:
self._game_state = game_state
self._prev_existing_stones = copy.deepcopy(
self._game_state._existing_stones)
self._start_rewards = {
i: self._reward_weights(self._game_state.get_stone(i).latent)
for i in self._prev_existing_stones}
super().trial_start(game_state)
def trial_end(self) -> None:
stone_improvements = [reward - self._start_rewards[idx]
for idx, reward in self._end_rewards.items()]
self.average_stone_improvement = (
0.0 if not stone_improvements else np.mean(stone_improvements))
super().trial_end()
self.average_stone_improvement = 0.0
self._start_rewards = {}
self._end_rewards = {}
# pylint: enable=protected-access
class AddMatrixEventTracker(SymbolicAlchemyTracker):
"""Adds a matrix event tracker per trial and add these to episode returns."""
NAME = 'matrix_event'
@property
def name(self) -> str:
return self.NAME
def __init__(self):
self._event_trackers = None
self.game_state = None
self.per_trial_tracker = SequenceStatsTracker(
self, lambda tracker: tracker.game_state.trackers[self.name],
event_tracker.MatrixEventTracker(1, 1))
@property
def per_trial_trackers(self) -> Dict[str, SequenceStatsTracker]:
return {'event_tracker': self.per_trial_tracker}
def trial_start(self, game_state: GameState) -> None:
matrix_event_tracker = event_tracker.MatrixEventTracker(
game_state.num_stones, game_state.num_potions)
self.game_state = game_state
game_state.add_event_trackers([matrix_event_tracker])
super().trial_start(game_state)
class ItemGeneratedTracker(SymbolicAlchemyTracker):
"""Tracks the items generated during the episode."""
NAME = 'items_generated'
@property
def name(self) -> str:
return self.NAME
def __init__(self):
self.trials = None
self.per_trial_tracker = SequenceStatsTracker(
self, lambda tracker: tracker.trials,
utils.TrialItems(stones=[], potions=[]))
@property
def per_trial_trackers(self) -> Dict[str, SequenceStatsTracker]:
return {'trials': self.per_trial_tracker}
def trial_start(self, game_state: GameState) -> None:
self.trials = copy.deepcopy(game_state.existing_items())
super().trial_start(game_state)
def episode_returns(self) -> Any:
items = utils.EpisodeItems([], [])
items.trials = super().episode_returns()['trials']
return items
class ScoreTracker(StatTracker):
"""Adds a reward tracker and return reward per trial."""
NAME = 'score'
@property
def name(self) -> str:
return self.NAME
def __init__(self, reward_weights: RewardWeights):
self._reward_weights = reward_weights
self.prev_reward = 0
self.game_state = None
def latest_reward(tracker, *unused_args, **unused_kwargs):
del unused_args, unused_kwargs
cumul_reward = tracker.game_state.trackers['reward'].reward
reward = cumul_reward - tracker.prev_reward
tracker.prev_reward = cumul_reward
return reward
super().__init__(get_stat=latest_reward)
def trial_start(self, game_state: GameState) -> None:
reward_tracker = event_tracker.RewardTracker(self._reward_weights)
self.game_state = game_state
game_state.add_event_trackers([reward_tracker])
self.prev_reward = 0
super().trial_start(game_state)
class ItemsUsedTracker(StatTracker):
"""Tracks what stones and potions are used."""
NAME = 'items_used'
@property
def name(self) -> str:
return self.NAME
def __init__(self):
self.prev_items = np.zeros((2,), dtype=np.int)
self.game_state: Optional[GameState] = None
def latest_items_used(
tracker: 'ItemsUsedTracker', unused_action: utils.TypeBasedAction,
unused_outcome: Optional[PerceivedStone], unused_action_info: ActionInfo
) -> np.ndarray:
del unused_action, unused_outcome, unused_action_info
items_used = tracker.game_state.trackers['items_used']
cumul_items_used = np.array(
[items_used.num_potions_used, items_used.num_stones_used],
dtype=np.int)
items_used = cumul_items_used - tracker.prev_items
tracker.prev_items = cumul_items_used
return items_used
super().__init__(get_stat=latest_items_used,
init_step_stat=np.zeros((2,), dtype=np.int))
def trial_start(self, game_state: GameState) -> None:
self.game_state = game_state
game_state.add_event_trackers([event_tracker.ItemsUsedTracker()])
self.prev_items = np.zeros((2,), dtype=np.int)
super().trial_start(game_state)
TrialExtraInfo = collections.namedtuple(
'TrialExtraInfo',
'num_world_states num_potion_maps num_stone_maps num_graphs')
class BeliefStateTracker(SymbolicAlchemyTracker):
"""Adds a belief state which is updated to a symbolic alchemy bot."""
NAME = 'belief_state'
@property
def name(self) -> str:
return self.NAME
def __init__(
self, precomputed: PrecomputedMaps,
env: 'symbolic_alchemy.SymbolicAlchemy',
init_belief_state=None):
self.precomputed = precomputed
self.belief_state = None
self._init_belief_state = (
init_belief_state or ideal_observer.BeliefStateWithRotation(
self.precomputed))
self._extra_info = None
self._world_states_per_action = None
self._env = env
self.extra_info_per_action_tracker = SequenceStatsTracker(
self, lambda tracker: tracker.extra_info,
TrialExtraInfo(
num_world_states=0, num_stone_maps=0, num_potion_maps=0,
num_graphs=0))
self.extra_info_per_trial_tracker = SequenceStatsTracker(
self, lambda tracker: tracker.extra_info,
TrialExtraInfo(
num_world_states=0, num_stone_maps=0, num_potion_maps=0,
num_graphs=0))
@property
def per_action_trackers(self) -> Dict[str, SequenceStatsTracker]:
return {'per_action_extra_info': self.extra_info_per_action_tracker}
@property
def per_trial_trackers(self) -> Dict[str, SequenceStatsTracker]:
return {'extra_info': self.extra_info_per_trial_tracker}
def episode_start(self, unused_chemistry: utils.Chemistry):
self.belief_state = copy.deepcopy(self._init_belief_state)
super().episode_start(unused_chemistry)
def trial_start(self, game_state: GameState) -> None:
current_stones = collections.Counter(self._env.perceived_stones())
current_potions = collections.Counter(self._env.perceived_potions())
self.belief_state.new_trial(current_stones, current_potions)
super().trial_start(game_state)
def action_and_outcome(
self, action: utils.TypeBasedAction, outcome: Optional[PerceivedStone],
action_info: ActionInfo
) -> None:
# A stone value of -1 indicates that the action was invalid
if not action.using_stone:
super().action_and_outcome(action, outcome, action_info)
return
if action.perceived_stone is None:
raise ValueError('Action says using stone but perceived stone is None.')
# An outcome of -1 means the stone did not change.
current_outcome = outcome or action.perceived_stone
assert current_outcome is not None
if action.using_potion:
self.belief_state.action_and_outcome(
action.perceived_stone, action.perceived_potion, current_outcome,
self.precomputed)
super().action_and_outcome(action, outcome, action_info)
@property
def extra_info(self) -> TrialExtraInfo:
return TrialExtraInfo(
num_world_states=self.belief_state.num_world_states,
num_potion_maps=self.belief_state.num_potion_maps,
num_stone_maps=self.belief_state.num_stone_maps,
num_graphs=self.belief_state.num_graphs)
def get_partial_potion_map(
self, index_to_perm_index: np.ndarray
) -> stones_and_potions.PartialPotionMap:
return self.belief_state.partial_potion_map(index_to_perm_index)
def get_partial_stone_map(self) -> stones_and_potions.PartialStoneMap:
return self.belief_state.partial_stone_map()
def get_partial_graph(
self, possible_partial_graph_indices: np.ndarray
) -> graphs.PartialGraph:
return self.belief_state.partial_graph(possible_partial_graph_indices)
|
2019/glen.py | nyanthanya/Contoh-Program | 105 | 12711021 | def glen(generator):
"""
len implementation for generators.
"""
    return sum(1 for _ in generator)
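# Quick illustrative check (added example, not part of the original module):
if __name__ == "__main__":
    assert glen(x for x in range(10)) == 10
|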
bonobo/_version.py | winsmith/bonobo | 243 | 12711089 | __version__ = '0.5.2'
|
src/genie/libs/parser/iosxe/tests/ShowPlatformSoftwareMemorySwitchActiveAllocTypeBrief/cli/equal/golden_output_expected.py | balmasea/genieparser | 204 | 12711133 | expected_output = {
'type': {
'BYTE': {
'allocated': 7045122,
'allocations': 737743,
'frees': 734750,
'requested': 6877514,
},
'BYTE*': {
'allocated': 29128,
'allocations': 345,
'frees': 309,
'requested': 27112,
},
'PArray': {
'allocated': 0,
'allocations': 180,
'frees': 180,
'requested': 0,
},
'Summary': {
'allocated': 7969955,
'allocations': 762405,
'frees': 759097,
'requested': 7784707,
},
'_btrace_ctx_global_': {
'allocated': 7864,
'allocations': 26,
'frees': 7,
'requested': 6800,
},
'_btrace_module_*': {
'allocated': 4389,
'allocations': 66,
'frees': 0,
'requested': 693,
},
'_dns_resolver_ctxt': {
'allocated': 128,
'allocations': 1,
'frees': 0,
'requested': 72,
},
'bipc_channel_': {
'allocated': 136128,
'allocations': 412,
'frees': 404,
'requested': 135680,
},
'bipc_rx_stream_': {
'allocated': 459328,
'allocations': 412,
'frees': 404,
'requested': 458880,
},
'brand_context_s': {
'allocated': 0,
'allocations': 9,
'frees': 9,
'requested': 0,
},
'chasfs_ctx_int_': {
'allocated': 12576,
'allocations': 6,
'frees': 3,
'requested': 12408,
},
'confd_cs_node**': {
'allocated': 0,
'allocations': 84,
'frees': 84,
'requested': 0,
},
'confd_event_node': {
'allocated': 0,
'allocations': 246,
'frees': 246,
'requested': 0,
},
'confd_hkeypath': {
'allocated': 0,
'allocations': 129,
'frees': 129,
'requested': 0,
},
'evContext_p': {
'allocated': 12640,
'allocations': 1,
'frees': 0,
'requested': 12584,
},
'file_alloc_handle_s': {
'allocated': 1120,
'allocations': 14,
'frees': 0,
'requested': 336,
},
'file_info': {
'allocated': 71536,
'allocations': 34,
'frees': 0,
'requested': 69632,
},
'filter_key_s': {
'allocated': 0,
'allocations': 3,
'frees': 3,
'requested': 0,
},
'green_args_s': {
'allocated': 0,
'allocations': 284,
'frees': 284,
'requested': 0,
},
'green_assist_be_defer_': {
'allocated': 0,
'allocations': 3,
'frees': 3,
'requested': 0,
},
'green_subscribe_tblcur': {
'allocated': 0,
'allocations': 3,
'frees': 3,
'requested': 0,
},
'green_subscribe_toc_tb': {
'allocated': 104,
'allocations': 1,
'frees': 0,
'requested': 48,
},
'hash_table_s': {
'allocated': 1664,
'allocations': 16,
'frees': 0,
'requested': 768,
},
'hashtable': {
'allocated': 96,
'allocations': 1,
'frees': 0,
'requested': 40,
},
'int32': {
'allocated': 0,
'allocations': 1,
'frees': 1,
'requested': 0,
},
'lru_id_mgr_handle_': {
'allocated': 372,
'allocations': 1,
'frees': 0,
'requested': 316,
},
'mdt_obj_mgr_t': {
'allocated': 88,
'allocations': 1,
'frees': 0,
'requested': 32,
},
'mdtpub_sensor_periodic': {
'allocated': 0,
'allocations': 26,
'frees': 26,
'requested': 0,
},
'mqipc_ctl_': {
'allocated': 2480,
'allocations': 79,
'frees': 69,
'requested': 1920,
},
'netconf_write_buffer_s': {
'allocated': 0,
'allocations': 10402,
'frees': 10402,
'requested': 0,
},
's_mdt_dc_filters_list': {
'allocated': 0,
'allocations': 29,
'frees': 29,
'requested': 0,
},
's_mdt_filter_dc_choice': {
'allocated': 0,
'allocations': 29,
'frees': 29,
'requested': 0,
},
's_yp_sensor_oc': {
'allocated': 0,
'allocations': 3,
'frees': 3,
'requested': 0,
},
'section_data_s': {
'allocated': 0,
'allocations': 3,
'frees': 3,
'requested': 0,
},
'sensor_data_collection': {
'allocated': 0,
'allocations': 10399,
'frees': 10399,
'requested': 0,
},
'service_dir_connect_ac': {
'allocated': 0,
'allocations': 28,
'frees': 28,
'requested': 0,
},
'tc_work_queue_s': {
'allocated': 0,
'allocations': 5,
'frees': 5,
'requested': 0,
},
'tdl_epoch_s': {
'allocated': 152,
'allocations': 1,
'frees': 0,
'requested': 96,
},
'tdldb_info_': {
'allocated': 3136,
'allocations': 14,
'frees': 0,
'requested': 2352,
},
'tdldb_plat_data_s*': {
'allocated': 2432,
'allocations': 16,
'frees': 0,
'requested': 1536,
},
'tdlhandle_s': {
'allocated': 53584,
'allocations': 1290,
'frees': 1256,
'requested': 51680,
},
'tdlhandle_s*': {
'allocated': 0,
'allocations': 29,
'frees': 29,
'requested': 0,
},
'vista_context_': {
'allocated': 125888,
'allocations': 30,
'frees': 0,
'requested': 124208,
},
},
}
|
src/main/python/algorithms/sorting/RadixSort.py | pratikadarsh/Algorithms | 558 | 12711136 | '''
* @file RadixSort.py
* @author (original JAVA) EAlexa and <NAME>, <EMAIL>
* (conversion to Python) <NAME>, <EMAIL>
* @date 29 Jun 2020
* @version 0.1
* @brief Radix sort implementation
* See https://en.wikipedia.org/wiki/Radix_sort for details on runtime and complexity Radix sorts
* operates in O(nw) time, where n is the number of keys, and w is the key length where w is
* constant on primitive types like Integer which gives it a better performance than other
* compare-based sort algorithms, like i.e. QuickSort
'''
import math
class RadixSort():
"""
    Radix sort implementation. Values are sorted digit by digit with a
    stable counting sort, so it is not an in-place algorithm.
"""
def __init__(self):
pass
def sort(self, values):
if values == None:
return
return self.radixSort(values)
def getMax(self, array):
maxNum = array[0]
for i in range(0, len(array)):
if array[i] > maxNum:
maxNum = array[i]
return maxNum
def calculateNumberOfDigits(self, number):
return int(math.log(number, 10) + 1)
def radixSort(self, numbers):
"""
Requires all numbers to be greater than or equal to 1
"""
if numbers == None or len(numbers) <= 1:
return
maximum = self.getMax(numbers)
numberOfDigits = self.calculateNumberOfDigits(maximum)
placeValue = 1
while numberOfDigits > 0:
numberOfDigits -= 1
numbers = self.countSort(numbers, placeValue)
placeValue *= 10
return numbers
def countSort(self, numbers, placeValue):
rangeParm = 10
frequency = [0]*rangeParm
sortedValues = [None]*len(numbers)
for i in range(0, len(numbers)):
digit = (numbers[i] // placeValue) % rangeParm
frequency[digit] += 1
for i in range(1, rangeParm):
frequency[i] += frequency[i - 1]
for i in range(len(numbers) - 1, -1, -1):
digit = (numbers[i] // placeValue) % rangeParm
sortedValues[frequency[digit] - 1] = numbers[i]
frequency[digit] -= 1
return sortedValues[:len(numbers)]
if __name__ == '__main__':
"""
Example usage
"""
sorter = RadixSort()
numbers = [387, 468, 134, 123, 68, 221, 769, 37, 7, 890, 1, 587]
numbers = sorter.sort(numbers)
# Prints:
# [1, 7, 37, 68, 123, 134, 221, 387, 468, 587, 769, 890]
print(numbers)
|
bindings/python/cntk/misc/converter.py | shyamalschandra/CNTK | 17,702 | 12711143 |
# ==============================================================================
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
import cntk as C
def convert(root_func, filter, converter):
'''
Clones the graph underlying root_func and in the clone substitutes
all Functions obtained by applying 'filter', with a new Function obtained by calling the specified 'converter'
Args:
root_func: a root function of a graph to be cloned and converted
filter: a lambda for filtering out the Functions to be converted
converter: a lambda for obtaining the substitute for each of the Functions to be converted
Returns:
Cloned and converted Function (graph)
'''
# recursively convert for blocks in root_func
blocks = C.logging.graph.depth_first_search(root_func, lambda x : type(x) == C.Function and x.root_function.is_block, depth = 0)
for i in range(len(blocks)):
# search for blocks again in case block input/output has been modified
blocks1 = C.logging.graph.depth_first_search(root_func, lambda x : type(x) == C.Function and x.root_function.is_block, depth = 0)
block = blocks1[i] # assuming depth_first_search order to be stable, so use the old index on new search results
block_root = C.as_composite(block.block_root)
new_block_root = convert(block_root, filter, converter)
if new_block_root != block_root:
block_arguments_mapping = dict(block.block_arguments_mapping)
new_block_arguments_mapping = []
for arg, new_arg in zip(block_root.arguments, new_block_root.arguments):
new_block_arguments_mapping += [(new_arg, block_arguments_mapping[arg])]
new_block = C.as_block(new_block_root, new_block_arguments_mapping, block.op_name, block.name)
if all([x not in root_func.outputs for x in block.outputs]) or all([x in block.outputs for x in root_func.outputs]):
root_func = root_func.clone(C.CloneMethod.share, dict(zip(block.outputs, new_block.outputs)))
else:
new_outputs = [new_block.outputs[block.outputs.index(x)] if x in block.outputs else None for x in root_func.outputs]
root_func_nonreplaced = C.combine([x for x in root_func.outputs if x not in block.outputs])
root_func_nonreplaced_clone = root_func_nonreplaced.clone(C.CloneMethod.share, dict(zip(block.outputs, new_block.outputs)))
idx = 0
for nonreplaced_output in root_func_nonreplaced_clone.outputs:
while new_outputs[idx]:
idx += 1
new_outputs[idx] = nonreplaced_output
root_func = C.combine(new_outputs)
# replace all Function instances under root_func that pass the specified 'filter'
functions_to_convert = C.logging.graph.depth_first_search(root_func, filter, depth = 0)
for i in range(len(functions_to_convert)):
# The graph could be modified already by this function, so we need to rescan to the new set.
functions_to_convert1 = C.logging.graph.depth_first_search(root_func, filter, depth = 0)
# We are using a filter passed in by the caller. So once a function is converted, we may not
# get the same number of functions again, so we need to use correct index depending on the new size.
index = 0
if len(functions_to_convert) > len(functions_to_convert1):
assert(len(functions_to_convert) - len(functions_to_convert1) == i) # Only one conversion at a time.
# index = 0 will work for this case, we are picking the first function from the new list.
elif len(functions_to_convert) == len(functions_to_convert1):
index = i # here we pick the current index of the for loop.
else:
raise RuntimeError("The conversion adds another possible conversion(s). Stopping infinite conversions.")
function_to_convert = functions_to_convert1[index]
converted = converter(function_to_convert)
if not function_to_convert.output in root_func.outputs:
root_func = root_func.clone(C.CloneMethod.share, {function_to_convert.output : converted.output})
else:
# if cudnn_rnn output is the root_func output, just use converted as root_func and no clone needed
if len(root_func.outputs) > 1:
root_func = C.combine([converted if x == function_to_convert.output else x for x in root_func.outputs])
else:
root_func = converted
return root_func
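# Hypothetical usage sketch (added example): the tiny model, the 'Tanh' op name
# and the relu substitution below are assumptions, not part of the original
# module; they show how a filter/converter pair is typically supplied.
if __name__ == '__main__':
    x = C.input_variable(4)
    model = C.layers.Dense(10, activation=C.tanh)(x)
    converted = convert(
        model,
        filter=lambda f: isinstance(f, C.Function) and f.op_name == 'Tanh',
        converter=lambda f: C.relu(f.inputs[0]))
    print(converted)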
|
tests/ml/test_utils.py | sethvargo/vaex | 337 | 12711151 |
import os
import pytest
# Creating a custom mark decorator for unit tests that belong to the incubator.
skip_incubator = pytest.mark.skipif('RUN_INCUBATOR_TESTS' not in os.environ,
reason="Add environment variable RUN_INCUBATOR_TESTS to run this test since \
modules and libraries in the incubator may change abruplty without notice.")
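# Example usage (an illustrative sketch; `test_incubator_feature` is a
# hypothetical test, not part of this module):
#
#     @skip_incubator
#     def test_incubator_feature():
#         ...  # runs only when RUN_INCUBATOR_TESTS is set in the environment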
|
test/hlt/pytest/python/com/huawei/iotplatform/client/dto/QueryDeviceGroupsInDTO.py | yuanyi-thu/AIOT- | 128 | 12711171 | <filename>test/hlt/pytest/python/com/huawei/iotplatform/client/dto/QueryDeviceGroupsInDTO.py
class QueryDeviceGroupsInDTO(object):
def __init__(self):
self.accessAppId = None
self.pageNo = None
self.pageSize = None
self.name = None
def getAccessAppId(self):
return self.accessAppId
def setAccessAppId(self, accessAppId):
self.accessAppId = accessAppId
def getPageNo(self):
return self.pageNo
def setPageNo(self, pageNo):
self.pageNo = pageNo
def getPageSize(self):
return self.pageSize
def setPageSize(self, pageSize):
self.pageSize = pageSize
def getName(self):
return self.name
def setName(self, name):
self.name = name
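# Example usage (an illustrative sketch; the values below are made up):
#
#     dto = QueryDeviceGroupsInDTO()
#     dto.setAccessAppId("my-app-id")
#     dto.setPageNo(0)
#     dto.setPageSize(20)
#     dto.setName("sensor-group")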
|
sdk/python/pulumi_gcp/identityplatform/inbound_saml_config.py | sisisin/pulumi-gcp | 121 | 12711172 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['InboundSamlConfigArgs', 'InboundSamlConfig']
@pulumi.input_type
class InboundSamlConfigArgs:
def __init__(__self__, *,
display_name: pulumi.Input[str],
idp_config: pulumi.Input['InboundSamlConfigIdpConfigArgs'],
sp_config: pulumi.Input['InboundSamlConfigSpConfigArgs'],
enabled: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a InboundSamlConfig resource.
        :param pulumi.Input[str] display_name: Human-friendly display name.
:param pulumi.Input['InboundSamlConfigIdpConfigArgs'] idp_config: SAML IdP configuration when the project acts as the relying party
Structure is documented below.
:param pulumi.Input['InboundSamlConfigSpConfigArgs'] sp_config: SAML SP (Service Provider) configuration when the project acts as the relying party to receive
and accept an authentication assertion issued by a SAML identity provider.
Structure is documented below.
:param pulumi.Input[bool] enabled: If this config allows users to sign in with the provider.
:param pulumi.Input[str] name: The name of the InboundSamlConfig resource. Must start with 'saml.' and can only have alphanumeric characters,
hyphens, underscores or periods. The part after 'saml.' must also start with a lowercase letter, end with an
alphanumeric character, and have at least 2 characters.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
pulumi.set(__self__, "display_name", display_name)
pulumi.set(__self__, "idp_config", idp_config)
pulumi.set(__self__, "sp_config", sp_config)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if name is not None:
pulumi.set(__self__, "name", name)
if project is not None:
pulumi.set(__self__, "project", project)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Input[str]:
"""
        Human-friendly display name.
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: pulumi.Input[str]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter(name="idpConfig")
def idp_config(self) -> pulumi.Input['InboundSamlConfigIdpConfigArgs']:
"""
SAML IdP configuration when the project acts as the relying party
Structure is documented below.
"""
return pulumi.get(self, "idp_config")
@idp_config.setter
def idp_config(self, value: pulumi.Input['InboundSamlConfigIdpConfigArgs']):
pulumi.set(self, "idp_config", value)
@property
@pulumi.getter(name="spConfig")
def sp_config(self) -> pulumi.Input['InboundSamlConfigSpConfigArgs']:
"""
SAML SP (Service Provider) configuration when the project acts as the relying party to receive
and accept an authentication assertion issued by a SAML identity provider.
Structure is documented below.
"""
return pulumi.get(self, "sp_config")
@sp_config.setter
def sp_config(self, value: pulumi.Input['InboundSamlConfigSpConfigArgs']):
pulumi.set(self, "sp_config", value)
@property
@pulumi.getter
def enabled(self) -> Optional[pulumi.Input[bool]]:
"""
If this config allows users to sign in with the provider.
"""
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the InboundSamlConfig resource. Must start with 'saml.' and can only have alphanumeric characters,
hyphens, underscores or periods. The part after 'saml.' must also start with a lowercase letter, end with an
alphanumeric character, and have at least 2 characters.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@pulumi.input_type
class _InboundSamlConfigState:
def __init__(__self__, *,
display_name: Optional[pulumi.Input[str]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
idp_config: Optional[pulumi.Input['InboundSamlConfigIdpConfigArgs']] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
sp_config: Optional[pulumi.Input['InboundSamlConfigSpConfigArgs']] = None):
"""
Input properties used for looking up and filtering InboundSamlConfig resources.
        :param pulumi.Input[str] display_name: Human-friendly display name.
:param pulumi.Input[bool] enabled: If this config allows users to sign in with the provider.
:param pulumi.Input['InboundSamlConfigIdpConfigArgs'] idp_config: SAML IdP configuration when the project acts as the relying party
Structure is documented below.
:param pulumi.Input[str] name: The name of the InboundSamlConfig resource. Must start with 'saml.' and can only have alphanumeric characters,
hyphens, underscores or periods. The part after 'saml.' must also start with a lowercase letter, end with an
alphanumeric character, and have at least 2 characters.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input['InboundSamlConfigSpConfigArgs'] sp_config: SAML SP (Service Provider) configuration when the project acts as the relying party to receive
and accept an authentication assertion issued by a SAML identity provider.
Structure is documented below.
"""
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if idp_config is not None:
pulumi.set(__self__, "idp_config", idp_config)
if name is not None:
pulumi.set(__self__, "name", name)
if project is not None:
pulumi.set(__self__, "project", project)
if sp_config is not None:
pulumi.set(__self__, "sp_config", sp_config)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[pulumi.Input[str]]:
"""
        Human-friendly display name.
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter
def enabled(self) -> Optional[pulumi.Input[bool]]:
"""
If this config allows users to sign in with the provider.
"""
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter(name="idpConfig")
def idp_config(self) -> Optional[pulumi.Input['InboundSamlConfigIdpConfigArgs']]:
"""
SAML IdP configuration when the project acts as the relying party
Structure is documented below.
"""
return pulumi.get(self, "idp_config")
@idp_config.setter
def idp_config(self, value: Optional[pulumi.Input['InboundSamlConfigIdpConfigArgs']]):
pulumi.set(self, "idp_config", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the InboundSamlConfig resource. Must start with 'saml.' and can only have alphanumeric characters,
hyphens, underscores or periods. The part after 'saml.' must also start with a lowercase letter, end with an
alphanumeric character, and have at least 2 characters.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter(name="spConfig")
def sp_config(self) -> Optional[pulumi.Input['InboundSamlConfigSpConfigArgs']]:
"""
SAML SP (Service Provider) configuration when the project acts as the relying party to receive
and accept an authentication assertion issued by a SAML identity provider.
Structure is documented below.
"""
return pulumi.get(self, "sp_config")
@sp_config.setter
def sp_config(self, value: Optional[pulumi.Input['InboundSamlConfigSpConfigArgs']]):
pulumi.set(self, "sp_config", value)
class InboundSamlConfig(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
display_name: Optional[pulumi.Input[str]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
idp_config: Optional[pulumi.Input[pulumi.InputType['InboundSamlConfigIdpConfigArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
sp_config: Optional[pulumi.Input[pulumi.InputType['InboundSamlConfigSpConfigArgs']]] = None,
__props__=None):
"""
        Inbound SAML configuration for an Identity Toolkit project.
You must enable the
[Google Identity Platform](https://console.cloud.google.com/marketplace/details/google-cloud-platform/customer-identity) in
the marketplace prior to using this resource.
## Example Usage
### Identity Platform Inbound Saml Config Basic
```python
import pulumi
import pulumi_gcp as gcp
saml_config = gcp.identityplatform.InboundSamlConfig("samlConfig",
display_name="<NAME>",
idp_config=gcp.identityplatform.InboundSamlConfigIdpConfigArgs(
idp_entity_id="tf-idp",
sign_request=True,
sso_url="https://example.com",
idp_certificates=[gcp.identityplatform.InboundSamlConfigIdpConfigIdpCertificateArgs(
x509_certificate=(lambda path: open(path).read())("test-fixtures/rsa_cert.pem"),
)],
),
sp_config=gcp.identityplatform.InboundSamlConfigSpConfigArgs(
sp_entity_id="tf-sp",
callback_uri="https://example.com",
))
```
## Import
        InboundSamlConfig can be imported using any of these accepted formats:
```sh
$ pulumi import gcp:identityplatform/inboundSamlConfig:InboundSamlConfig default projects/{{project}}/inboundSamlConfigs/{{name}}
```
```sh
$ pulumi import gcp:identityplatform/inboundSamlConfig:InboundSamlConfig default {{project}}/{{name}}
```
```sh
$ pulumi import gcp:identityplatform/inboundSamlConfig:InboundSamlConfig default {{name}}
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] display_name: Human-friendly display name.
:param pulumi.Input[bool] enabled: If this config allows users to sign in with the provider.
:param pulumi.Input[pulumi.InputType['InboundSamlConfigIdpConfigArgs']] idp_config: SAML IdP configuration when the project acts as the relying party
Structure is documented below.
:param pulumi.Input[str] name: The name of the InboundSamlConfig resource. Must start with 'saml.' and can only have alphanumeric characters,
hyphens, underscores or periods. The part after 'saml.' must also start with a lowercase letter, end with an
alphanumeric character, and have at least 2 characters.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[pulumi.InputType['InboundSamlConfigSpConfigArgs']] sp_config: SAML SP (Service Provider) configuration when the project acts as the relying party to receive
and accept an authentication assertion issued by a SAML identity provider.
Structure is documented below.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: InboundSamlConfigArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
        Inbound SAML configuration for an Identity Toolkit project.
You must enable the
[Google Identity Platform](https://console.cloud.google.com/marketplace/details/google-cloud-platform/customer-identity) in
the marketplace prior to using this resource.
## Example Usage
### Identity Platform Inbound Saml Config Basic
```python
import pulumi
import pulumi_gcp as gcp
saml_config = gcp.identityplatform.InboundSamlConfig("samlConfig",
display_name="<NAME>",
idp_config=gcp.identityplatform.InboundSamlConfigIdpConfigArgs(
idp_entity_id="tf-idp",
sign_request=True,
sso_url="https://example.com",
idp_certificates=[gcp.identityplatform.InboundSamlConfigIdpConfigIdpCertificateArgs(
x509_certificate=(lambda path: open(path).read())("test-fixtures/rsa_cert.pem"),
)],
),
sp_config=gcp.identityplatform.InboundSamlConfigSpConfigArgs(
sp_entity_id="tf-sp",
callback_uri="https://example.com",
))
```
## Import
        InboundSamlConfig can be imported using any of these accepted formats:
```sh
$ pulumi import gcp:identityplatform/inboundSamlConfig:InboundSamlConfig default projects/{{project}}/inboundSamlConfigs/{{name}}
```
```sh
$ pulumi import gcp:identityplatform/inboundSamlConfig:InboundSamlConfig default {{project}}/{{name}}
```
```sh
$ pulumi import gcp:identityplatform/inboundSamlConfig:InboundSamlConfig default {{name}}
```
:param str resource_name: The name of the resource.
:param InboundSamlConfigArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(InboundSamlConfigArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
display_name: Optional[pulumi.Input[str]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
idp_config: Optional[pulumi.Input[pulumi.InputType['InboundSamlConfigIdpConfigArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
sp_config: Optional[pulumi.Input[pulumi.InputType['InboundSamlConfigSpConfigArgs']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = InboundSamlConfigArgs.__new__(InboundSamlConfigArgs)
if display_name is None and not opts.urn:
raise TypeError("Missing required property 'display_name'")
__props__.__dict__["display_name"] = display_name
__props__.__dict__["enabled"] = enabled
if idp_config is None and not opts.urn:
raise TypeError("Missing required property 'idp_config'")
__props__.__dict__["idp_config"] = idp_config
__props__.__dict__["name"] = name
__props__.__dict__["project"] = project
if sp_config is None and not opts.urn:
raise TypeError("Missing required property 'sp_config'")
__props__.__dict__["sp_config"] = sp_config
super(InboundSamlConfig, __self__).__init__(
'gcp:identityplatform/inboundSamlConfig:InboundSamlConfig',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
display_name: Optional[pulumi.Input[str]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
idp_config: Optional[pulumi.Input[pulumi.InputType['InboundSamlConfigIdpConfigArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
sp_config: Optional[pulumi.Input[pulumi.InputType['InboundSamlConfigSpConfigArgs']]] = None) -> 'InboundSamlConfig':
"""
Get an existing InboundSamlConfig resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] display_name: Human-friendly display name.
:param pulumi.Input[bool] enabled: If this config allows users to sign in with the provider.
:param pulumi.Input[pulumi.InputType['InboundSamlConfigIdpConfigArgs']] idp_config: SAML IdP configuration when the project acts as the relying party
Structure is documented below.
:param pulumi.Input[str] name: The name of the InboundSamlConfig resource. Must start with 'saml.' and can only have alphanumeric characters,
hyphens, underscores or periods. The part after 'saml.' must also start with a lowercase letter, end with an
alphanumeric character, and have at least 2 characters.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[pulumi.InputType['InboundSamlConfigSpConfigArgs']] sp_config: SAML SP (Service Provider) configuration when the project acts as the relying party to receive
and accept an authentication assertion issued by a SAML identity provider.
Structure is documented below.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _InboundSamlConfigState.__new__(_InboundSamlConfigState)
__props__.__dict__["display_name"] = display_name
__props__.__dict__["enabled"] = enabled
__props__.__dict__["idp_config"] = idp_config
__props__.__dict__["name"] = name
__props__.__dict__["project"] = project
__props__.__dict__["sp_config"] = sp_config
return InboundSamlConfig(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Output[str]:
"""
        Human-friendly display name.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def enabled(self) -> pulumi.Output[Optional[bool]]:
"""
If this config allows users to sign in with the provider.
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter(name="idpConfig")
def idp_config(self) -> pulumi.Output['outputs.InboundSamlConfigIdpConfig']:
"""
SAML IdP configuration when the project acts as the relying party
Structure is documented below.
"""
return pulumi.get(self, "idp_config")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the InboundSamlConfig resource. Must start with 'saml.' and can only have alphanumeric characters,
hyphens, underscores or periods. The part after 'saml.' must also start with a lowercase letter, end with an
alphanumeric character, and have at least 2 characters.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def project(self) -> pulumi.Output[str]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@property
@pulumi.getter(name="spConfig")
def sp_config(self) -> pulumi.Output['outputs.InboundSamlConfigSpConfig']:
"""
SAML SP (Service Provider) configuration when the project acts as the relying party to receive
and accept an authentication assertion issued by a SAML identity provider.
Structure is documented below.
"""
return pulumi.get(self, "sp_config")
|