ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3 to 1.04M) |
---|---|---|
py | b401984cb69c0579c89894a69186857d5163d270 | import argparse, os
PREFIX = None
SUFFIX = None
# NOTE: if the series starts with an '01' filename for instance,
# set this value to -1 to have the result start with '00'
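# e.g. with OFFSET = -1, a source file '01.jpg' is renamed to '000.jpg' (the index is zero-padded to three digits below)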
OFFSET = 0
# Takes in a filtered filename consisting solely of an index integer
# as a string, and properly reindexes it according to any arguments
def reindex(indexStr):
index = 0
newStr = indexStr
try:
index = int(indexStr)
except Exception:
print(indexStr + " cannot be converted to int, skipping...")
return indexStr
index = index + OFFSET
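# zero-pad the adjusted index to three digits so a plain lexicographic sort keeps the pages in order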
if index < 10:
newStr = '00' + str(index)
elif index < 100:
newStr = '0' + str(index)
else:
newStr = str(index)
return newStr
def sanitize_filename(f):
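# Strip the configured PREFIX and SUFFIX (when given) so only the index portion of the filename remains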
if PREFIX:
f = f.split(PREFIX, 1)[1]
if SUFFIX:
f = f.split(SUFFIX, 1)[0]
return f
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="mangarenamer: Use this script to rename image files of a manga or comic to prepare for a CBR or CBZ archive")
# Starting offset (default 0)
parser.add_argument('-o', '--offset', type=int, default=0, help="the number that the first file in this directory should be offset by")
# Prefix to scrub (default empty)
parser.add_argument('-P', '--prefix', help="the prefix to sanitize from the source filename")
# Suffix to scrub (default empty)
parser.add_argument('-S', '--suffix', help="the suffix to sanitize from the source filename")
args = vars(parser.parse_args())
OFFSET = args['offset']
PREFIX = args['prefix']
SUFFIX = args['suffix']
for dirpath, dirnames, filenames in os.walk(os.curdir):
for f in filenames:
# Use __file__ to get the name of this file
# import __main__ as main and main.__file__ alternatively
if f != os.path.basename(__file__):
filename, ext = os.path.splitext(f)
newFilename = sanitize_filename(filename)
newFilename = reindex(newFilename)
newFilename = newFilename + ext
if newFilename == f:
print("Skipping " + newFilename + " since no renaming is necessary...")
else:
print("Renaming " + f + " to " + newFilename)
os.rename(os.path.join(dirpath, f), os.path.join(dirpath, newFilename))
|
py | b40198e1404f4643323f8ae0628714c45051cb27 | import shutil
import unittest
import tempfile
import pandas as pd
from kgtk.cli_entry import cli_entry
from kgtk.cli.filter import run
from kgtk.exceptions import KGTKException
from pathlib import Path
class TestKGTKFilter(unittest.TestCase):
def setUp(self) -> None:
self.file_path = 'data/sample_kgtk_edge_file.tsv'
self.file_path2 = 'data/sample_kgtk_non_edge_file.tsv'
self.temp_dir = tempfile.mkdtemp()
self.df = pd.read_csv(self.file_path, sep='\t')
self.df2 = pd.read_csv(self.file_path2, sep='\t')
def tearDown(self) -> None:
shutil.rmtree(self.temp_dir)
def test_kgtk_filter_p31(self):
# create GT from the file itself using pandas
p31_qnodes = list(self.df.loc[self.df['label'] == 'P31']['node1'].unique())
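# the pattern ";P31;" matches rows with any node1, a label (predicate) equal to P31, and any node2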
cli_entry("kgtk", "filter", "-i", self.file_path, "-o", f'{self.temp_dir}/p31.tsv', "-p", ";P31;", "-v",
"--reject-file", f'{self.temp_dir}/reject.tsv')
df = pd.read_csv(f'{self.temp_dir}/p31.tsv', sep='\t')
r_qnodes = list(df['node1'].unique())
for q in r_qnodes:
self.assertTrue(q in p31_qnodes)
self.assertEqual(len(df), 10)
def test_kgtk_filter_Q2447774(self):
# create GT from the file itself using pandas
node2s = list(self.df.loc[self.df['node1'] == 'Q2447774']['node2'])
cli_entry("kgtk", "filter", "-i", self.file_path, "-o", f'{self.temp_dir}/Q2447774.tsv', "-p", "Q2447774;;",
"--reject-file", f'{self.temp_dir}/reject.tsv')
df = pd.read_csv(f'{self.temp_dir}/Q2447774.tsv', sep='\t')
r_node2s = list(df['node2'])
for q in r_node2s:
self.assertTrue(q in node2s)
self.assertEqual(len(df), 27)
def test_kgtk_filter_one_row(self):
cli_entry("kgtk", "filter", "-i", self.file_path, "-o", f'{self.temp_dir}/one_row.tsv', "-p",
"Q65695069;P577;^2019-07-19T00:00:00Z/11", "-v",
"--reject-file", f'{self.temp_dir}/reject.tsv')
df = pd.read_csv(f'{self.temp_dir}/one_row.tsv', sep='\t')
self.assertEqual(len(df), 1)
def test_kgtk_filter_single_pred_inverted(self):
df = self.df2.loc[self.df2['pred'] != 'P577']
cli_entry("kgtk", "filter", "-i", self.file_path2, "-o", f'{self.temp_dir}/P577.tsv', "-p",
";P577;", "--subj", "sub", "--pred", "pred", "--obj", "obj", "-v", "--invert",
"--reject-file", f'{self.temp_dir}/reject.tsv')
df_r = pd.read_csv(f'{self.temp_dir}/P577.tsv', sep='\t')
self.assertEqual(len(df_r), len(df))
def test_kgtk_filter_single_object(self):
df = self.df2.loc[self.df2['obj'] == 'Q11365']
cli_entry("kgtk", "filter", "-i", self.file_path2, "-o", f'{self.temp_dir}/Q11365.tsv', "-p",
";;Q11365", "--subj", "sub", "--pred", "pred", "--obj", "obj", "-v",
"--reject-file", f'{self.temp_dir}/reject.tsv')
df_r = pd.read_csv(f'{self.temp_dir}/Q11365.tsv', sep='\t')
self.assertEqual(len(df_r), len(df))
def test_kgtk_filter_single_object_inverted(self):
df = self.df2.loc[self.df2['obj'] != 'Q11365']
cli_entry("kgtk", "filter", "-i", self.file_path2, "-o", f'{self.temp_dir}/Q11365.tsv', "-p",
";;Q11365", "--subj", "sub", "--pred", "pred", "--obj", "obj", "--invert",
"--reject-file", f'{self.temp_dir}/reject.tsv', "--show-option")
df_r = pd.read_csv(f'{self.temp_dir}/Q11365.tsv', sep='\t')
self.assertEqual(len(df_r), len(df))
def test_kgtk_filter_reject_file(self):
df = self.df2.loc[self.df2['obj'] == 'Q11365']
cli_entry("kgtk", "filter", "-i", self.file_path2, "-o", f'{self.temp_dir}/Q11365.tsv', "-p",
";;Q11365", "--subj", "sub", "--pred", "pred", "--obj", "obj", "-v", "--invert", "--reject-file",
f'{self.temp_dir}/reject.tsv')
df_r = pd.read_csv(f'{self.temp_dir}/reject.tsv', sep='\t')
self.assertEqual(len(df_r), len(df))
def test_kgtk_filter_bad_pattern(self):
with self.assertRaises(KGTKException):
run(input_file=Path(self.file_path), output_files=[[Path(f'{self.temp_dir}/one_row.tsv')]],
reject_file=None, patterns=[["Q65695069;P577;^2019-07-19T00:00:00Z/11;bla"]],
subj_col=None, pred_col=None, obj_col=None, or_pattern=False,
invert=False, match_type="match", first_match_only=False, regex=False, show_version=False,
numeric=False, fancy=False, pass_empty_value=False)
def test_kgtk_filter_column_indexes(self):
run(input_file=Path(self.file_path2), output_files=[[Path(f'{self.temp_dir}/one_row.tsv')]],
reject_file=None, patterns=[["Q;P;O"]],
subj_col='1', pred_col='2', obj_col='3', or_pattern=False,
invert=False, match_type="match", first_match_only=False, regex=False, show_version=False,
numeric=False, fancy=False, pass_empty_value=False)
df = pd.read_csv(f'{self.temp_dir}/one_row.tsv', sep='\t')
self.assertEqual(len(df), 0)
|
py | b40199ff252759987ee3bb0f630f8695351d7d4e | # Copyright 2020-2021 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For further info, check https://github.com/canonical/charmcraft
import datetime
import logging
import pathlib
import zipfile
from argparse import ArgumentParser, Namespace
from unittest import mock
from unittest.mock import MagicMock, patch
import pytest
import yaml
from charmcraft.cmdbase import CommandError
from charmcraft.commands import pack
from charmcraft.commands.pack import PackCommand, build_zip
from charmcraft.config import Project
from charmcraft.utils import SingleOptionEnsurer, useful_filepath
def get_namespace(
*,
bases_index=None,
debug=False,
destructive_mode=False,
entrypoint=None,
force=None,
requirement=None,
shell=False,
shell_after=False,
):
if bases_index is None:
bases_index = []
return Namespace(
bases_index=bases_index,
debug=debug,
destructive_mode=destructive_mode,
entrypoint=entrypoint,
force=force,
requirement=requirement,
shell=shell,
shell_after=shell_after,
)
# empty namespace
noargs = get_namespace()
@pytest.fixture
def bundle_yaml(tmp_path):
"""Create an empty bundle.yaml, with the option to set values to it."""
bundle_path = tmp_path / "bundle.yaml"
bundle_path.write_text("{}")
content = {}
def func(*, name):
content["name"] = name
encoded = yaml.dump(content)
bundle_path.write_text(encoded)
return encoded
return func
@pytest.fixture
def mock_parts():
with patch("charmcraft.commands.pack.parts") as mock_parts:
yield mock_parts
@pytest.fixture
def mock_launch_shell():
with patch("charmcraft.commands.build.launch_shell") as mock_shell:
yield mock_shell
# -- tests for the project type decider
def test_resolve_charm_type(config):
"""The config indicates the project is a charm."""
config.set(type="charm")
cmd = PackCommand("group", config)
with patch.object(cmd, "_pack_charm") as mock:
cmd.run(noargs)
mock.assert_called_with(noargs)
def test_resolve_bundle_type(config):
"""The config indicates the project is a bundle."""
config.set(type="bundle")
cmd = PackCommand("group", config)
with patch.object(cmd, "_pack_bundle") as mock:
cmd.run(noargs)
mock.assert_called_with(noargs)
def test_resolve_no_config_packs_charm(config, tmp_path):
"""There is no config, so it's decided to pack a charm."""
config.set(
project=Project(
config_provided=False,
dirpath=tmp_path,
started_at=datetime.datetime.utcnow(),
)
)
cmd = PackCommand("group", config)
with patch.object(cmd, "_pack_charm") as mock:
cmd.run(noargs)
mock.assert_called_with(noargs)
def test_resolve_bundle_with_requirement(config):
"""The requirement option is not valid when packing a bundle."""
config.set(type="bundle")
args = Namespace(requirement="reqs.txt", entrypoint=None)
with pytest.raises(CommandError) as cm:
PackCommand("group", config).run(args)
assert str(cm.value) == "The -r/--requirement option is valid only when packing a charm"
def test_resolve_bundle_with_entrypoint(config):
"""The entrypoint option is not valid when packing a bundle."""
config.set(type="bundle")
args = Namespace(requirement=None, entrypoint="mycharm.py")
with pytest.raises(CommandError) as cm:
PackCommand("group", config).run(args)
assert str(cm.value) == "The -e/--entry option is valid only when packing a charm"
# -- tests for main bundle building process
def test_bundle_simple_succesful_build(tmp_path, caplog, bundle_yaml, bundle_config):
"""A simple happy story."""
caplog.set_level(logging.INFO, logger="charmcraft.commands")
# mandatory files (other than the automatically provided manifest)
content = bundle_yaml(name="testbundle")
bundle_config.set(type="bundle")
(tmp_path / "README.md").write_text("test readme")
# build!
PackCommand("group", bundle_config).run(noargs)
# check
zipname = tmp_path / "testbundle.zip"
zf = zipfile.ZipFile(zipname)
assert "charmcraft.yaml" not in [x.filename for x in zf.infolist()]
assert zf.read("bundle.yaml") == content.encode("ascii")
assert zf.read("README.md") == b"test readme"
expected = "Created '{}'.".format(zipname)
assert [expected] == [rec.message for rec in caplog.records]
# check the manifest is present and with particular values that depend on given info
manifest = yaml.safe_load(zf.read("manifest.yaml"))
assert manifest["charmcraft-started-at"] == bundle_config.project.started_at.isoformat() + "Z"
# verify that the manifest was not leftover in user's project
assert not (tmp_path / "manifest.yaml").exists()
def test_bundle_missing_bundle_file(tmp_path, bundle_config):
"""Can not build a bundle without bundle.yaml."""
# build without a bundle.yaml!
with pytest.raises(CommandError) as cm:
PackCommand("group", bundle_config).run(noargs)
assert str(cm.value) == (
"Missing or invalid main bundle file: '{}'.".format(tmp_path / "bundle.yaml")
)
def test_bundle_missing_other_mandatory_file(tmp_path, bundle_config, bundle_yaml):
"""Can not build a bundle without any of the mandatory files."""
bundle_yaml(name="testbundle")
bundle_config.set(type="bundle")
# build without a README!
with pytest.raises(CommandError) as cm:
PackCommand("group", bundle_config).run(noargs)
assert str(cm.value) == "Missing mandatory file: {!r}.".format(str(tmp_path / "README.md"))
def test_bundle_missing_name_in_bundle(tmp_path, bundle_yaml, bundle_config):
"""Can not build a bundle without name."""
bundle_config.set(type="bundle")
# build!
with pytest.raises(CommandError) as cm:
PackCommand("group", bundle_config).run(noargs)
assert str(cm.value) == (
"Invalid bundle config; "
"missing a 'name' field indicating the bundle's name in file '{}'.".format(
tmp_path / "bundle.yaml"
)
)
def test_bundle_debug_no_error(
tmp_path, bundle_yaml, bundle_config, mock_parts, mock_launch_shell
):
bundle_yaml(name="testbundle")
bundle_config.set(type="bundle")
(tmp_path / "README.md").write_text("test readme")
PackCommand("group", bundle_config).run(get_namespace(debug=True))
assert mock_launch_shell.mock_calls == []
def test_bundle_debug_with_error(
tmp_path, bundle_yaml, bundle_config, mock_parts, mock_launch_shell
):
mock_parts.PartsLifecycle.return_value.run.side_effect = CommandError("fail")
bundle_yaml(name="testbundle")
bundle_config.set(type="bundle")
(tmp_path / "README.md").write_text("test readme")
with pytest.raises(CommandError):
PackCommand("group", bundle_config).run(get_namespace(debug=True))
assert mock_launch_shell.mock_calls == [mock.call()]
def test_bundle_shell(tmp_path, bundle_yaml, bundle_config, mock_parts, mock_launch_shell):
bundle_yaml(name="testbundle")
bundle_config.set(type="bundle")
(tmp_path / "README.md").write_text("test readme")
PackCommand("group", bundle_config).run(get_namespace(shell=True))
assert mock_launch_shell.mock_calls == [mock.call()]
def test_bundle_shell_after(tmp_path, bundle_yaml, bundle_config, mock_parts, mock_launch_shell):
bundle_yaml(name="testbundle")
bundle_config.set(type="bundle")
(tmp_path / "README.md").write_text("test readme")
PackCommand("group", bundle_config).run(get_namespace(shell_after=True))
assert mock_launch_shell.mock_calls == [mock.call()]
# -- tests for get paths helper
def test_prime_mandatory_ok(tmp_path, bundle_yaml, bundle_config):
"""Simple succesful case getting all mandatory files."""
bundle_yaml(name="testbundle")
test_mandatory = ["foo.txt", "bar.bin"]
test_file1 = tmp_path / "foo.txt"
test_file1.touch()
test_file2 = tmp_path / "bar.bin"
test_file2.touch()
with patch.object(pack, "MANDATORY_FILES", test_mandatory):
PackCommand("group", bundle_config).run(noargs)
zf = zipfile.ZipFile(tmp_path / "testbundle.zip")
zipped_files = [x.filename for x in zf.infolist()]
assert "foo.txt" in zipped_files
assert "bar.bin" in zipped_files
def test_prime_extra_ok(tmp_path, bundle_yaml, bundle_config):
"""Extra files were indicated ok."""
bundle_yaml(name="testbundle")
bundle_config.set(prime=["f2.txt", "f1.txt"])
testfile1 = tmp_path / "f1.txt"
testfile1.touch()
testfile2 = tmp_path / "f2.txt"
testfile2.touch()
with patch.object(pack, "MANDATORY_FILES", []):
PackCommand("group", bundle_config).run(noargs)
zf = zipfile.ZipFile(tmp_path / "testbundle.zip")
zipped_files = [x.filename for x in zf.infolist()]
assert "f1.txt" in zipped_files
assert "f2.txt" in zipped_files
def test_prime_extra_missing(tmp_path, bundle_yaml, bundle_config):
"""Extra files were indicated but not found."""
bundle_yaml(name="testbundle")
bundle_config.set(prime=["f2.txt", "f1.txt"])
testfile1 = tmp_path / "f1.txt"
testfile1.touch()
with patch.object(pack, "MANDATORY_FILES", []):
with pytest.raises(CommandError) as err:
PackCommand("group", bundle_config).run(noargs)
assert str(err.value) == (
"Parts processing error: Failed to copy '{}/build/stage/f2.txt': "
"no such file or directory.".format(tmp_path)
)
def test_prime_extra_long_path(tmp_path, bundle_yaml, bundle_config):
"""An extra file can be deep in directories."""
bundle_yaml(name="testbundle")
bundle_config.set(prime=["foo/bar/baz/extra.txt"])
testfile = tmp_path / "foo" / "bar" / "baz" / "extra.txt"
testfile.parent.mkdir(parents=True)
testfile.touch()
with patch.object(pack, "MANDATORY_FILES", []):
PackCommand("group", bundle_config).run(noargs)
zf = zipfile.ZipFile(tmp_path / "testbundle.zip")
zipped_files = [x.filename for x in zf.infolist()]
assert "foo/bar/baz/extra.txt" in zipped_files
def test_prime_extra_wildcards_ok(tmp_path, bundle_yaml, bundle_config):
"""Use wildcards to specify several files ok."""
bundle_yaml(name="testbundle")
bundle_config.set(prime=["*.txt"])
testfile1 = tmp_path / "f1.txt"
testfile1.touch()
testfile2 = tmp_path / "f2.bin"
testfile2.touch()
testfile3 = tmp_path / "f3.txt"
testfile3.touch()
with patch.object(pack, "MANDATORY_FILES", []):
PackCommand("group", bundle_config).run(noargs)
zf = zipfile.ZipFile(tmp_path / "testbundle.zip")
zipped_files = [x.filename for x in zf.infolist()]
assert "f1.txt" in zipped_files
assert "f2.bin" not in zipped_files
assert "f3.txt" in zipped_files
def test_prime_extra_wildcards_not_found(tmp_path, bundle_yaml, bundle_config):
"""Use wildcards to specify several files but nothing found."""
bundle_yaml(name="testbundle")
bundle_config.set(prime=["*.txt"])
# non-existent files are not included if using a wildcard
with patch.object(pack, "MANDATORY_FILES", []):
PackCommand("group", bundle_config).run(noargs)
zf = zipfile.ZipFile(tmp_path / "testbundle.zip")
zipped_files = [x.filename for x in zf.infolist()]
assert zipped_files == ["manifest.yaml"]
def test_prime_extra_globstar(tmp_path, bundle_yaml, bundle_config):
"""Double star means whatever directories are in the path."""
bundle_yaml(name="testbundle")
bundle_config.set(prime=["lib/**/*"])
srcpaths = (
("lib/foo/f1.txt", True),
("lib/foo/deep/fx.txt", True),
("lib/bar/f2.txt", True),
("lib/f3.txt", True),
("extra/lib/f.txt", False),
("libs/fs.txt", False),
)
for srcpath, expected in srcpaths:
testfile = tmp_path / pathlib.Path(srcpath)
testfile.parent.mkdir(parents=True, exist_ok=True)
testfile.touch()
with patch.object(pack, "MANDATORY_FILES", []):
PackCommand("group", bundle_config).run(noargs)
zf = zipfile.ZipFile(tmp_path / "testbundle.zip")
zipped_files = [x.filename for x in zf.infolist()]
for srcpath, expected in srcpaths:
assert (srcpath in zipped_files) == expected
def test_prime_extra_globstar_specific_files(tmp_path, bundle_yaml, bundle_config):
"""Combination of both mechanisms."""
bundle_yaml(name="testbundle")
bundle_config.set(prime=["lib/**/*.txt"])
srcpaths = (
("lib/foo/f1.txt", True),
("lib/foo/f1.nop", False),
("lib/foo/deep/fx.txt", True),
("lib/foo/deep/fx.nop", False),
("lib/bar/f2.txt", True),
("lib/bar/f2.nop", False),
("lib/f3.txt", True),
("lib/f3.nop", False),
("extra/lib/f.txt", False),
("libs/fs.nop", False),
)
for srcpath, expected in srcpaths:
testfile = tmp_path / pathlib.Path(srcpath)
testfile.parent.mkdir(parents=True, exist_ok=True)
testfile.touch()
with patch.object(pack, "MANDATORY_FILES", []):
PackCommand("group", bundle_config).run(noargs)
zf = zipfile.ZipFile(tmp_path / "testbundle.zip")
zipped_files = [x.filename for x in zf.infolist()]
for srcpath, expected in srcpaths:
assert (srcpath in zipped_files) == expected
# -- tests for zip builder
def test_zipbuild_simple(tmp_path):
"""Build a bunch of files in the zip."""
build_dir = tmp_path / "somedir"
build_dir.mkdir()
testfile1 = build_dir / "foo.txt"
testfile1.write_bytes(b"123\x00456")
subdir = build_dir / "bar"
subdir.mkdir()
testfile2 = subdir / "baz.txt"
testfile2.write_bytes(b"mo\xc3\xb1o")
zip_filepath = tmp_path / "testresult.zip"
build_zip(zip_filepath, build_dir)
zf = zipfile.ZipFile(zip_filepath)
assert sorted(x.filename for x in zf.infolist()) == ["bar/baz.txt", "foo.txt"]
assert zf.read("foo.txt") == b"123\x00456"
assert zf.read("bar/baz.txt") == b"mo\xc3\xb1o"
def test_zipbuild_symlink_simple(tmp_path):
"""Symlinks are supported."""
build_dir = tmp_path / "somedir"
build_dir.mkdir()
testfile1 = build_dir / "real.txt"
testfile1.write_bytes(b"123\x00456")
testfile2 = build_dir / "link.txt"
testfile2.symlink_to(testfile1)
zip_filepath = tmp_path / "testresult.zip"
build_zip(zip_filepath, build_dir)
zf = zipfile.ZipFile(zip_filepath)
assert sorted(x.filename for x in zf.infolist()) == ["link.txt", "real.txt"]
assert zf.read("real.txt") == b"123\x00456"
assert zf.read("link.txt") == b"123\x00456"
def test_zipbuild_symlink_outside(tmp_path):
"""No matter where the symlink points to."""
# outside the build dir
testfile1 = tmp_path / "real.txt"
testfile1.write_bytes(b"123\x00456")
# inside the build dir
build_dir = tmp_path / "somedir"
build_dir.mkdir()
testfile2 = build_dir / "link.txt"
testfile2.symlink_to(testfile1)
zip_filepath = tmp_path / "testresult.zip"
build_zip(zip_filepath, build_dir)
zf = zipfile.ZipFile(zip_filepath)
assert sorted(x.filename for x in zf.infolist()) == ["link.txt"]
assert zf.read("link.txt") == b"123\x00456"
# tests for the main charm building process -- so far this is only using the "build" command
# infrastructure, until we migrate the (adapted) behaviour to this command
def test_charm_parameters_requirement(config):
"""The --requirement option implies a set of validations."""
cmd = PackCommand("group", config)
parser = ArgumentParser()
cmd.fill_parser(parser)
(action,) = [action for action in parser._actions if action.dest == "requirement"]
assert action.type is useful_filepath
def test_charm_parameters_entrypoint(config):
"""The --entrypoint option implies a set of validations."""
cmd = PackCommand("group", config)
parser = ArgumentParser()
cmd.fill_parser(parser)
(action,) = [action for action in parser._actions if action.dest == "entrypoint"]
assert isinstance(action.type, SingleOptionEnsurer)
assert action.type.converter is useful_filepath
def test_charm_parameters_validator(config, tmp_path):
"""Check that build.Builder is properly called."""
args = Namespace(
bases_index=[],
debug=True,
destructive_mode=True,
entrypoint="test-epoint",
force=True,
requirement="test-reqs",
shell=True,
shell_after=True,
)
config.set(
type="charm",
project=Project(dirpath=tmp_path, started_at=datetime.datetime.utcnow()),
)
with patch("charmcraft.commands.build.Validator", autospec=True) as validator_class_mock:
validator_class_mock.return_value = validator_instance_mock = MagicMock()
with patch("charmcraft.commands.build.Builder"):
PackCommand("group", config).run(args)
validator_instance_mock.process.assert_called_with(
Namespace(
**{
"bases_indices": [],
"debug": True,
"destructive_mode": True,
"entrypoint": "test-epoint",
"from": tmp_path,
"force": True,
"requirement": "test-reqs",
"shell": True,
"shell_after": True,
}
)
)
def test_charm_builder_infrastructure_called(config):
"""Check that build.Builder is properly called."""
config.set(type="charm")
with patch("charmcraft.commands.build.Validator", autospec=True) as validator_mock:
validator_mock(config).process.return_value = "processed args"
with patch("charmcraft.commands.build.Builder") as builder_class_mock:
builder_class_mock.return_value = builder_instance_mock = MagicMock()
PackCommand("group", config).run(noargs)
builder_class_mock.assert_called_with("processed args", config)
builder_instance_mock.run.assert_called_with([], destructive_mode=False)
|
py | b4019b297691b3cda2404acd781d0fb161dfc223 | #!/usr/bin/python
# vim: set fileencoding=utf-8 :
class TestFilter(object):
"""
filter: filtering function; keeps the items for which fun returns True
"""
def is_odd(self, num):
return num%2 == 1
def test_filter(self):
list_num = [13, 34, 25, 26, 69, 10]
result_list = list(filter(self.is_odd, list_num))
print("[13, 25, 69]", result_list)
|
py | b4019b473d00d32d30c9de02bffcf7556c627f1d | """Training a face recognizer with TensorFlow using softmax cross entropy loss
"""
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os.path
import time
import sys
import random
import tensorflow as tf
import numpy as np
import importlib
import argparse
import facenet
import lfw
import h5py
import tensorflow.contrib.slim as slim
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
def main(args):
network = importlib.import_module(args.model_def)
subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
if not os.path.isdir(log_dir): # Create the log directory if it doesn't exist
os.makedirs(log_dir)
model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
if not os.path.isdir(model_dir): # Create the model directory if it doesn't exist
os.makedirs(model_dir)
# Write arguments to a text file
facenet.write_arguments_to_file(args, os.path.join(log_dir, 'arguments.txt'))
# Store some git revision info in a text file in the log directory
src_path,_ = os.path.split(os.path.realpath(__file__))
facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))
np.random.seed(seed=args.seed)
random.seed(args.seed)
train_set = facenet.get_dataset(args.data_dir)
if args.filter_filename:
train_set = filter_dataset(train_set, os.path.expanduser(args.filter_filename),
args.filter_percentile, args.filter_min_nrof_images_per_class)
nrof_classes = len(train_set)
print('Model directory: %s' % model_dir)
print('Log directory: %s' % log_dir)
pretrained_model = None
if args.pretrained_model:
pretrained_model = os.path.expanduser(args.pretrained_model)
print('Pre-trained model: %s' % pretrained_model)
if args.lfw_dir:
print('LFW directory: %s' % args.lfw_dir)
# Read the file containing the pairs used for testing
pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
# Get the paths for the corresponding images
lfw_paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)
with tf.Graph().as_default():
tf.set_random_seed(args.seed)
global_step = tf.Variable(0, trainable=False)
# Get a list of image paths and their labels
image_list, label_list = facenet.get_image_paths_and_labels(train_set)
assert len(image_list)>0, 'The dataset should not be empty'
# Create a queue that produces indices into the image_list and label_list
labels = ops.convert_to_tensor(label_list, dtype=tf.int32)
range_size = array_ops.shape(labels)[0]
index_queue = tf.train.range_input_producer(range_size, num_epochs=None,
shuffle=True, seed=None, capacity=32)
index_dequeue_op = index_queue.dequeue_many(args.batch_size*args.epoch_size, 'index_dequeue')
learning_rate_placeholder = tf.placeholder(tf.float32, name='learning_rate')
batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
image_paths_placeholder = tf.placeholder(tf.string, shape=(None,1), name='image_paths')
labels_placeholder = tf.placeholder(tf.int64, shape=(None,1), name='labels')
input_queue = data_flow_ops.FIFOQueue(capacity=100000,
dtypes=[tf.string, tf.int64],
shapes=[(1,), (1,)],
shared_name=None, name=None)
enqueue_op = input_queue.enqueue_many([image_paths_placeholder, labels_placeholder], name='enqueue_op')
nrof_preprocess_threads = 4
images_and_labels = []
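# Each preprocessing thread dequeues (filename, label) pairs, decodes the image, applies the optional augmentations below, and standardizes it; tf.train.batch_join then merges the per-thread results into batches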
for _ in range(nrof_preprocess_threads):
filenames, label = input_queue.dequeue()
images = []
for filename in tf.unstack(filenames):
file_contents = tf.read_file(filename)
image = tf.image.decode_image(file_contents, channels=3)
if args.random_rotate:
image = tf.py_func(facenet.random_rotate_image, [image], tf.uint8)
if args.random_crop:
image = tf.random_crop(image, [args.image_size, args.image_size, 3])
else:
image = tf.image.resize_image_with_crop_or_pad(image, args.image_size, args.image_size)
if args.random_flip:
image = tf.image.random_flip_left_right(image)
#pylint: disable=no-member
image.set_shape((args.image_size, args.image_size, 3))
images.append(tf.image.per_image_standardization(image))
images_and_labels.append([images, label])
image_batch, label_batch = tf.train.batch_join(
images_and_labels, batch_size=batch_size_placeholder,
shapes=[(args.image_size, args.image_size, 3), ()], enqueue_many=True,
capacity=4 * nrof_preprocess_threads * args.batch_size,
allow_smaller_final_batch=True)
image_batch = tf.identity(image_batch, 'image_batch')
image_batch = tf.identity(image_batch, 'input')
label_batch = tf.identity(label_batch, 'label_batch')
print('Total number of classes: %d' % nrof_classes)
print('Total number of examples: %d' % len(image_list))
print('Building training graph')
# Build the inference graph
prelogits, _ = network.inference(image_batch, args.keep_probability,
phase_train=phase_train_placeholder, bottleneck_layer_size=args.embedding_size,
weight_decay=args.weight_decay)
logits = slim.fully_connected(prelogits, len(train_set), activation_fn=None,
weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
weights_regularizer=slim.l2_regularizer(args.weight_decay),
scope='Logits', reuse=False)
embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')
# Add center loss
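# Center loss penalizes the distance between each prelogits vector and its class center, encouraging compact per-class clusters; it is added as an extra regularization term scaled by center_loss_factor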
if args.center_loss_factor>0.0:
prelogits_center_loss, _ = facenet.center_loss(prelogits, label_batch, args.center_loss_alfa, nrof_classes)
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, prelogits_center_loss * args.center_loss_factor)
learning_rate = tf.train.exponential_decay(learning_rate_placeholder, global_step,
args.learning_rate_decay_epochs*args.epoch_size, args.learning_rate_decay_factor, staircase=True)
tf.summary.scalar('learning_rate', learning_rate)
# Calculate the average cross entropy loss across the batch
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=label_batch, logits=logits, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
# Calculate the total losses
regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
total_loss = tf.add_n([cross_entropy_mean] + regularization_losses, name='total_loss')
# Build a Graph that trains the model with one batch of examples and updates the model parameters
train_op = facenet.train(total_loss, global_step, args.optimizer,
learning_rate, args.moving_average_decay, tf.global_variables(), args.log_histograms)
# Create savers (which gets used depends on whether we are doing transfer learning)
# TODO make layer name prefix configurable
transfer_layers = [v for v in tf.trainable_variables() if v.name.startswith('InceptionResnetV1')]
saver_transfer = tf.train.Saver(transfer_layers, max_to_keep=3)
saver_all_layers = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)
# Build the summary operation based on the TF collection of Summaries.
summary_op = tf.summary.merge_all()
# Start running operations on the Graph.
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
coord = tf.train.Coordinator()
tf.train.start_queue_runners(coord=coord, sess=sess)
with sess.as_default():
if pretrained_model:
print('Restoring pretrained model: %s' % pretrained_model)
if args.transferlearning:
print('Using transfer learning, so old weights from final layer will not be loaded')
saver_transfer.restore(sess, pretrained_model)
else:
saver_all_layers.restore(sess, pretrained_model)
# Training and validation loop
print('Running training')
epoch = 0
while epoch < args.max_nrof_epochs:
step = sess.run(global_step, feed_dict=None)
epoch = step // args.epoch_size
# Train for one epoch
train(args, sess, epoch, image_list, label_list, index_dequeue_op, enqueue_op, image_paths_placeholder, labels_placeholder,
learning_rate_placeholder, phase_train_placeholder, batch_size_placeholder, global_step,
total_loss, train_op, summary_op, summary_writer, regularization_losses, args.learning_rate_schedule_file)
# Save variables and the metagraph if it doesn't exist already
save_variables_and_metagraph(sess, saver_all_layers, summary_writer, model_dir, subdir, step)
# Evaluate on LFW
if args.lfw_dir:
evaluate(sess, enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder, batch_size_placeholder,
embeddings, label_batch, lfw_paths, actual_issame, args.lfw_batch_size, args.lfw_nrof_folds, log_dir, step, summary_writer)
return model_dir
def find_threshold(var, percentile):
hist, bin_edges = np.histogram(var, 100)
cdf = np.float32(np.cumsum(hist)) / np.sum(hist)
bin_centers = (bin_edges[:-1]+bin_edges[1:])/2
#plt.plot(bin_centers, cdf)
threshold = np.interp(percentile*0.01, cdf, bin_centers)
return threshold
def filter_dataset(dataset, data_filename, percentile, min_nrof_images_per_class):
with h5py.File(data_filename,'r') as f:
distance_to_center = np.array(f.get('distance_to_center'))
label_list = np.array(f.get('label_list'))
image_list = np.array(f.get('image_list'))
distance_to_center_threshold = find_threshold(distance_to_center, percentile)
indices = np.where(distance_to_center>=distance_to_center_threshold)[0]
filtered_dataset = dataset
removelist = []
for i in indices:
label = label_list[i]
image = image_list[i]
if image in filtered_dataset[label].image_paths:
filtered_dataset[label].image_paths.remove(image)
if len(filtered_dataset[label].image_paths)<min_nrof_images_per_class:
removelist.append(label)
ix = sorted(list(set(removelist)), reverse=True)
for i in ix:
del(filtered_dataset[i])
return filtered_dataset
def train(args, sess, epoch, image_list, label_list, index_dequeue_op, enqueue_op, image_paths_placeholder, labels_placeholder,
learning_rate_placeholder, phase_train_placeholder, batch_size_placeholder, global_step,
loss, train_op, summary_op, summary_writer, regularization_losses, learning_rate_schedule_file):
batch_number = 0
if args.learning_rate>0.0:
lr = args.learning_rate
else:
lr = facenet.get_learning_rate_from_file(learning_rate_schedule_file, epoch)
index_epoch = sess.run(index_dequeue_op)
label_epoch = np.array(label_list)[index_epoch]
image_epoch = np.array(image_list)[index_epoch]
# Enqueue one epoch of image paths and labels
labels_array = np.expand_dims(np.array(label_epoch),1)
image_paths_array = np.expand_dims(np.array(image_epoch),1)
sess.run(enqueue_op, {image_paths_placeholder: image_paths_array, labels_placeholder: labels_array})
# Training loop
train_time = 0
while batch_number < args.epoch_size:
start_time = time.time()
feed_dict = {learning_rate_placeholder: lr, phase_train_placeholder:True, batch_size_placeholder:args.batch_size}
if (batch_number % 100 == 0):
err, _, step, reg_loss, summary_str = sess.run([loss, train_op, global_step, regularization_losses, summary_op], feed_dict=feed_dict)
summary_writer.add_summary(summary_str, global_step=step)
else:
err, _, step, reg_loss = sess.run([loss, train_op, global_step, regularization_losses], feed_dict=feed_dict)
duration = time.time() - start_time
print('Epoch: [%d][%d/%d]\tTime %.3f\tLoss %2.3f\tRegLoss %2.3f' %
(epoch, batch_number+1, args.epoch_size, duration, err, np.sum(reg_loss)))
batch_number += 1
train_time += duration
# Add validation loss and accuracy to summary
summary = tf.Summary()
#pylint: disable=maybe-no-member
summary.value.add(tag='time/total', simple_value=train_time)
summary_writer.add_summary(summary, step)
return step
def evaluate(sess, enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder, batch_size_placeholder,
embeddings, labels, image_paths, actual_issame, batch_size, nrof_folds, log_dir, step, summary_writer):
start_time = time.time()
# Run forward pass to calculate embeddings
print('Running forward pass on LFW images')
# Enqueue one epoch of image paths and labels
labels_array = np.expand_dims(np.arange(0,len(image_paths)),1)
image_paths_array = np.expand_dims(np.array(image_paths),1)
sess.run(enqueue_op, {image_paths_placeholder: image_paths_array, labels_placeholder: labels_array})
embedding_size = embeddings.get_shape()[1]
nrof_images = len(actual_issame)*2
assert nrof_images % batch_size == 0, 'The number of LFW images must be an integer multiple of the LFW batch size'
nrof_batches = nrof_images // batch_size
emb_array = np.zeros((nrof_images, embedding_size))
lab_array = np.zeros((nrof_images,))
for _ in range(nrof_batches):
feed_dict = {phase_train_placeholder:False, batch_size_placeholder:batch_size}
emb, lab = sess.run([embeddings, labels], feed_dict=feed_dict)
lab_array[lab] = lab
emb_array[lab] = emb
assert np.array_equal(lab_array, np.arange(nrof_images))==True, 'Wrong labels used for evaluation, possibly caused by training examples left in the input pipeline'
_, _, accuracy, val, val_std, far = lfw.evaluate(emb_array, actual_issame, nrof_folds=nrof_folds)
print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
lfw_time = time.time() - start_time
# Add validation loss and accuracy to summary
summary = tf.Summary()
#pylint: disable=maybe-no-member
summary.value.add(tag='lfw/accuracy', simple_value=np.mean(accuracy))
summary.value.add(tag='lfw/val_rate', simple_value=val)
summary.value.add(tag='time/lfw', simple_value=lfw_time)
summary_writer.add_summary(summary, step)
with open(os.path.join(log_dir,'lfw_result.txt'),'at') as f:
f.write('%d\t%.5f\t%.5f\n' % (step, np.mean(accuracy), val))
def save_variables_and_metagraph(sess, saver, summary_writer, model_dir, model_name, step):
# Save the model checkpoint
print('Saving variables')
start_time = time.time()
checkpoint_path = os.path.join(model_dir, 'model-%s.ckpt' % model_name)
saver.save(sess, checkpoint_path, global_step=step, write_meta_graph=False)
save_time_variables = time.time() - start_time
print('Variables saved in %.2f seconds' % save_time_variables)
metagraph_filename = os.path.join(model_dir, 'model-%s.meta' % model_name)
save_time_metagraph = 0
if not os.path.exists(metagraph_filename):
print('Saving metagraph')
start_time = time.time()
saver.export_meta_graph(metagraph_filename)
save_time_metagraph = time.time() - start_time
print('Metagraph saved in %.2f seconds' % save_time_metagraph)
summary = tf.Summary()
#pylint: disable=maybe-no-member
summary.value.add(tag='time/save_variables', simple_value=save_time_variables)
summary.value.add(tag='time/save_metagraph', simple_value=save_time_metagraph)
summary_writer.add_summary(summary, step)
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--logs_base_dir', type=str,
help='Directory where to write event logs.', default='~/logs/facenet')
parser.add_argument('--models_base_dir', type=str,
help='Directory where to write trained models and checkpoints.', default='~/models/facenet')
parser.add_argument('--gpu_memory_fraction', type=float,
help='Upper bound on the amount of GPU memory that will be used by the process.', default=1.0)
parser.add_argument('--pretrained_model', type=str,
help='Load a pretrained model before training starts.')
parser.add_argument('--data_dir', type=str,
help='Path to the data directory containing aligned face patches. Multiple directories are separated with colon.',
default='~/datasets/casia/casia_maxpy_mtcnnalign_182_160')
parser.add_argument('--model_def', type=str,
help='Model definition. Points to a module containing the definition of the inference graph.', default='models.inception_resnet_v1')
parser.add_argument('--max_nrof_epochs', type=int,
help='Number of epochs to run.', default=500)
parser.add_argument('--batch_size', type=int,
help='Number of images to process in a batch.', default=90)
parser.add_argument('--image_size', type=int,
help='Image size (height, width) in pixels.', default=160)
parser.add_argument('--epoch_size', type=int,
help='Number of batches per epoch.', default=1000)
parser.add_argument('--embedding_size', type=int,
help='Dimensionality of the embedding.', default=128)
parser.add_argument('--random_crop',
help='Performs random cropping of training images. If false, the center image_size pixels from the training images are used. ' +
'If the size of the images in the data directory is equal to image_size no cropping is performed', action='store_true')
parser.add_argument('--random_flip',
help='Performs random horizontal flipping of training images.', action='store_true')
parser.add_argument('--random_rotate',
help='Performs random rotations of training images.', action='store_true')
parser.add_argument('--keep_probability', type=float,
help='Keep probability of dropout for the fully connected layer(s).', default=1.0)
parser.add_argument('--weight_decay', type=float,
help='L2 weight regularization.', default=0.0)
parser.add_argument('--center_loss_factor', type=float,
help='Center loss factor.', default=0.0)
parser.add_argument('--center_loss_alfa', type=float,
help='Center update rate for center loss.', default=0.95)
parser.add_argument('--optimizer', type=str, choices=['ADAGRAD', 'ADADELTA', 'ADAM', 'RMSPROP', 'MOM'],
help='The optimization algorithm to use', default='ADAGRAD')
parser.add_argument('--learning_rate', type=float,
help='Initial learning rate. If set to a negative value a learning rate ' +
'schedule can be specified in the file "learning_rate_schedule.txt"', default=0.1)
parser.add_argument('--learning_rate_decay_epochs', type=int,
help='Number of epochs between learning rate decay.', default=100)
parser.add_argument('--learning_rate_decay_factor', type=float,
help='Learning rate decay factor.', default=1.0)
parser.add_argument('--moving_average_decay', type=float,
help='Exponential decay for tracking of training parameters.', default=0.9999)
parser.add_argument('--seed', type=int,
help='Random seed.', default=666)
parser.add_argument('--nrof_preprocess_threads', type=int,
help='Number of preprocessing (data loading and augmentation) threads.', default=4)
parser.add_argument('--log_histograms',
help='Enables logging of weight/bias histograms in tensorboard.', action='store_true')
parser.add_argument('--learning_rate_schedule_file', type=str,
help='File containing the learning rate schedule that is used when learning_rate is set to -1.', default='data/learning_rate_schedule.txt')
parser.add_argument('--filter_filename', type=str,
help='File containing image data used for dataset filtering', default='')
parser.add_argument('--filter_percentile', type=float,
help='Keep only the percentile of images closest to their class center', default=100.0)
parser.add_argument('--filter_min_nrof_images_per_class', type=int,
help='Keep only the classes with this number of examples or more', default=0)
parser.add_argument('--transferlearning',
help='When loading weights from a pre-trained model, ignore the weights from the classification layer (which will not match your new model)', action='store_true')
# Parameters for validation on LFW
parser.add_argument('--lfw_pairs', type=str,
help='The file containing the pairs to use for validation.', default='data/pairs.txt')
parser.add_argument('--lfw_file_ext', type=str,
help='The file extension for the LFW dataset.', default='png', choices=['jpg', 'png'])
parser.add_argument('--lfw_dir', type=str,
help='Path to the data directory containing aligned face patches.', default='')
parser.add_argument('--lfw_batch_size', type=int,
help='Number of images to process in a batch in the LFW test set.', default=100)
parser.add_argument('--lfw_nrof_folds', type=int,
help='Number of folds to use for cross validation. Mainly used for testing.', default=10)
return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
|
py | b4019da279bcb75c4355e14bd1d71459ef0e2a10 | import argparse
import os
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
from torch.utils.data import DataLoader
from graph4nlp.pytorch.datasets.kinship import KinshipDataset
from graph4nlp.pytorch.modules.utils.config_utils import get_yaml_config
from .model import Complex, ConvE, Distmult, GCNComplex, GCNDistMult, GGNNDistMult
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
np.set_printoptions(precision=3)
cudnn.benchmark = True
class KGC(nn.Module):
def __init__(self, cfg, num_entities, num_relations):
super(KGC, self).__init__()
self.cfg = cfg
self.num_entities = num_entities
self.num_relations = num_relations
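# Pick the scoring model named in the config; ConvE is used when no model is specified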
if cfg["model"] is None:
model = ConvE(argparse.Namespace(**cfg), num_entities, num_relations)
elif cfg["model"] == "conve":
model = ConvE(argparse.Namespace(**cfg), num_entities, num_relations)
elif cfg["model"] == "distmult":
model = Distmult(argparse.Namespace(**cfg), num_entities, num_relations)
elif cfg["model"] == "complex":
model = Complex(argparse.Namespace(**cfg), num_entities, num_relations)
elif cfg["model"] == "ggnn_distmult":
model = GGNNDistMult(argparse.Namespace(**cfg), num_entities, num_relations)
elif cfg["model"] == "gcn_distmult":
model = GCNDistMult(argparse.Namespace(**cfg), num_entities, num_relations)
elif cfg["model"] == "gcn_complex":
model = GCNComplex(argparse.Namespace(**cfg), num_entities, num_relations)
else:
raise Exception("Unknown model type!")
self.model = model
def init(self):
return self.model.init()
def forward(self, e1_tensor, rel_tensor, KG_graph):
return self.model(e1_tensor, rel_tensor, KG_graph)
def loss(self, pred, e2_multi):
return self.model.loss(pred, e2_multi)
def inference_forward(self, collate_data, KG_graph):
e1_tensor = collate_data["e1_tensor"]
rel_tensor = collate_data["rel_tensor"]
if self.cfg["cuda"]:
e1_tensor = e1_tensor.to("cuda")
rel_tensor = rel_tensor.to("cuda")
return self.model(e1_tensor, rel_tensor, KG_graph)
def post_process(self, logits_results, e2=None):
max_values, argsort1 = torch.sort(logits_results, 1, descending=True)
rank1 = np.where(argsort1.cpu().numpy()[0] == e2[0, 0].item())[0][0]
print("rank = {}".format(rank1 + 1))
return argsort1[:, 0].item()
def ranking_and_hits_this(cfg, model, dev_rank_batcher, vocab, name, kg_graph=None):
print("")
print("-" * 50)
print(name)
print("-" * 50)
print("")
hits_left = []
hits_right = []
hits = []
ranks = []
ranks_left = []
ranks_right = []
for _ in range(10):
hits_left.append([])
hits_right.append([])
hits.append([])
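# hits[k] collects a 1/0 flag per test triple and direction indicating whether the target entity ranked within the top k+1 (i.e. Hits@(k+1))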
for i, str2var in enumerate(dev_rank_batcher):
e1 = str2var["e1_tensor"]
e2 = str2var["e2_tensor"]
rel = str2var["rel_tensor"]
rel_reverse = str2var["rel_eval_tensor"]
e2_multi1 = str2var["e2_multi1"].float()
e2_multi2 = str2var["e2_multi2"].float()
if cfg["cuda"]:
e1 = e1.to("cuda")
e2 = e2.to("cuda")
rel = rel.to("cuda")
rel_reverse = rel_reverse.to("cuda")
e2_multi1 = e2_multi1.to("cuda")
e2_multi2 = e2_multi2.to("cuda")
pred1 = model(e1, rel, kg_graph)
pred2 = model(e2, rel_reverse, kg_graph)
pred1, pred2 = pred1.data, pred2.data
e1, e2 = e1.data, e2.data
e2_multi1, e2_multi2 = e2_multi1.data, e2_multi2.data
for i in range(e1.shape[0]):
# these filters contain ALL labels
filter1 = e2_multi1[i].long()
filter2 = e2_multi2[i].long()
# save the prediction that is relevant
target_value1 = pred1[i, e2[i, 0].item()].item()
target_value2 = pred2[i, e1[i, 0].item()].item()
# zero all known cases (these are not interesting)
# this corresponds to the filtered setting
pred1[i][filter1] = 0.0
pred2[i][filter2] = 0.0
# write back the saved values
pred1[i][e2[i]] = target_value1
pred2[i][e1[i]] = target_value2
# sort and rank
max_values, argsort1 = torch.sort(pred1, 1, descending=True)
max_values, argsort2 = torch.sort(pred2, 1, descending=True)
argsort1 = argsort1.cpu().numpy()
argsort2 = argsort2.cpu().numpy()
for i in range(e1.shape[0]):
# find the rank of the target entities
rank1 = np.where(argsort1[i] == e2[i, 0].item())[0][0]
rank2 = np.where(argsort2[i] == e1[i, 0].item())[0][0]
# rank+1, since the lowest rank is rank 1 not rank 0
ranks.append(rank1 + 1)
ranks_left.append(rank1 + 1)
ranks.append(rank2 + 1)
ranks_right.append(rank2 + 1)
# this could be done more elegantly, but here you go
for hits_level in range(10):
if rank1 <= hits_level:
hits[hits_level].append(1.0)
hits_left[hits_level].append(1.0)
else:
hits[hits_level].append(0.0)
hits_left[hits_level].append(0.0)
if rank2 <= hits_level:
hits[hits_level].append(1.0)
hits_right[hits_level].append(1.0)
else:
hits[hits_level].append(0.0)
hits_right[hits_level].append(0.0)
# dev_rank_batcher.state.loss = [0]
for i in range(10):
print("Hits left @{0}: {1}".format(i + 1, np.mean(hits_left[i])))
print("Hits right @{0}: {1}".format(i + 1, np.mean(hits_right[i])))
print("Hits @{0}: {1}".format(i + 1, np.mean(hits[i])))
print("Mean rank left: {0}".format(np.mean(ranks_left)))
print("Mean rank right: {0}".format(np.mean(ranks_right)))
print("Mean rank: {0}".format(np.mean(ranks)))
print("Mean reciprocal rank left: {0}".format(np.mean(1.0 / np.array(ranks_left))))
print("Mean reciprocal rank right: {0}".format(np.mean(1.0 / np.array(ranks_right))))
print("Mean reciprocal rank: {0}".format(np.mean(1.0 / np.array(ranks))))
return np.mean(1.0 / np.array(ranks))
def main(cfg, model_path):
dataset = KinshipDataset(
root_dir="examples/pytorch/kg_completion/data/{}".format(cfg["dataset"]),
topology_subdir="kgc",
)
test_dataloader = DataLoader(
dataset.test,
batch_size=cfg["batch_size"],
shuffle=False,
num_workers=0,
# num_workers=args.loader_threads,
collate_fn=dataset.collate_fn,
)
num_entities = len(dataset.vocab_model.in_word_vocab)
num_relations = len(dataset.vocab_model.out_word_vocab)
graph_path = "examples/pytorch/kg_completion/data/{}/processed/kgc/" "KG_graph.pt".format(
cfg["dataset"]
)
KG_graph = torch.load(graph_path)
if cfg["cuda"] is True:
KG_graph = KG_graph.to("cuda")
else:
KG_graph = KG_graph.to("cpu")
model = KGC(cfg, num_entities, num_relations)
if cfg["cuda"] is True:
model.to("cuda")
model_params = torch.load(model_path)
print(model)
total_param_size = []
params = [(key, value.size(), value.numel()) for key, value in model_params.items()]
for key, size, count in params:
total_param_size.append(count)
print(key, size, count)
print(np.array(total_param_size).sum())
model.load_state_dict(model_params)
model.eval()
ranking_and_hits_this(
cfg, model, test_dataloader, dataset.vocab_model, "test_evaluation", kg_graph=KG_graph
)
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"-task_config", "--task_config", required=True, type=str, help="path to the config file"
)
parser.add_argument("--grid_search", action="store_true", help="flag: grid search")
args = vars(parser.parse_args())
return args
if __name__ == "__main__":
cfg = get_args()
task_args = get_yaml_config(cfg["task_config"])
task_args["cuda"] = True
model_name = "{2}_{3}_{0}_{1}".format(
task_args["input_drop"],
task_args["hidden_drop"],
task_args["model"],
task_args["direction_option"],
)
model_path = "examples/pytorch/kg_completion/saved_models/{0}_{1}.model".format(
task_args["dataset"], model_name
)
torch.manual_seed(task_args["seed"])
main(task_args, model_path)
|
py | b4019dee0cac98c0a040857ec81a414a9407ef18 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import pep8
import textwrap
from senlin.hacking import checks
from senlin.tests.unit.common import base
class HackingTestCase(base.SenlinTestCase):
@mock.patch('pep8._checks',
{'physical_line': {}, 'logical_line': {}, 'tree': {}})
def _run_check(self, code, checker, filename=None):
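# Register the custom hacking check with pep8, run it over the given code snippet, and return the sorted list of errors collected by the report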
pep8.register_check(checker)
lines = textwrap.dedent(code).strip().splitlines(True)
checker = pep8.Checker(filename=filename, lines=lines)
checker.check_all()
checker.report._deferred_print.sort()
return checker.report._deferred_print
def _assert_has_errors(self, code, checker, expected_errors=None,
filename=None):
actual_errors = [e[:3] for e in
self._run_check(code, checker, filename)]
self.assertEqual(expected_errors or [], actual_errors)
def _assert_has_no_errors(self, code, checker, filename=None):
self._assert_has_errors(code, checker, filename=filename)
def test_assert_equal_none(self):
self.assertEqual(1, len(list(checks.assert_equal_none(
"self.assertEqual(A, None)"))))
self.assertEqual(1, len(list(checks.assert_equal_none(
"self.assertEqual(None, A)"))))
self.assertEqual(0, len(list(checks.assert_equal_none(
"self.assertIsNone()"))))
def test_use_jsonutils(self):
def __get_msg(fun):
msg = ("S319: jsonutils.%(fun)s must be used instead of "
"json.%(fun)s" % {'fun': fun})
return [(0, msg)]
for method in ('dump', 'dumps', 'load', 'loads'):
self.assertEqual(__get_msg(method), list(checks.use_jsonutils(
"json.%s(" % method, "./senlin/engine/cluster.py")))
self.assertEqual(0, len(list(checks.use_jsonutils(
"jsonx.%s(" % method, "./senlin/engine/cluster.py"))))
self.assertEqual(0, len(list(checks.use_jsonutils(
"json.dumb", "./senlin/engine/cluster.py"))))
def test_no_mutable_default_args(self):
self.assertEqual(1, len(list(checks.no_mutable_default_args(
"def create_cluster(mapping={}, **params)"))))
self.assertEqual(0, len(list(checks.no_mutable_default_args(
"defined = []"))))
self.assertEqual(0, len(list(checks.no_mutable_default_args(
"defined, undefined = [], {}"))))
def test_api_version_decorator(self):
code = """
@some_other_decorator
@wsgi.api_version("2.2")
def my_method():
pass
"""
actual_error = self._run_check(code,
checks.check_api_version_decorator)[0]
self.assertEqual(2, actual_error[0])
self.assertEqual(0, actual_error[1])
self.assertEqual('S321', actual_error[2])
self.assertEqual(' The api_version decorator must be the first '
'decorator on a method.',
actual_error[3])
def test_api_version_decorator_good(self):
code = """
class SomeController():
@wsgi.api_version("2.2")
def my_method():
pass
"""
actual_error = self._run_check(code,
checks.check_api_version_decorator)
self.assertEqual(0, len(actual_error))
def test_no_log_warn(self):
code = """
LOG.warn("LOG.warn is deprecated")
"""
errors = [(1, 0, 'S322')]
self._assert_has_errors(code, checks.no_log_warn,
expected_errors=errors)
code = """
LOG.warning("LOG.warn is deprecated")
"""
self._assert_has_no_errors(code, checks.no_log_warn)
def test_assert_equal_true(self):
test_value = True
self.assertEqual(0, len(list(checks.assert_equal_true(
"assertTrue(True)"))))
self.assertEqual(1, len(list(checks.assert_equal_true(
"assertEqual(True, %s)" % test_value))))
self.assertEqual(1, len(list(checks.assert_equal_true(
"assertEqual(%s, True)" % test_value))))
|
py | b4019e9903a1d2a361686bb6ae4127ddbaea743a | # This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = ""
cfg.tag_prefix = ""
cfg.parentdir_prefix = "pstatmodel-"
cfg.versionfile_source = "pstatmodel/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(
[c] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
)
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
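# Illustrative note (not part of versioneer): run_command(["git"], ["rev-parse",
# "HEAD"], cwd=root) returns the stripped stdout plus the exit code, e.g.
# ("1a2b3c...", 0) inside a checkout, or (None, None) when no git executable
# could be found at all.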
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {
"version": dirname[len(parentdir_prefix) :],
"full-revisionid": None,
"dirty": False,
"error": None,
"date": None,
}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print(
"Tried directories %s but none started with prefix %s"
% (str(rootdirs), parentdir_prefix)
)
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r"\d", r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None,
}
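# A hedged sketch (not part of versioneer) of the keyword path: the refnames
# string below is a made-up example of what git-archive would substitute.
def _example_versions_from_keywords():
    keywords = {"refnames": " (HEAD -> master, tag: 1.2.0)",
                "full": "abc1234def5678abc1234def5678abc1234def56",
                "date": "2020-01-01 00:00:00 +0000"}
    # With an empty tag_prefix this returns {"version": "1.2.0", ...}
    return git_versions_from_keywords(keywords, "", verbose=False)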
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(
GITS,
[
"describe",
"--tags",
"--dirty",
"--always",
"--long",
"--match",
"%s*" % tag_prefix,
],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
full_tag,
tag_prefix,
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
0
].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
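# A small sketch (not part of versioneer) of the pep440 style output for a
# hypothetical pieces dict like the one git_pieces_from_vcs() builds.
def _example_render_pep440():
    pieces = {"closest-tag": "1.4", "distance": 3, "short": "abc1234",
              "dirty": True, "long": "", "error": None, "date": None}
    # Expected result: "1.4+3.gabc1234.dirty"
    return render_pep440(pieces)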
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None,
}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date"),
}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split("/"):
root = os.path.dirname(root)
except NameError:
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None,
}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
"date": None,
}
|
py | b4019f2afa4fe818178919d3c0228d8502c69917 | def average(*args):
len1 = len(args)
count = sum(args)
if len1 == 0:
return 0
else:
return count/len1
print(average(1,2,2,3,4))
def firstCharUpper(s):
    """Return s with its first character converted to upper case."""
    return s[0].upper()+s[1:]
print(firstCharUpper('hello'))
print(firstCharUpper('sunny'))
print(firstCharUpper('september')) |
py | b4019f7bf35fc3abc53d6de5a05c90dd49e33f01 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 10 16:13:59 2020
@author: nm
"""
import itertools
import numpy as np
from functools import reduce
def myreduce(func,iterable):
'''
Implementation of reduce function from scratch
'''
final_value = iterable[0]
for i in range(1,len(iterable)):
final_value = func(final_value,iterable[i])
return final_value
def myfilter(func, iterable):
'''
Implements filter function from scratch
'''
return [element for element in iterable if func(element)]
if __name__ == '__main__':
my_list = list(range(1,20))
print('func 1: ', myreduce(func=lambda a, b: a*(b/7), iterable=my_list))
print('func 2: ', myfilter(func=lambda a: a%2==0, iterable=my_list))
# list comprehensions
## lists formation
l1 = [[i*y for i in range(1,5)] for y in ['x','y','z']]
l2 = [[i*y for i in ['x','y','z']] for y in range(1,5)]
l3 = [[[i] for i in range(y,y+3)] for y in range(2,5)]
l4 = [list(range(i,i+4)) for i in range(2,6)]
l5 = [[(i,y) for i in range(1,4)] for y in range(1,4)]
## final answer
print(list(itertools.chain(*l1)))
print(list(itertools.chain(*l2)))
print(list(itertools.chain(*l3)))
print(l4)
print(list(itertools.chain(*l5))) |
py | b4019fcb103b83e931f685a5b14785c53d673c8a | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import (
Dict,
AsyncIterable,
Awaitable,
AsyncIterator,
Sequence,
Tuple,
Type,
Union,
)
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.speech_v1.types import cloud_speech
from google.protobuf import duration_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
from .transports.base import SpeechTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import SpeechGrpcAsyncIOTransport
from .client import SpeechClient
class SpeechAsyncClient:
"""Service that implements Google Cloud Speech API."""
_client: SpeechClient
DEFAULT_ENDPOINT = SpeechClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = SpeechClient.DEFAULT_MTLS_ENDPOINT
common_billing_account_path = staticmethod(SpeechClient.common_billing_account_path)
parse_common_billing_account_path = staticmethod(
SpeechClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(SpeechClient.common_folder_path)
parse_common_folder_path = staticmethod(SpeechClient.parse_common_folder_path)
common_organization_path = staticmethod(SpeechClient.common_organization_path)
parse_common_organization_path = staticmethod(
SpeechClient.parse_common_organization_path
)
common_project_path = staticmethod(SpeechClient.common_project_path)
parse_common_project_path = staticmethod(SpeechClient.parse_common_project_path)
common_location_path = staticmethod(SpeechClient.common_location_path)
parse_common_location_path = staticmethod(SpeechClient.parse_common_location_path)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
SpeechAsyncClient: The constructed client.
"""
return SpeechClient.from_service_account_info.__func__(SpeechAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
SpeechAsyncClient: The constructed client.
"""
return SpeechClient.from_service_account_file.__func__(SpeechAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@property
def transport(self) -> SpeechTransport:
"""Returns the transport used by the client instance.
Returns:
SpeechTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(
type(SpeechClient).get_transport_class, type(SpeechClient)
)
def __init__(
self,
*,
credentials: ga_credentials.Credentials = None,
transport: Union[str, SpeechTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the speech client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.SpeechTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = SpeechClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def recognize(
self,
request: cloud_speech.RecognizeRequest = None,
*,
config: cloud_speech.RecognitionConfig = None,
audio: cloud_speech.RecognitionAudio = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> cloud_speech.RecognizeResponse:
r"""Performs synchronous speech recognition: receive
results after all audio has been sent and processed.
Args:
request (:class:`google.cloud.speech_v1.types.RecognizeRequest`):
The request object. The top-level message sent by the
client for the `Recognize` method.
config (:class:`google.cloud.speech_v1.types.RecognitionConfig`):
Required. Provides information to the
recognizer that specifies how to process
the request.
This corresponds to the ``config`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
audio (:class:`google.cloud.speech_v1.types.RecognitionAudio`):
Required. The audio data to be
recognized.
This corresponds to the ``audio`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.speech_v1.types.RecognizeResponse:
The only message returned to the client by the Recognize method. It
contains the result as zero or more sequential
SpeechRecognitionResult messages.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([config, audio])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = cloud_speech.RecognizeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if config is not None:
request.config = config
if audio is not None:
request.audio = audio
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.recognize,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=5000.0,
),
default_timeout=5000.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
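    # Hedged usage sketch (not part of the generated client); the bucket URI and
    # config values below are illustrative only:
    #
    #     client = SpeechAsyncClient()
    #     config = cloud_speech.RecognitionConfig(language_code="en-US")
    #     audio = cloud_speech.RecognitionAudio(uri="gs://my-bucket/audio.flac")
    #     response = await client.recognize(config=config, audio=audio)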
async def long_running_recognize(
self,
request: cloud_speech.LongRunningRecognizeRequest = None,
*,
config: cloud_speech.RecognitionConfig = None,
audio: cloud_speech.RecognitionAudio = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Performs asynchronous speech recognition: receive results via
the google.longrunning.Operations interface. Returns either an
``Operation.error`` or an ``Operation.response`` which contains
a ``LongRunningRecognizeResponse`` message. For more information
on asynchronous speech recognition, see the
`how-to <https://cloud.google.com/speech-to-text/docs/async-recognize>`__.
Args:
request (:class:`google.cloud.speech_v1.types.LongRunningRecognizeRequest`):
The request object. The top-level message sent by the
client for the `LongRunningRecognize` method.
config (:class:`google.cloud.speech_v1.types.RecognitionConfig`):
Required. Provides information to the
recognizer that specifies how to process
the request.
This corresponds to the ``config`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
audio (:class:`google.cloud.speech_v1.types.RecognitionAudio`):
Required. The audio data to be
recognized.
This corresponds to the ``audio`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.speech_v1.types.LongRunningRecognizeResponse` The only message returned to the client by the LongRunningRecognize method.
It contains the result as zero or more sequential
SpeechRecognitionResult messages. It is included in
the result.response field of the Operation returned
by the GetOperation call of the
google::longrunning::Operations service.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([config, audio])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = cloud_speech.LongRunningRecognizeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if config is not None:
request.config = config
if audio is not None:
request.audio = audio
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.long_running_recognize,
default_timeout=5000.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
cloud_speech.LongRunningRecognizeResponse,
metadata_type=cloud_speech.LongRunningRecognizeMetadata,
)
# Done; return the response.
return response
def streaming_recognize(
self,
requests: AsyncIterator[cloud_speech.StreamingRecognizeRequest] = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> Awaitable[AsyncIterable[cloud_speech.StreamingRecognizeResponse]]:
r"""Performs bidirectional streaming speech recognition:
receive results while sending audio. This method is only
available via the gRPC API (not REST).
Args:
requests (AsyncIterator[`google.cloud.speech_v1.types.StreamingRecognizeRequest`]):
The request object AsyncIterator. The top-level message sent by the
client for the `StreamingRecognize` method. Multiple
`StreamingRecognizeRequest` messages are sent. The first
message must contain a `streaming_config` message and
must not contain `audio_content`. All subsequent
messages must contain `audio_content` and must not
contain a `streaming_config` message.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
AsyncIterable[google.cloud.speech_v1.types.StreamingRecognizeResponse]:
StreamingRecognizeResponse is the only message returned to the client by
StreamingRecognize. A series of zero or more
StreamingRecognizeResponse messages are streamed back
to the client. If there is no recognizable audio, and
single_utterance is set to false, then no messages
are streamed back to the client.
Here's an example of a series of
StreamingRecognizeResponses that might be returned
while processing audio:
1. results { alternatives { transcript: "tube" }
stability: 0.01 }
2. results { alternatives { transcript: "to be a" }
stability: 0.01 }
3. results { alternatives { transcript: "to be" }
stability: 0.9 } results { alternatives {
transcript: " or not to be" } stability: 0.01 }
4.
results { alternatives { transcript: "to be or not to be"
confidence: 0.92 }
alternatives { transcript: "to bee or not to bee" }
is_final: true }
5. results { alternatives { transcript: " that's" }
stability: 0.01 }
6. results { alternatives { transcript: " that is" }
stability: 0.9 } results { alternatives {
transcript: " the question" } stability: 0.01 }
7.
results { alternatives { transcript: " that is the question"
confidence: 0.98 }
alternatives { transcript: " that was the question" }
is_final: true }
Notes:
- Only two of the above responses #4 and #7 contain
final results; they are indicated by
is_final: true. Concatenating these together
generates the full transcript: "to be or not to be
that is the question".
- The others contain interim results. #3 and #6
contain two interim \`results`: the first portion
has a high stability and is less likely to change;
the second portion has a low stability and is very
likely to change. A UI designer might choose to
show only high stability results.
- The specific stability and confidence values shown
above are only for illustrative purposes. Actual
values may vary.
-
In each response, only one of these fields will be set:
error, speech_event_type, or one or more
(repeated) results.
"""
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.streaming_recognize,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=5000.0,
),
default_timeout=5000.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
response = rpc(requests, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
await self.transport.close()
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-speech",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("SpeechAsyncClient",)
|
py | b4019ff0c059692f69d32e3feebd4da7941c4717 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-07-24 20:26
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('gwells', '0005_auto_20180629_0024'),
]
state_operations = [
migrations.RemoveField(
model_name='lithologydescription',
name='activity_submission',
),
migrations.RemoveField(
model_name='lithologydescription',
name='bedrock_material',
),
migrations.RemoveField(
model_name='lithologydescription',
name='bedrock_material_descriptor',
),
migrations.RemoveField(
model_name='lithologydescription',
name='lithology_colour',
),
migrations.RemoveField(
model_name='lithologydescription',
name='lithology_description',
),
migrations.RemoveField(
model_name='lithologydescription',
name='lithology_hardness',
),
migrations.RemoveField(
model_name='lithologydescription',
name='lithology_material',
),
migrations.RemoveField(
model_name='lithologydescription',
name='lithology_moisture',
),
migrations.RemoveField(
model_name='lithologydescription',
name='lithology_structure',
),
migrations.RemoveField(
model_name='lithologydescription',
name='secondary_surficial_material',
),
migrations.RemoveField(
model_name='lithologydescription',
name='surficial_material',
),
migrations.RemoveField(
model_name='lithologydescription',
name='water_bearing_estimated_flow_units',
),
migrations.RemoveField(
model_name='lithologydescription',
name='well_tag_number',
),
migrations.DeleteModel(
name='LithologyDescription',
),
]
operations = [
migrations.SeparateDatabaseAndState(state_operations=state_operations)
]
|
py | b401a01bb7402951e2e777c39293fd9ffc4fd494 | import hashlib
from Crypto.Cipher import AES
from streamlink.compat import is_py3
def evp_bytestokey(password, salt, key_len, iv_len):
"""
Python implementation of OpenSSL's EVP_BytesToKey()
:param password: or passphrase
:param salt: 8 byte salt
:param key_len: length of key in bytes
:param iv_len: length of IV in bytes
:return: (key, iv)
"""
d = d_i = b''
while len(d) < key_len + iv_len:
d_i = hashlib.md5(d_i + password + salt).digest()
d += d_i
return d[:key_len], d[key_len:key_len + iv_len]
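# A minimal sketch (not part of streamlink): deriving an AES-256-CBC key and IV
# the way decrypt_openssl() does below; the passphrase and salt are made up.
def _example_evp_bytestokey():
    key, iv = evp_bytestokey(b"passphrase", b"8bytesal", 32, AES.block_size)
    # len(key) == 32 and len(iv) == 16 for AES-256-CBC
    return key, iv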
def decrypt_openssl(data, passphrase, key_length=32):
if data.startswith(b"Salted__"):
salt = data[len(b"Salted__"):AES.block_size]
key, iv = evp_bytestokey(passphrase, salt, key_length, AES.block_size)
d = AES.new(key, AES.MODE_CBC, iv)
out = d.decrypt(data[AES.block_size:])
return unpad_pkcs5(out)
def unpad_pkcs5(padded):
if is_py3:
return padded[:-padded[-1]]
else:
return padded[:-ord(padded[-1])] |
py | b401a021123f97b140a343f9058d492ebe27a333 | import os
import sys
import random
import math
import numpy as np
import skimage.io
import matplotlib
import matplotlib.pyplot as plt
# Root directory of the project
ROOT_DIR = os.path.abspath("../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn import visualize
# Import COCO config
sys.path.append(os.path.join(ROOT_DIR, "samples/coco/")) # To find local version
import coco
# %matplotlib inline
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# Local path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Download COCO trained weights from Releases if needed
if not os.path.exists(COCO_MODEL_PATH):
utils.download_trained_weights(COCO_MODEL_PATH)
# Directory of images to run detection on
IMAGE_DIR = os.path.join(ROOT_DIR, "images")
###################################################################
class InferenceConfig(coco.CocoConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
config = InferenceConfig()
config.display()
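# Illustrative note (assumption, not from the original demo): with GPU_COUNT = 1
# and IMAGES_PER_GPU = 1 the effective batch size is 1 * 1 = 1, which is why a
# single-image list is passed to model.detect() further below.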
###################################################################
# Create model object in inference mode.
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
# Load weights trained on MS-COCO
model.load_weights(COCO_MODEL_PATH, by_name=True)
###################################################################
# COCO Class names
# Index of the class in the list is its ID. For example, to get ID of
# the teddy bear class, use: class_names.index('teddy bear')
class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
'bus', 'train', 'truck', 'boat', 'traffic light',
'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',
'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard',
'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',
'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
'teddy bear', 'hair drier', 'toothbrush']
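# Worked example (added for illustration) of the lookup described in the comment
# above the class_names list:
teddy_bear_class_id = class_names.index('teddy bear')  # 78 for this list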
###################################################################
# Load a random image from the images folder
file_names = next(os.walk(IMAGE_DIR))[2]
image = skimage.io.imread(os.path.join(IMAGE_DIR, random.choice(file_names)))
# Run detection
results = model.detect([image], verbose=1)
# Visualize results
r = results[0]
visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
class_names, r['scores'])
|
py | b401a0359b097df4a4d9ec2ae83817d9cd1cb491 | import logging
from telegram.ext import CommandHandler
from telegram import ChatAction
from bot import markups as rm
from bot import strings as s
from bot import u
logger = logging.getLogger(__name__)
STATUSES_DICT = {
'': 'no active status',
'waiting_pack_title': 'waiting for pack title',
'waiting_pack_name': 'waiting for pack name',
'waiting_pack_first_sticker': 'waiting for the first sticker of the pack',
'adding_stickers': 'waiting for stickers to add',
'adding_waiting_pack_title': 'waiting for the pack title of the target pack so we can add stickers to it',
'removing_stickers': 'waiting for the sticker to remove from its pack',
'exporting_pack_waiting_sticker': 'waiting for a sticker from the pack to export',
'unknown': 'unmapped status',
'waiting_user_transparency': 'waiting to specify the transparency state',
'waiting_addpack_name': 'waiting for a pack name to be added'
}
@u.action(ChatAction.TYPING)
@u.restricted
@u.failwithmessage
def on_cancel_command(bot, update, user_data):
logger.info('%d: /cancel', update.effective_user.id)
user_status = user_data.get('status', 'unknown')
logger.info('resetting status for %d (previous: %s)', update.effective_user.id, user_status)
# reset user status
user_data['status'] = ''
# remove temporary data
user_data.pop('pack', None)
update.message.reply_text(s.CANCEL, reply_markup=rm.HIDE)
@u.action(ChatAction.TYPING)
@u.restricted
@u.failwithmessage
def on_status_command(bot, update, user_data):
logger.info('%d: /status', update.effective_user.id)
user_status = user_data.get('status', 'unknown')
user_status_desc = STATUSES_DICT.get(user_status, user_status)
logger.info('status of %d: %s', update.effective_user.id, user_status)
update.message.reply_text(user_status_desc)
HANDLERS = (
CommandHandler(['cancel', 'c', 'done', 'd'], on_cancel_command, pass_user_data=True),
CommandHandler(['status', 's'], on_status_command, pass_user_data=True)
)
|
py | b401a0609b8baab4ae9246af4b7fedbf728c3056 | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test running bitcoind with -reindex and -reindex-chainstate options.
- Start a single node and generate 3 blocks.
- Stop the node and restart it with -reindex. Verify that the node has reindexed up to block 3.
- Stop the node and restart it with -reindex-chainstate. Verify that the node has reindexed up to block 3.
"""
from test_framework.test_framework import OmegacoinTestFramework
from test_framework.util import assert_equal
import time
class ReindexTest(OmegacoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def reindex(self):
self.nodes[0].generate(3)
blockcount = self.nodes[0].getblockcount()
self.stop_nodes()
extra_args = [["-reindex", "-checkblockindex=1"]]
self.start_nodes(extra_args)
assert_equal(self.nodes[0].getblockcount(), blockcount) # start_node is blocking on reindex
self.log.info("Success")
def run_test(self):
self.reindex()
if __name__ == '__main__':
ReindexTest().main()
|
py | b401a14d3507171f529248e04542064a2f636ae9 | from sklearn2sql_heroku.tests.regression import generic as reg_gen
reg_gen.test_model("GradientBoostingRegressor" , "RandomReg_500" , "mssql")
|
py | b401a2601e35e860824078a8269ceaa1ca483c4c | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""ActiveMaster definition."""
from config_bootstrap import Master
class TryServerANGLE(Master.Master4a):
project_name = 'ANGLE Try Server'
master_port = 21403
slave_port = 31403
master_port_alt = 26403
buildbot_url = 'http://build.chromium.org/p/tryserver.chromium.angle/'
gerrit_host = 'https://chromium-review.googlesource.com'
service_account_file = 'service-account-chromium-tryserver.json'
buildbucket_bucket = 'master.tryserver.chromium.angle'
pubsub_service_account_file = 'service-account-luci-milo.json'
pubsub_topic = 'projects/luci-milo/topics/public-buildbot'
name = 'tryserver.chromium.angle'
|
py | b401a2baf12c3e0fb4341756149329f67ddf1b09 | #!/usr/bin/env python
"""Test script to check for required functionality.
Execute this code at the command line by typing:
python swc-installation-test-2.py
Run the script and follow the instructions it prints at the end.
This script requires at least Python 2.6. You can check the version
of Python that you have installed with 'swc-installation-test-1.py'.
By default, this script will test for all the dependencies your
instructor thinks you need. If you want to test for a different set
of packages, you can list them on the command line. For example:
python swc-installation-test-2.py git virtual-editor
This is useful if the original test told you to install a more recent
version of a particular dependency, and you just want to re-test that
dependency.
"""
from __future__ import print_function # for Python 2.6 compatibility
import distutils.ccompiler as _distutils_ccompiler
import fnmatch as _fnmatch
try: # Python 2.7 and 3.x
import importlib as _importlib
except ImportError: # Python 2.6 and earlier
class _Importlib (object):
"""Minimal workarounds for functions we need
"""
@staticmethod
def import_module(name):
module = __import__(name)
for n in name.split('.')[1:]:
module = getattr(module, n)
return module
_importlib = _Importlib()
import logging as _logging
import os as _os
import platform as _platform
import re as _re
import shlex as _shlex
import subprocess as _subprocess
import sys as _sys
try: # Python 3.x
import urllib.parse as _urllib_parse
except ImportError: # Python 2.x
import urllib as _urllib_parse # for quote()
if not hasattr(_shlex, 'quote'): # Python versions older than 3.3
# Use the undocumented pipes.quote()
import pipes as _pipes
_shlex.quote = _pipes.quote
__version__ = '0.1'
# Comment out any entries you don't need
CHECKS = [
# Shell
'virtual-shell',
# Editors
'virtual-editor',
# Browsers
'virtual-browser',
# Version control
'git',
'hg', # Command line tool
#'mercurial', # Python package
'EasyMercurial',
# Build tools and packaging
'make',
'virtual-pypi-installer',
'setuptools',
#'xcode',
# Testing
'nosetests', # Command line tool
'nose', # Python package
'py.test', # Command line tool
'pytest', # Python package
# SQL
'sqlite3', # Command line tool
'sqlite3-python', # Python package
# Python
'python',
'ipython', # Command line tool
'IPython', # Python package
'argparse', # Useful for utility scripts
'numpy',
'scipy',
'matplotlib',
'pandas',
'sympy',
'Cython',
'networkx',
'mayavi.mlab',
]
CHECKER = {}
_ROOT_PATH = _os.sep
if _platform.system() == 'Windows':  # platform.system() reports 'Windows', not 'win32'
_ROOT_PATH = 'c:\\'
class InvalidCheck (KeyError):
def __init__(self, check):
super(InvalidCheck, self).__init__(check)
self.check = check
def __str__(self):
return self.check
class DependencyError (Exception):
_default_url = 'http://software-carpentry.org/setup/'
_setup_urls = { # (system, version, package) glob pairs
('*', '*', 'Cython'): 'http://docs.cython.org/src/quickstart/install.html',
('Linux', '*', 'EasyMercurial'): 'http://easyhg.org/download.html#download-linux',
('Darwin', '*', 'EasyMercurial'): 'http://easyhg.org/download.html#download-mac',
('Windows', '*', 'EasyMercurial'): 'http://easyhg.org/download.html#download-windows',
('*', '*', 'EasyMercurial'): 'http://easyhg.org/download.html',
('*', '*', 'argparse'): 'https://pypi.python.org/pypi/argparse#installation',
('*', '*', 'ash'): 'http://www.in-ulm.de/~mascheck/various/ash/',
('*', '*', 'bash'): 'http://www.gnu.org/software/bash/manual/html_node/Basic-Installation.html#Basic-Installation',
('Linux', '*', 'chromium'): 'http://code.google.com/p/chromium/wiki/LinuxBuildInstructions',
('Darwin', '*', 'chromium'): 'http://code.google.com/p/chromium/wiki/MacBuildInstructions',
('Windows', '*', 'chromium'): 'http://www.chromium.org/developers/how-tos/build-instructions-windows',
('*', '*', 'chromium'): 'http://www.chromium.org/developers/how-tos',
('Windows', '*', 'emacs'): 'http://www.gnu.org/software/emacs/windows/Installing-Emacs.html',
('*', '*', 'emacs'): 'http://www.gnu.org/software/emacs/#Obtaining',
('*', '*', 'firefox'): 'http://www.mozilla.org/en-US/firefox/new/',
('Linux', '*', 'gedit'): 'http://www.linuxfromscratch.org/blfs/view/svn/gnome/gedit.html',
('*', '*', 'git'): 'http://git-scm.com/downloads',
('*', '*', 'google-chrome'): 'https://www.google.com/intl/en/chrome/browser/',
('*', '*', 'hg'): 'http://mercurial.selenic.com/',
('*', '*', 'mercurial'): 'http://mercurial.selenic.com/',
('*', '*', 'IPython'): 'http://ipython.org/install.html',
('*', '*', 'ipython'): 'http://ipython.org/install.html',
('*', '*', 'jinja'): 'http://jinja.pocoo.org/docs/intro/#installation',
('*', '*', 'kate'): 'http://kate-editor.org/get-it/',
('*', '*', 'make'): 'http://www.gnu.org/software/make/',
('Darwin', '*', 'matplotlib'): 'http://matplotlib.org/users/installing.html#building-on-osx',
('Windows', '*', 'matplotlib'): 'http://matplotlib.org/users/installing.html#installing-on-windows',
('*', '*', 'matplotlib'): 'http://matplotlib.org/users/installing.html#installing',
('*', '*', 'mayavi.mlab'): 'http://docs.enthought.com/mayavi/mayavi/installation.html',
('*', '*', 'nano'): 'http://www.nano-editor.org/dist/latest/faq.html#3',
('*', '*', 'networkx'): 'http://networkx.github.com/documentation/latest/install.html#installing',
('*', '*', 'nose'): 'https://nose.readthedocs.org/en/latest/#installation-and-quick-start',
('*', '*', 'nosetests'): 'https://nose.readthedocs.org/en/latest/#installation-and-quick-start',
('*', '*', 'notepad++'): 'http://notepad-plus-plus.org/download/v6.3.html',
('*', '*', 'numpy'): 'http://docs.scipy.org/doc/numpy/user/install.html',
('*', '*', 'pandas'): 'http://pandas.pydata.org/pandas-docs/stable/install.html',
('*', '*', 'pip'): 'http://www.pip-installer.org/en/latest/installing.html',
('*', '*', 'pytest'): 'http://pytest.org/latest/getting-started.html',
('*', '*', 'python'): 'http://www.python.org/download/releases/2.7.3/#download',
('*', '*', 'pyzmq'): 'https://github.com/zeromq/pyzmq/wiki/Building-and-Installing-PyZMQ',
('*', '*', 'py.test'): 'http://pytest.org/latest/getting-started.html',
('Linux', '*', 'scipy'): 'http://www.scipy.org/Installing_SciPy/Linux',
('Darwin', '*', 'scipy'): 'http://www.scipy.org/Installing_SciPy/Mac_OS_X',
('Windows', '*', 'scipy'): 'http://www.scipy.org/Installing_SciPy/Windows',
('*', '*', 'scipy'): 'http://www.scipy.org/Installing_SciPy',
('*', '*', 'setuptools'): 'https://pypi.python.org/pypi/setuptools#installation-instructions',
('*', '*', 'sqlite3'): 'http://www.sqlite.org/download.html',
('*', '*', 'sublime-text'): 'http://www.sublimetext.com/2',
('*', '*', 'sympy'): 'http://docs.sympy.org/dev/install.html',
('Darwin', '*', 'textmate'): 'http://macromates.com/',
('Darwin', '*', 'textwrangler'): 'http://www.barebones.com/products/textwrangler/download.html',
('*', '*', 'tornado'): 'http://www.tornadoweb.org/',
('*', '*', 'vim'): 'http://www.vim.org/download.php',
('Darwin', '*', 'xcode'): 'https://developer.apple.com/xcode/',
('*', '*', 'xemacs'): 'http://www.us.xemacs.org/Install/',
('*', '*', 'zsh'): 'http://www.zsh.org/',
}
def _get_message(self):
return self._message
def _set_message(self, message):
self._message = message
message = property(_get_message, _set_message)
def __init__(self, checker, message, causes=None):
super(DependencyError, self).__init__(message)
self.checker = checker
self.message = message
if causes is None:
causes = []
self.causes = causes
def get_url(self):
system = _platform.system()
version = None
for pversion in (
'linux_distribution',
'mac_ver',
'win32_ver',
):
value = getattr(_platform, pversion)()
if value[0]:
version = value[0]
break
package = self.checker.name
for (s,v,p),url in self._setup_urls.items():
if (_fnmatch.fnmatch(system, s) and
_fnmatch.fnmatch(version, v) and
_fnmatch.fnmatch(package, p)):
return url
return self._default_url
def __str__(self):
url = self.get_url()
lines = [
'check for {0} failed:'.format(self.checker.full_name()),
' ' + self.message,
' For instructions on installing an up-to-date version, see',
' ' + url,
]
if self.causes:
lines.append(' causes:')
for cause in self.causes:
lines.extend(' ' + line for line in str(cause).splitlines())
return '\n'.join(lines)
def check(checks=None):
successes = []
failures = []
if not checks:
checks = CHECKS
for check in checks:
try:
checker = CHECKER[check]
except KeyError as e:
raise InvalidCheck(check)# from e
_sys.stdout.write('check {0}...\t'.format(checker.full_name()))
try:
version = checker.check()
except DependencyError as e:
failures.append(e)
_sys.stdout.write('fail\n')
else:
_sys.stdout.write('pass\n')
successes.append((checker, version))
if successes:
print('\nSuccesses:\n')
for checker,version in successes:
print('{0} {1}'.format(
checker.full_name(),
version or 'unknown'))
if failures:
print('\nFailures:')
printed = []
for failure in failures:
if failure not in printed:
print()
print(failure)
printed.append(failure)
return False
return True
class Dependency (object):
def __init__(self, name, long_name=None, minimum_version=None,
version_delimiter='.', and_dependencies=None,
or_dependencies=None):
self.name = name
self.long_name = long_name or name
self.minimum_version = minimum_version
self.version_delimiter = version_delimiter
if not and_dependencies:
and_dependencies = []
self.and_dependencies = and_dependencies
if not or_dependencies:
or_dependencies = []
self.or_dependencies = or_dependencies
self._check_error = None
def __str__(self):
return '<{0} {1}>'.format(type(self).__name__, self.name)
def full_name(self):
if self.name == self.long_name:
return self.name
else:
return '{0} ({1})'.format(self.long_name, self.name)
def check(self):
if self._check_error:
raise self._check_error
try:
self._check_dependencies()
return self._check()
except DependencyError as e:
self._check_error = e # cache for future calls
raise
def _check_dependencies(self):
for dependency in self.and_dependencies:
if not hasattr(dependency, 'check'):
dependency = CHECKER[dependency]
try:
dependency.check()
except DependencyError as e:
raise DependencyError(
checker=self,
message=(
'some dependencies for {0} were not satisfied'
).format(self.full_name()),
causes=[e])
self.or_pass = None
or_errors = []
for dependency in self.or_dependencies:
if not hasattr(dependency, 'check'):
dependency = CHECKER[dependency]
try:
version = dependency.check()
except DependencyError as e:
or_errors.append(e)
else:
self.or_pass = {
'dependency': dependency,
'version': version,
}
break # no need to test other dependencies
if self.or_dependencies and not self.or_pass:
raise DependencyError(
checker=self,
message=(
'{0} requires at least one of the following dependencies'
).format(self.full_name()),
causes=or_errors)
def _check(self):
version = self._get_version()
parsed_version = None
if hasattr(self, '_get_parsed_version'):
parsed_version = self._get_parsed_version()
if self.minimum_version:
self._check_version(version=version, parsed_version=parsed_version)
return version
def _get_version(self):
raise NotImplementedError(self)
def _minimum_version_string(self):
return self.version_delimiter.join(
str(part) for part in self.minimum_version)
def _check_version(self, version, parsed_version=None):
if not parsed_version:
parsed_version = self._parse_version(version=version)
if not parsed_version or parsed_version < self.minimum_version:
raise DependencyError(
checker=self,
message='outdated version of {0}: {1} (need >= {2})'.format(
self.full_name(), version, self._minimum_version_string()))
def _parse_version(self, version):
if not version:
return None
parsed_version = []
for part in version.split(self.version_delimiter):
try:
parsed_version.append(int(part))
except ValueError as e:
raise DependencyError(
checker=self,
message=(
'unparsable {0!r} in version {1} of {2}, (need >= {3})'
).format(
part, version, self.full_name(),
self._minimum_version_string()))# from e
return tuple(parsed_version)
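# A hedged sketch (not part of the original script) of how minimum versions are
# compared; 'example' is a hypothetical dependency name.
def _example_parse_version():
    dep = Dependency(name='example', minimum_version=(1, 7, 0))
    return dep._parse_version('1.7.2')  # (1, 7, 2), which satisfies >= (1, 7, 0)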
class PythonDependency (Dependency):
def __init__(self, name='python', long_name='Python version',
minimum_version=(2, 6), **kwargs):
super(PythonDependency, self).__init__(
name=name, long_name=long_name, minimum_version=minimum_version,
**kwargs)
def _get_version(self):
return _sys.version
def _get_parsed_version(self):
return _sys.version_info
CHECKER['python'] = PythonDependency()
class CommandDependency (Dependency):
exe_extension = _distutils_ccompiler.new_compiler().exe_extension
def __init__(self, command, paths=None, version_options=('--version',),
stdin=None, version_regexp=None, version_stream='stdout',
**kwargs):
if 'name' not in kwargs:
kwargs['name'] = command
super(CommandDependency, self).__init__(**kwargs)
self.command = command
self.paths = paths
self.version_options = version_options
self.stdin = None
if not version_regexp:
regexp = r'([\d][\d{0}]*[\d])'.format(self.version_delimiter)
version_regexp = _re.compile(regexp)
self.version_regexp = version_regexp
self.version_stream = version_stream
def _get_command_version_stream(self, command=None, stdin=None,
expect=(0,)):
if command is None:
command = self.command + (self.exe_extension or '')
if not stdin:
stdin = self.stdin
if stdin:
popen_stdin = _subprocess.PIPE
else:
popen_stdin = None
try:
p = _subprocess.Popen(
[command] + list(self.version_options), stdin=popen_stdin,
stdout=_subprocess.PIPE, stderr=_subprocess.PIPE,
universal_newlines=True)
except OSError as e:
raise DependencyError(
checker=self,
message="could not find '{0}' executable".format(command),
)# from e
stdout,stderr = p.communicate(stdin)
status = p.wait()
if status not in expect:
lines = [
"failed to execute: {0} {1}".format(
command,
' '.join(_shlex.quote(arg)
for arg in self.version_options)),
'status: {0}'.format(status),
]
for name,string in [('stdout', stdout), ('stderr', stderr)]:
if string:
lines.extend([name + ':', string])
raise DependencyError(checker=self, message='\n'.join(lines))
for name,string in [('stdout', stdout), ('stderr', stderr)]:
if name == self.version_stream:
if not string:
raise DependencyError(
checker=self,
message='empty version stream on {0} for {1}'.format(
self.version_stream, command))
return string
raise NotImplementedError(self.version_stream)
def _get_version_stream(self, **kwargs):
paths = [self.command + (self.exe_extension or '')]
if self.exe_extension:
paths.append(self.command) # also look at the extension-less path
if self.paths:
paths.extend(self.paths)
or_errors = []
for path in paths:
try:
return self._get_command_version_stream(command=path, **kwargs)
except DependencyError as e:
or_errors.append(e)
raise DependencyError(
checker=self,
message='errors finding {0} version'.format(
self.full_name()),
causes=or_errors)
def _get_version(self):
version_stream = self._get_version_stream()
match = self.version_regexp.search(version_stream)
if not match:
raise DependencyError(
checker=self,
message='no version string in output:\n{0}'.format(
version_stream))
return match.group(1)
def _program_files_paths(*args):
"Utility for generating MS Windows search paths"
pf = _os.environ.get('ProgramFiles', '/usr/bin')
pfx86 = _os.environ.get('ProgramFiles(x86)', pf)
paths = [_os.path.join(pf, *args)]
if pfx86 != pf:
paths.append(_os.path.join(pfx86, *args))
return paths
for command,long_name,minimum_version,paths in [
('sh', 'Bourne Shell', None, None),
('ash', 'Almquist Shell', None, None),
('bash', 'Bourne Again Shell', None, None),
('csh', 'C Shell', None, None),
('ksh', 'KornShell', None, None),
('dash', 'Debian Almquist Shell', None, None),
('tcsh', 'TENEX C Shell', None, None),
('zsh', 'Z Shell', None, None),
('git', 'Git', (1, 7, 0), None),
('hg', 'Mercurial', (2, 0, 0), None),
('EasyMercurial', None, (1, 3), None),
('pip', None, None, None),
('sqlite3', 'SQLite 3', None, None),
('nosetests', 'Nose', (1, 0, 0), None),
('ipython', 'IPython script', (0, 13), None),
('emacs', 'Emacs', None, None),
('xemacs', 'XEmacs', None, None),
('vim', 'Vim', None, None),
('vi', None, None, None),
('nano', 'Nano', None, None),
('gedit', None, None, None),
('kate', 'Kate', None, None),
('notepad++', 'Notepad++', None,
_program_files_paths('Notepad++', 'notepad++.exe')),
('firefox', 'Firefox', None,
_program_files_paths('Mozilla Firefox', 'firefox.exe')),
('google-chrome', 'Google Chrome', None,
_program_files_paths('Google', 'Chrome', 'Application', 'chrome.exe')
),
('chromium', 'Chromium', None, None),
]:
if not long_name:
long_name = command
CHECKER[command] = CommandDependency(
command=command, paths=paths, long_name=long_name,
minimum_version=minimum_version)
del command, long_name, minimum_version, paths # cleanup namespace
class MakeDependency (CommandDependency):
makefile = '\n'.join([
'all:',
'\t@echo "MAKE_VERSION=$(MAKE_VERSION)"',
'\t@echo "MAKE=$(MAKE)"',
'',
])
def _get_version(self):
try:
return super(MakeDependency, self)._get_version()
except DependencyError as e:
version_options = self.version_options
self.version_options = ['-f', '-']
try:
stream = self._get_version_stream(stdin=self.makefile)
info = {}
for line in stream.splitlines():
try:
key,value = line.split('=', 1)
except ValueError as ve:
raise e# from NotImplementedError(stream)
info[key] = value
if info.get('MAKE_VERSION', None):
return info['MAKE_VERSION']
elif info.get('MAKE', None):
return None
raise e
finally:
self.version_options = version_options
CHECKER['make'] = MakeDependency(command='make', minimum_version=None)
class EasyInstallDependency (CommandDependency):
def _get_version(self):
try:
return super(EasyInstallDependency, self)._get_version()
except DependencyError as e:
version_stream = self.version_stream
try:
self.version_stream = 'stderr'
stream = self._get_version_stream(expect=(1,))
if 'option --version not recognized' in stream:
return 'unknown (possibly Setuptools?)'
finally:
self.version_stream = version_stream
CHECKER['easy_install'] = EasyInstallDependency(
command='easy_install', long_name='Setuptools easy_install',
minimum_version=None)
CHECKER['py.test'] = CommandDependency(
command='py.test', version_stream='stderr',
minimum_version=None)
class PathCommandDependency (CommandDependency):
"""A command that doesn't support --version or equivalent options
On some operating systems (e.g. OS X), a command's executable may
be hard to find, or not exist in the PATH. Work around that by
just checking for the existence of a characteristic file or
directory. Since the characteristic path may depend on OS,
installed version, etc., take a list of paths, and succeed if any
of them exists.
"""
def _get_command_version_stream(self, *args, **kwargs):
raise NotImplementedError()
def _get_version_stream(self, *args, **kwargs):
raise NotImplementedError()
def _get_version(self):
for path in self.paths:
if _os.path.exists(path):
return None
raise DependencyError(
checker=self,
message=(
'nothing exists at any of the expected paths for {0}:\n {1}'
).format(
self.full_name(),
'\n '.join(p for p in self.paths)))
for paths,name,long_name in [
([_os.path.join(_ROOT_PATH, 'Applications', 'Sublime Text 2.app')],
'sublime-text', 'Sublime Text'),
([_os.path.join(_ROOT_PATH, 'Applications', 'TextMate.app')],
'textmate', 'TextMate'),
([_os.path.join(_ROOT_PATH, 'Applications', 'TextWrangler.app')],
'textwrangler', 'TextWrangler'),
([_os.path.join(_ROOT_PATH, 'Applications', 'Safari.app')],
'safari', 'Safari'),
    ([_os.path.join(_ROOT_PATH, 'Applications', 'Xcode.app'), # OS X >= 10.7
      _os.path.join(_ROOT_PATH, 'Developer', 'Applications', 'Xcode.app'
          ) # OS X 10.6
],
'xcode', 'Xcode'),
]:
if not long_name:
long_name = name
CHECKER[name] = PathCommandDependency(
command=None, paths=paths, name=name, long_name=long_name)
del paths, name, long_name # cleanup namespace
class PythonPackageDependency (Dependency):
def __init__(self, package, **kwargs):
if 'name' not in kwargs:
kwargs['name'] = package
if 'and_dependencies' not in kwargs:
kwargs['and_dependencies'] = []
if 'python' not in kwargs['and_dependencies']:
kwargs['and_dependencies'].append('python')
super(PythonPackageDependency, self).__init__(**kwargs)
self.package = package
def _get_version(self):
package = self._get_package(self.package)
return self._get_version_from_package(package)
def _get_package(self, package):
try:
return _importlib.import_module(package)
except ImportError as e:
raise DependencyError(
checker=self,
message="could not import the '{0}' package for {1}".format(
package, self.full_name()),
)# from e
def _get_version_from_package(self, package):
try:
version = package.__version__
except AttributeError:
version = None
return version
for package,name,long_name,minimum_version,and_dependencies in [
('nose', None, 'Nose Python package',
CHECKER['nosetests'].minimum_version, None),
('pytest', None, 'pytest Python package',
CHECKER['py.test'].minimum_version, None),
('jinja2', 'jinja', 'Jinja', (2, 6), None),
('zmq', 'pyzmq', 'PyZMQ', (2, 1, 4), None),
('IPython', None, 'IPython Python package',
CHECKER['ipython'].minimum_version, ['jinja', 'tornado', 'pyzmq']),
('argparse', None, 'Argparse', None, None),
('numpy', None, 'NumPy', None, None),
('scipy', None, 'SciPy', None, None),
('matplotlib', None, 'Matplotlib', None, None),
('pandas', None, 'Pandas', (0, 8), None),
('sympy', None, 'SymPy', None, None),
('Cython', None, None, None, None),
('networkx', None, 'NetworkX', None, None),
('mayavi.mlab', None, 'MayaVi', None, None),
('setuptools', None, 'Setuptools', None, None),
]:
if not name:
name = package
if not long_name:
long_name = name
kwargs = {}
if and_dependencies:
kwargs['and_dependencies'] = and_dependencies
CHECKER[name] = PythonPackageDependency(
package=package, name=name, long_name=long_name,
minimum_version=minimum_version, **kwargs)
# cleanup namespace
del package, name, long_name, minimum_version, and_dependencies, kwargs
class MercurialPythonPackage (PythonPackageDependency):
def _get_version(self):
try: # mercurial >= 1.2
package = _importlib.import_module('mercurial.util')
except ImportError as e: # mercurial <= 1.1.2
package = self._get_package('mercurial.version')
return package.get_version()
else:
return package.version()
CHECKER['mercurial'] = MercurialPythonPackage(
package='mercurial.util', name='mercurial',
long_name='Mercurial Python package',
minimum_version=CHECKER['hg'].minimum_version)
class TornadoPythonPackage (PythonPackageDependency):
def _get_version_from_package(self, package):
return package.version
def _get_parsed_version(self):
package = self._get_package(self.package)
return package.version_info
CHECKER['tornado'] = TornadoPythonPackage(
package='tornado', name='tornado', long_name='Tornado', minimum_version=(2, 0))
class SQLitePythonPackage (PythonPackageDependency):
def _get_version_from_package(self, package):
return _sys.version
def _get_parsed_version(self):
return _sys.version_info
CHECKER['sqlite3-python'] = SQLitePythonPackage(
package='sqlite3', name='sqlite3-python',
long_name='SQLite Python package',
minimum_version=CHECKER['sqlite3'].minimum_version)
class UserTaskDependency (Dependency):
"Prompt the user to complete a task and check for success"
def __init__(self, prompt, **kwargs):
super(UserTaskDependency, self).__init__(**kwargs)
self.prompt = prompt
def _check(self):
if _sys.version_info >= (3, ):
result = input(self.prompt)
else: # Python 2.x
result = raw_input(self.prompt)
return self._check_result(result)
def _check_result(self, result):
raise NotImplementedError()
class EditorTaskDependency (UserTaskDependency):
def __init__(self, **kwargs):
self.path = _os.path.expanduser(_os.path.join(
'~', 'swc-installation-test.txt'))
self.contents = 'Hello, world!'
super(EditorTaskDependency, self).__init__(
prompt=(
'Open your favorite text editor and create the file\n'
' {0}\n'
'containing the line:\n'
' {1}\n'
'Press enter here after you have done this.\n'
'You may remove the file after you have finished testing.'
).format(self.path, self.contents),
**kwargs)
def _check_result(self, result):
message = None
try:
with open(self.path, 'r') as f:
contents = f.read()
except IOError as e:
raise DependencyError(
checker=self,
message='could not open {0!r}: {1}'.format(self.path, e)
)# from e
if contents.strip() != self.contents:
raise DependencyError(
checker=self,
message=(
'file contents ({0!r}) did not match the expected {1!r}'
).format(contents, self.contents))
CHECKER['other-editor'] = EditorTaskDependency(
name='other-editor', long_name='')
class VirtualDependency (Dependency):
def _check(self):
return '{0} {1}'.format(
self.or_pass['dependency'].full_name(),
self.or_pass['version'])
for name,long_name,dependencies in [
('virtual-shell', 'command line shell', (
'bash',
'dash',
'ash',
'zsh',
'ksh',
'csh',
'tcsh',
'sh',
)),
('virtual-editor', 'text/code editor', (
'emacs',
'xemacs',
'vim',
'vi',
'nano',
'gedit',
'kate',
'notepad++',
'sublime-text',
'textmate',
'textwrangler',
'other-editor', # last because it requires user interaction
)),
('virtual-browser', 'web browser', (
'firefox',
'google-chrome',
'chromium',
'safari',
)),
('virtual-pypi-installer', 'PyPI installer', (
'pip',
'easy_install',
)),
]:
CHECKER[name] = VirtualDependency(
name=name, long_name=long_name, or_dependencies=dependencies)
del name, long_name, dependencies # cleanup namespace
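# Hedged example (not part of the original script): extra command-line tools can
# be registered with the same pattern used above; 'pandoc' and its minimum
# version here are illustrative only.
#
# CHECKER['pandoc'] = CommandDependency(
#     command='pandoc', long_name='Pandoc', minimum_version=(1, 12))
#
# A registered key can then be checked via check(['pandoc']), just as the
# positional arguments are handled in the __main__ block below.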
def _print_info(key, value, indent=19):
print('{0}{1}: {2}'.format(key, ' '*(indent-len(key)), value))
def print_system_info():
print("If you do not understand why the above failures occurred,")
print("copy and send the *entire* output (all info above and summary")
print("below) to the instructor for help.")
print()
print('==================')
print('System information')
print('==================')
_print_info('os.name', _os.name)
_print_info('os.uname', _platform.uname())
_print_info('platform', _sys.platform)
_print_info('platform+', _platform.platform())
for pversion in (
'linux_distribution',
'mac_ver',
'win32_ver',
):
value = getattr(_platform, pversion)()
if value[0]:
_print_info(pversion, value)
_print_info('prefix', _sys.prefix)
_print_info('exec_prefix', _sys.exec_prefix)
_print_info('executable', _sys.executable)
_print_info('version_info', _sys.version_info)
_print_info('version', _sys.version)
_print_info('environment', '')
for key,value in sorted(_os.environ.items()):
print(' {0}={1}'.format(key, value))
print('==================')
def print_suggestions(instructor_fallback=True):
print()
print('For suggestions on installing missing packages, see')
print('http://software-carpentry.org/setup/')
print('')
    print('For instructions on installing a particular package,')
print('see the failure message for that package printed above.')
if instructor_fallback:
print('')
print('For help, email the *entire* output of this script to')
print('your instructor.')
if __name__ == '__main__':
import optparse as _optparse
parser = _optparse.OptionParser(usage='%prog [options] [check...]')
epilog = __doc__
parser.format_epilog = lambda formatter: '\n' + epilog
parser.add_option(
'-v', '--verbose', action='store_true',
help=('print additional information to help troubleshoot '
'installation issues'))
options,args = parser.parse_args()
try:
passed = check(args)
except InvalidCheck as e:
print("I don't know how to check for {0!r}".format(e.check))
print('I do know how to check for:')
for key,checker in sorted(CHECKER.items()):
if checker.long_name != checker.name:
print(' {0} {1}({2})'.format(
key, ' '*(20-len(key)), checker.long_name))
else:
print(' {0}'.format(key))
_sys.exit(1)
if not passed:
if options.verbose:
print()
print_system_info()
print_suggestions(instructor_fallback=True)
_sys.exit(1)
|
py | b401a2d7572f2bee31e63e885c563f3905d2f882 |
def apply_template(content):
    # Wrap page content in the shared base template by filling in the
    # {{content}} placeholder.
    template = open("templates/base.html").read()
    return template.replace("{{content}}", content)
pages = [
    {
        "filename": "content/about.html",
        "output": "docs/about.html",
        "title": "About Me",
    },
    {
        "filename": "content/projects.html",
        "output": "docs/project.html",
        "title": "Projects",
    },
    {
        "filename": "content/blog.html",
        "output": "docs/blog.html",
        "title": "Blog",
    },
    {
        "filename": "content/first.html",
        "output": "docs/index.html",
        "title": "Index",
    },
]
def main():
    # Render every content page through the base template and write the result
    # to its output path. The "title" field is kept with each page but is not
    # substituted here, since the original template only used {{content}}.
    for page in pages:
        content = open(page["filename"]).read()
        finished_page = apply_template(content)
        open(page["output"], "w+").write(finished_page)
    print('done')
if __name__ == "__main__":
    main()
|
py | b401a3b2c363d7e166625e440778879f2459f923 | # SPDX-Copyright: Copyright (c) Capital One Services, LLC
# SPDX-License-Identifier: Apache-2.0
# Copyright 2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility Module
Module which utility functions for use within the application
"""
import gzip
import os
import shutil
import sys
import threading
from collections import OrderedDict
from itertools import cycle
import yaml
from .errors import (
CompressionError,
CredentialsError,
LocopyConcatError,
LocopyIgnoreHeaderError,
LocopySplitError,
)
from .logger import INFO, get_logger
logger = get_logger(__name__, INFO)
def write_file(data, delimiter, filepath, mode="w"):
"""Write data to a file.
Parameters
----------
data : list
List of lists
delimiter : str
Delimiter by which columns will be separated
filepath : str
Location of the output file
mode : str
File writing mode. Examples include 'w' for write or 'a' for append.
Defaults to write mode.
See https://www.tutorialspoint.com/python/python_files_io.htm
"""
logger.debug("Attempting to write data to file: %s", filepath)
try:
with open(filepath, mode) as f:
for row in data:
f.write(delimiter.join([str(r) for r in row]) + "\n")
except Exception as e:
logger.error("Unable to write file to %s due to err: %s", filepath, e)
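# Hedged usage sketch (not part of the original module): writes a small
# in-memory dataset with write_file; the output path "example_rows.txt" and the
# pipe delimiter are illustrative choices.
def _example_write_file(filepath="example_rows.txt"):
    rows = [["a", 1, 2.5], ["b", 2, 3.5]]
    write_file(rows, "|", filepath)            # write pipe-delimited rows
    write_file(rows, "|", filepath, mode="a")  # append the same rows again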
def compress_file(input_file, output_file):
"""Compresses a file (gzip)
Parameters
----------
input_file : str
Path to input file to compress
output_file : str
Path to write the compressed file
"""
try:
with open(input_file, "rb") as f_in:
with gzip.open(output_file, "wb") as f_out:
logger.info("compressing (gzip): %s to %s", input_file, output_file)
shutil.copyfileobj(f_in, f_out)
except Exception as e:
logger.error("Error compressing the file. err: %s", e)
raise CompressionError("Error compressing the file.")
def compress_file_list(file_list):
"""Compresses a list of files (gzip) and clean up the old files
Parameters
----------
file_list : list
List of strings with the file paths of the files to compress
Returns
-------
list
List of strings with the file paths of the compressed files (original file name with
gz appended)
"""
for i, f in enumerate(file_list):
gz = "{0}.gz".format(f)
compress_file(f, gz)
file_list[i] = gz
os.remove(f) # cleanup old files
return file_list
def split_file(input_file, output_file, splits=1, ignore_header=0):
"""Split a file into equal files by lines.
    For example: ``myinputfile.txt`` will be split into ``myoutputfile.txt.0``,
    ``myoutputfile.txt.1``, etc.
Parameters
----------
input_file : str
Path to input file to split
output_file : str
Name of the output file
splits : int, optional
Number of splits to perform. Must be greater than zero. Defaults to 1
ignore_header : int, optional
If ``ignore_header`` is > 0 then that number of rows will be removed from the beginning of
the files as they are split. Defaults to 0
Returns
-------
list
List of strings with the file paths of the split files
Raises
------
LocopySplitError
If ``splits`` is less than 1 or some processing error when splitting
"""
if type(splits) is not int or splits <= 0:
logger.error("Number of splits is invalid")
raise LocopySplitError("Number of splits must be greater than zero and an integer.")
if splits == 1:
return [input_file]
try:
pool = list(range(splits))
cpool = cycle(pool)
logger.info("splitting file: %s into %s files", input_file, splits)
# open output file handlers
files = [open("{0}.{1}".format(output_file, x), "wb") for x in pool]
# open input file and send line to different handler
with open(input_file, "rb") as f_in:
# if we have a value in ignore_header then skip those many lines to start
for _ in range(ignore_header):
next(f_in)
for line in f_in:
files[next(cpool)].write(line)
# close file connection
for x in pool:
files[x].close()
return [f.name for f in files]
except Exception as e:
logger.error("Error splitting the file. err: %s", e)
if len(files) > 0:
logger.error("Cleaning up intermediary files: %s", files)
for x in pool:
files[x].close()
os.remove(files[x].name)
raise LocopySplitError("Error splitting the file.")
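# Hedged usage sketch (not part of the original module): splits a hypothetical
# "input.csv" into three round-robin parts named "part.csv.0" .. "part.csv.2",
# dropping one header row first.
def _example_split_file():
    return split_file("input.csv", "part.csv", splits=3, ignore_header=1)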
def concatenate_files(input_list, output_file, remove=True):
"""Concatenate a list of files into one file.
Parameters
----------
input_list : list
        List of strings with the paths of the input files to concatenate
output_file : str
Path of the output file
remove: bool, optional
Removes the files from the input list if ``True``. Defaults to ``True``
Raises
------
LocopyConcatError
        If ``input_list`` is empty or there is an issue while concatenating the files into one
"""
if len(input_list) == 0:
raise LocopyConcatError("Input list is empty.")
try:
with open(output_file, "ab") as main_f:
for f in input_list:
with open(f, "rb") as temp_f:
for line in temp_f:
main_f.write(line)
if remove: # as we go for space consideration
os.remove(f)
except Exception as e:
        logger.error("Error concatenating files. err: %s", e)
        raise LocopyConcatError("Error concatenating files.")
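# Hedged usage sketch (not part of the original module): stitches hypothetical
# split parts back into a single file; remove=False keeps the part files.
def _example_concatenate_files():
    concatenate_files(
        ["part.csv.0", "part.csv.1", "part.csv.2"], "combined.csv", remove=False
    )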
def read_config_yaml(config_yaml):
"""
Reads a configuration YAML file to populate the database
connection attributes, and validate required ones. Example::
host: my.redshift.cluster.com
port: 5439
dbname: db
user: userid
password: password
Parameters
----------
config_yaml : str or file pointer
String representing the file location of the configuration file, or a
pointer to an open file object
Returns
-------
dict
A dictionary of parameters for setting up a connection to the database.
Raises
------
CredentialsError
If any connection items are missing from the YAML file
"""
try:
if isinstance(config_yaml, str):
with open(config_yaml) as config:
locopy_yaml = yaml.safe_load(config)
else:
locopy_yaml = yaml.safe_load(config_yaml)
except Exception as e:
logger.error("Error reading yaml. err: %s", e)
raise CredentialsError("Error reading yaml.")
return locopy_yaml
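# Hedged usage sketch (not part of the original module): read_config_yaml also
# accepts an open file-like object, so an in-memory YAML string works as well.
def _example_read_config_yaml():
    import io
    fake_yaml = io.StringIO(
        "host: my.redshift.cluster.com\n"
        "port: 5439\n"
        "dbname: db\n"
        "user: userid\n"
        "password: password\n"
    )
    return read_config_yaml(fake_yaml)  # -> dict of connection parameters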
# make it more granular, eg. include length
def find_column_type(dataframe):
"""
Find data type of each column from the dataframe.
Following is the list of pandas data types that the function checks and their mapping in sql:
bool -> boolean
datetime64[ns] -> timestamp
M8[ns] -> timestamp
int -> int
float -> float
float object -> float
datetime object -> timestamp
object -> varchar
For all other data types, the column will be mapped to varchar type.
Parameters
----------
dataframe : Pandas dataframe
Returns
-------
dict
A dictionary of columns with their data type
"""
from datetime import datetime, date
import pandas as pd
import re
def validate_date_object(column):
try:
pd.to_datetime(column)
if re.search(r"\d+:\d+:\d+", column.sample(1).to_string(index=False)):
return "timestamp"
else:
return "date"
except (ValueError, TypeError):
return None
def validate_float_object(column):
try:
pd.to_numeric(column)
return "float"
except (ValueError, TypeError):
return None
column_type = []
for column in dataframe.columns:
logger.debug("Checking column: %s", column)
data = dataframe[column].dropna().reset_index(drop=True)
if data.size == 0:
column_type.append("varchar")
elif data.dtype in ["datetime64[ns]", "M8[ns]"]:
column_type.append("timestamp")
elif data.dtype == "bool":
column_type.append("boolean")
elif str(data.dtype).startswith("object"):
data_type = validate_float_object(data) or validate_date_object(data)
if not data_type:
column_type.append("varchar")
else:
column_type.append(data_type)
elif str(data.dtype).startswith("int"):
column_type.append("int")
elif str(data.dtype).startswith("float"):
column_type.append("float")
else:
column_type.append("varchar")
logger.info("Parsing column %s to %s", column, column_type[-1])
return OrderedDict(zip(list(dataframe.columns), column_type))
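# Hedged usage sketch (not part of the original module): illustrates the
# dtype-to-SQL-type mapping on a tiny pandas DataFrame with made-up columns.
def _example_find_column_type():
    import pandas as pd
    df = pd.DataFrame(
        {
            "flag": [True, False],
            "count": [1, 2],
            "price": [1.5, 2.25],
            "label": ["a", "b"],
        }
    )
    # Expected roughly: flag -> boolean, count -> int, price -> float, label -> varchar
    return find_column_type(df)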
class ProgressPercentage(object):
"""
ProgressPercentage class is used by the S3Transfer upload_file callback
Please see the following url for more information:
http://boto3.readthedocs.org/en/latest/reference/customizations/s3.html#ref-s3transfer-usage
"""
def __init__(self, filename):
"""
Initiate the ProgressPercentage class, using the base information which
makes up a pipeline
Args:
filename (str): A name of the file which we will monitor the
progress of
"""
self._filename = filename
self._size = float(os.path.getsize(filename))
self._seen_so_far = 0
self._lock = threading.Lock()
def __call__(self, bytes_amount):
# To simplify we'll assume this is hooked up
# to a single filename.
with self._lock:
self._seen_so_far += bytes_amount
percentage = (self._seen_so_far / self._size) * 100
            sys.stdout.write(
                "\rTransferring [{0}] {1:.2f}%".format("#" * int(percentage / 10), percentage)
            )
sys.stdout.flush()
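# Hedged usage sketch (not part of the original module): wires ProgressPercentage
# into a boto3 S3 upload as the transfer callback; boto3 must be installed and
# the bucket/key names here are hypothetical.
def _example_progress_percentage(filename="data.csv.gz"):
    import boto3
    s3 = boto3.client("s3")
    s3.upload_file(
        filename, "my-bucket", "uploads/data.csv.gz",
        Callback=ProgressPercentage(filename),
    )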
def get_ignoreheader_number(options):
"""
    Return the ``number_rows`` from ``IGNOREHEADER [ AS ] number_rows``. This does not validate
that the ``AS`` is valid.
Parameters
----------
options : A list (str) of copy options that should be appended to the COPY
statement.
Returns
-------
int
The ``number_rows`` from ``IGNOREHEADER [ AS ] number_rows``
Raises
------
LocopyIgnoreHeaderError
If more than one IGNOREHEADER is found in the options
"""
ignore = [i for i in options if i.startswith("IGNOREHEADER ")]
if len(ignore) == 0:
return 0
elif len(ignore) == 1:
return int(ignore[0].strip().split(" ")[-1])
else:
raise LocopyIgnoreHeaderError("Found more than one IGNOREHEADER in the options")
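# Hedged usage sketch (not part of the original module): how IGNOREHEADER is
# extracted from a list of COPY options.
def _example_get_ignoreheader_number():
    assert get_ignoreheader_number(["DELIMITER ','", "IGNOREHEADER 1"]) == 1
    assert get_ignoreheader_number(["IGNOREHEADER AS 2"]) == 2
    assert get_ignoreheader_number(["DELIMITER ','"]) == 0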
|
py | b401a4fa7bfc130cfb188147c5acd0f9e5b07c76 | import Downloading.download
from Downloading.scrapper.ScrapperManager import ScrapperManager
if __name__ == '__main__':
doi = 'https://doi.org/10.1016/j.copsyc.2017.03.030'
doi = '10.1111/j.1529-1006.2007.00032.x'
doi = 'https://doi.org/10.3389/fpsyg.2019.03050'
doi = 'https://doi.org/10.3389/fnint.2011.00057'
the_path = '/home/proxpxd/Desktop/moje_programy/systemowe/Scihub/Documents/10.1210.er.2017-00246.pdf'
the_path = '/home/proxpxd/Desktop/moje_programy/systemowe/Scihub/Documents/10.1016.j.copsyc.2017.03.030.pdf'
the_path = '/home/proxpxd/Desktop/moje_programy/systemowe/Scihub/Documents/10.3389.fpsyg.2019.03050.pdf'
# doi = Downloading.download.get_doi_from_reference(doi)
dois = ['10.1111/j.1529-1006.2007.00032.x', 'https://doi.org/10.3389/fpsyg.2019.03050',
'https://doi.org/10.3389/fnint.2011.00057', 'https://doi.org/10.1016/j.copsyc.2017.03.030',
'10.1210/er.2017-00246']
dois = ['10.1007/BF02734133']
scr = ScrapperManager(doi)
for doi in dois:
doi = Downloading.download.get_doi_from_reference(doi)
print(doi)
scr.set_doi(doi)
scr.scrap_document()
# the_path = scr.scrap_document()
# manager = PdfMiner.PdfMangager.PdfManager(the_path)
# title, authors = manager.get_title_and_author()
# print(title)
# manager.trim_scihub_front_page_if_exists()
# print(convert_pdf_to_string(the_path))
# path = scr.scrap(doi)
# print(path)
# reader = PyPDF2.PdfFileReader(the_path)
# print(reader.documentInfo)
# for page in reader.pages:
# print(page.extractText(), ('\n' * 2) + '*' * 20)
|
py | b401a508bb3c34db718411cbfad01d0fce818149 | #!python3
import argparse
import os
from subprocess import run
def create_experiment(folder, experiment, screen, sbatch, nbr_cpu):
os.chdir(folder)
exp_path = os.getcwd() + '/Experiments/' + experiment.replace(".yaml", "")
os.makedirs(exp_path, exist_ok=True)
os.system('cp {0} {1}/config.yaml'.format(experiment, exp_path))
os.remove(exp_path + "/Snakefile") if os.path.exists(exp_path + "/Snakefile") else None
os.symlink(os.getcwd() + "/Snakefile", exp_path + "/Snakefile")
run_file = exp_path + "/snakeslurm.sh"
with open(run_file, 'w') as w:
w.write("#!/usr/bin/env bash\n")
run_str = 'snakemake '
if sbatch:
run_str += '-j 99 --cluster "sbatch -J {0} -p long -N 1 ' \
'-o {1}/slurm.%x.%j.out -e {1}/slurm.%x.%j.err '.format(experiment, exp_path)
run_str += '--cpus-per-task={params.threads} --mem={params.mem} -t {params.time}"\n'
else:
run_str += "-j {0} --printshellcmds".format(nbr_cpu)
w.write(run_str)
os.system("chmod 755 " + run_file)
cmd = 'cd ' + exp_path + ' && ./snakeslurm.sh'
screen_cmd = 'screen -dmS ' + experiment + ' bash -c "' + cmd + '"'
with open(exp_path + "/screen.sh", 'w') as w:
w.write("#!/usr/bin/env bash\n")
w.write(screen_cmd)
if screen:
print(screen_cmd)
run(screen_cmd, shell=True)
else:
print(cmd)
run(cmd, shell=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-f', '--folder', required=False, type=str, default="Heatmap", dest="folder")
parser.add_argument('-c', '--config', required=False, type=str, default="figure-4A.yaml", dest="experiment")
parser.add_argument('-s', '--screen', required=False, type=bool, default=False, dest="screen")
parser.add_argument('-b', '--sbatch', required=False, type=bool, default=False, dest="sbatch")
parser.add_argument('-j', '--nbr_cpu', required=False, type=int, default=6, dest="nbr_cpu")
args = parser.parse_args()
if args.experiment == "":
import uuid
args.experiment = str(uuid.uuid4())[:8]
create_experiment(args.folder, args.experiment, args.screen, args.sbatch, args.nbr_cpu)
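# Hedged usage note (not part of the original script): a typical local run, with
# illustrative file names (the script name itself is hypothetical):
#   python create_experiment.py -f Heatmap -c figure-4A.yaml -j 8
# Pass -b True to submit jobs through sbatch instead, or -s True to detach the
# run into a screen session.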
|
py | b401a5f98105708c8acd1afbf127427d27622fc0 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF general model utils."""
import functools
import inspect
import os
import re
import warnings
from typing import Dict, List, Optional, Union
import h5py
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.saving import hdf5_format
from .configuration_utils import PretrainedConfig
from .file_utils import (
DUMMY_INPUTS,
TF2_WEIGHTS_NAME,
WEIGHTS_NAME,
ModelOutput,
cached_path,
hf_bucket_url,
is_remote_url,
)
from .generation_tf_utils import TFGenerationMixin
from .tokenization_utils_base import BatchEncoding
from .utils import logging
logger = logging.get_logger(__name__)
class TFModelUtilsMixin:
"""
A few utilities for :obj:`tf.keras.Model`, to be used as a mixin.
"""
def num_parameters(self, only_trainable: bool = False) -> int:
"""
Get the number of (optionally, trainable) parameters in the model.
Args:
only_trainable (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to return only the number of trainable parameters
Returns:
:obj:`int`: The number of parameters.
"""
if only_trainable:
return int(sum(np.prod(w.shape.as_list()) for w in self.trainable_variables))
else:
return self.count_params()
def keras_serializable(cls):
"""
Decorate a Keras Layer class to support Keras serialization.
This is done by:
    1. Adding a :obj:`transformers_config` dict to the Keras config dictionary in :obj:`get_config` (called by Keras at
       serialization time).
2. Wrapping :obj:`__init__` to accept that :obj:`transformers_config` dict (passed by Keras at deserialization
time) and convert it to a config object for the actual layer initializer.
3. Registering the class as a custom object in Keras (if the Tensorflow version supports this), so that it does not
need to be supplied in :obj:`custom_objects` in the call to :obj:`tf.keras.models.load_model`.
Args:
cls (a :obj:`tf.keras.layers.Layers subclass`):
Typically a :obj:`TF.MainLayer` class in this project, in general must accept a :obj:`config` argument to
its initializer.
Returns:
The same class object, with modifications for Keras deserialization.
"""
initializer = cls.__init__
config_class = getattr(cls, "config_class", None)
if config_class is None:
raise AttributeError("Must set `config_class` to use @keras_serializable")
@functools.wraps(initializer)
def wrapped_init(self, *args, **kwargs):
config = args[0] if args and isinstance(args[0], PretrainedConfig) else kwargs.pop("config", None)
if isinstance(config, dict):
config = config_class.from_dict(config)
initializer(self, config, *args, **kwargs)
elif isinstance(config, PretrainedConfig):
if len(args) > 0:
initializer(self, *args, **kwargs)
else:
initializer(self, config, *args, **kwargs)
else:
raise ValueError("Must pass either `config` (PretrainedConfig) or `config` (dict)")
self._config = config
self._kwargs = kwargs
cls.__init__ = wrapped_init
if not hasattr(cls, "get_config"):
raise TypeError("Only use @keras_serializable on tf.keras.layers.Layer subclasses")
if hasattr(cls.get_config, "_is_default"):
def get_config(self):
cfg = super(cls, self).get_config()
cfg["config"] = self._config.to_dict()
cfg.update(self._kwargs)
return cfg
cls.get_config = get_config
cls._keras_serializable = True
if hasattr(tf.keras.utils, "register_keras_serializable"):
cls = tf.keras.utils.register_keras_serializable()(cls)
return cls
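# Hedged illustration (not part of the original file): the typical shape of a
# layer decorated with @keras_serializable; "MyConfig" stands in for a concrete
# PretrainedConfig subclass defined elsewhere.
#
# @keras_serializable
# class TFMyMainLayer(tf.keras.layers.Layer):
#     config_class = MyConfig  # required by the decorator
#
#     def __init__(self, config, **kwargs):
#         super().__init__(**kwargs)
#         self.config = config
#
#     def call(self, inputs):
#         return inputs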
class TFCausalLanguageModelingLoss:
"""
Loss function suitable for causal language modeling (CLM), that is, the task of guessing the next token.
.. note::
Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.
"""
def compute_loss(self, labels, logits):
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE
)
# make sure only labels that are not equal to -100 do not affect loss
active_loss = tf.not_equal(tf.reshape(labels, (-1,)), -100)
reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss)
labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss)
return loss_fn(labels, reduced_logits)
class TFQuestionAnsweringLoss:
"""
Loss function suitable for question answering.
"""
def compute_loss(self, labels, logits):
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE
)
start_loss = loss_fn(labels["start_position"], logits[0])
end_loss = loss_fn(labels["end_position"], logits[1])
return (start_loss + end_loss) / 2.0
class TFTokenClassificationLoss:
"""
Loss function suitable for token classification.
.. note::
Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.
"""
def compute_loss(self, labels, logits):
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE
)
# make sure only labels that are not equal to -100
# are taken into account as loss
if tf.math.reduce_any(labels == -1):
warnings.warn("Using `-1` to mask the loss for the token is deprecated. Please use `-100` instead.")
active_loss = tf.reshape(labels, (-1,)) != -1
else:
active_loss = tf.reshape(labels, (-1,)) != -100
reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss)
labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss)
return loss_fn(labels, reduced_logits)
class TFSequenceClassificationLoss:
"""
Loss function suitable for sequence classification.
"""
def compute_loss(self, labels, logits):
if len(shape_list(logits)) == 1 or shape_list(logits)[1] == 1:
loss_fn = tf.keras.losses.MeanSquaredError(reduction=tf.keras.losses.Reduction.NONE)
else:
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE
)
return loss_fn(labels, logits)
class TFMultipleChoiceLoss(TFSequenceClassificationLoss):
"""Loss function suitable for multiple choice tasks."""
class TFMaskedLanguageModelingLoss(TFCausalLanguageModelingLoss):
"""
Loss function suitable for masked language modeling (MLM), that is, the task of guessing the masked tokens.
.. note::
Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.
"""
class TFNextSentencePredictionLoss:
"""
Loss function suitable for next sentence prediction (NSP), that is, the task of guessing the next sentence.
.. note::
Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.
"""
def compute_loss(self, labels, logits):
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE
)
# make sure only labels that are not equal to -100
# are taken into account as loss
next_sentence_active_loss = tf.not_equal(tf.reshape(labels, (-1,)), -100)
next_sentence_reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, 2)), next_sentence_active_loss)
next_sentence_label = tf.boolean_mask(tf.reshape(labels, (-1,)), next_sentence_active_loss)
return loss_fn(next_sentence_label, next_sentence_reduced_logits)
def booleans_processing(config, **kwargs):
"""
Process the input booleans of each model in order to be sure they are compliant with the execution mode (eager or
graph)
Args:
config (:class:`~transformers.PretrainedConfig`):
The config of the running model.
**kwargs:
The boolean parameters
Returns:
A dictionary with the proper values for each boolean
"""
final_booleans = {}
if tf.executing_eagerly():
final_booleans["output_attentions"] = (
kwargs["output_attentions"] if kwargs["output_attentions"] is not None else config.output_attentions
)
final_booleans["output_hidden_states"] = (
kwargs["output_hidden_states"]
if kwargs["output_hidden_states"] is not None
else config.output_hidden_states
)
if "return_dict" in kwargs:
final_booleans["return_dict"] = (
kwargs["return_dict"] if kwargs["return_dict"] is not None else config.return_dict
)
if "use_cache" in kwargs:
final_booleans["use_cache"] = kwargs["use_cache"] if kwargs["use_cache"] is not None else config.use_cache
else:
if (
kwargs["output_attentions"] is not None
or kwargs["output_hidden_states"] is not None
or ("use_cache" in kwargs and kwargs["use_cache"] is not None)
):
            logger.warning(
                "The parameters `output_attentions`, `output_hidden_states` and `use_cache` cannot be updated when calling a model. "
                "They have to be set to True/False in the config object (i.e.: `config=XConfig.from_pretrained('name', output_attentions=True)`)."
            )
final_booleans["output_attentions"] = config.output_attentions
final_booleans["output_hidden_states"] = config.output_hidden_states
if "return_dict" in kwargs:
if kwargs["return_dict"] is not None:
logger.warning(
"The parameter `return_dict` cannot be set in graph mode and will always be set to `True`."
)
final_booleans["return_dict"] = True
if "use_cache" in kwargs:
final_booleans["use_cache"] = config.use_cache
return final_booleans
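# Hedged illustration (not part of the original file): in eager mode, flags left
# as None fall back to the corresponding attributes on the model config.
def _example_booleans_processing(config):
    # `config` is any PretrainedConfig instance loaded elsewhere.
    return booleans_processing(
        config,
        output_attentions=None,     # -> config.output_attentions
        output_hidden_states=True,  # explicit value wins over the config
        return_dict=None,           # -> config.return_dict
    )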
def input_processing(func, config, input_ids, **kwargs):
"""
Process the input of each TensorFlow model including the booleans. In case of a list of symbolic inputs, each input
has to be named accordingly to the parameters name, i.e. `input_ids = tf.keras.Input(shape=(128,), dtype='int32',
name="input_ids")` otherwise the order of the tensors will not be guaranteed during the training.
Args:
func (:obj:`callable`):
The callable function of the TensorFlow model.
config (:class:`~transformers.PretrainedConfig`):
The config of the running model.
**kwargs:
The inputs of the model.
Returns:
        A dictionary mapping each expected parameter name to its processed value, with the boolean flags resolved
        against the model config.
"""
signature = dict(inspect.signature(func).parameters)
signature.pop("kwargs", None)
parameter_names = list(signature.keys())
output = {}
allowed_types = (tf.Tensor, bool, int, ModelOutput, tuple, list, dict)
if "inputs" in kwargs["kwargs_call"]:
warnings.warn(
"The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead.",
FutureWarning,
)
output["input_ids"] = kwargs["kwargs_call"].pop("inputs")
if "decoder_cached_states" in kwargs["kwargs_call"]:
warnings.warn(
"The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
FutureWarning,
)
output["past_key_values"] = kwargs["kwargs_call"].pop("decoder_cached_states")
if len(kwargs["kwargs_call"]) > 0:
raise ValueError(
f"The following keyword arguments are not supported by this model: {list(kwargs['kwargs_call'].keys())}."
)
for k, v in kwargs.items():
if isinstance(v, allowed_types) or v is None:
output[k] = v
else:
raise ValueError(f"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.")
if isinstance(input_ids, (tuple, list)):
for i, input in enumerate(input_ids):
# EagerTensors don't allow to use the .name property so we check for a real Tensor
if type(input) == tf.Tensor:
# Tensor names have always the pattern name:device_id then we check only the
# name and not the device id
tensor_name = input.name.split(":")[0]
if tensor_name in parameter_names:
output[tensor_name] = input
else:
output[parameter_names[i]] = input
elif isinstance(input, allowed_types) or input is None:
output[parameter_names[i]] = input
else:
raise ValueError(
f"Data of type {type(input)} is not allowed only {allowed_types} is accepted for {parameter_names[i]}."
)
elif isinstance(input_ids, (dict, BatchEncoding)):
if "inputs" in input_ids:
warnings.warn(
"The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead.",
FutureWarning,
)
output["input_ids"] = input_ids.pop("inputs")
if "decoder_cached_states" in input_ids:
warnings.warn(
"The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
FutureWarning,
)
output["past_key_values"] = input_ids.pop("decoder_cached_states")
for k, v in dict(input_ids).items():
if isinstance(v, allowed_types) or v is None:
output[k] = v
elif k not in parameter_names and "args" not in parameter_names:
                logger.warn(
                    f"The parameter {k} does not belong to the parameter list {parameter_names} and will be ignored."
                )
continue
else:
raise ValueError(f"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.")
else:
if isinstance(input_ids, tf.Tensor) or input_ids is None:
output[parameter_names[0]] = input_ids
else:
raise ValueError(
f"Data of type {type(input_ids)} is not allowed only {allowed_types} is accepted for {parameter_names[0]}."
)
for name in parameter_names:
if name not in list(output.keys()) and name != "args":
output[name] = kwargs.pop(name, signature[name].default)
# When creating a SavedModel TF calls the method with LayerCall.__call__(args, **kwargs)
# So to respect the proper output we have to add this exception
if "args" in output:
if output["args"] is not None and type(output["args"]) == tf.Tensor:
tensor_name = output["args"].name.split(":")[0]
output[tensor_name] = output["args"]
else:
# `args` in this case is always the first parameter, then `input_ids`
output["input_ids"] = output["args"]
del output["args"]
if "kwargs" in output:
del output["kwargs"]
boolean_dict = {
k: v
for k, v in output.items()
if k in ["return_dict", "output_attentions", "output_hidden_states", "use_cache"]
}
output.update(
booleans_processing(
config=config,
**boolean_dict,
)
)
return output
def load_tf_weights(model, resolved_archive_file):
"""
Detect missing and unexpected layers and load the TF weights accordingly to their names and shapes.
Args:
model (:obj:`tf.keras.models.Model`):
The model to load the weights into.
resolved_archive_file (:obj:`str`):
The location of the H5 file.
Returns:
Two lists, one for the missing layers, and another one for the unexpected layers.
"""
missing_layers = []
unexpected_layers = []
# Read the H5 file
with h5py.File(resolved_archive_file, "r") as f:
# Retrieve the name of each layer from the H5 file
saved_h5_model_layers_name = set(hdf5_format.load_attributes_from_hdf5_group(f, "layer_names"))
# Find the missing layers from the high level list of layers
missing_layers = list(set([layer.name for layer in model.layers]) - saved_h5_model_layers_name)
# Find the unexpected layers from the high level list of layers
unexpected_layers = list(saved_h5_model_layers_name - set([layer.name for layer in model.layers]))
saved_weight_names_set = set()
symbolic_weights_names = set()
weight_value_tuples = []
# Compute missing and unexpected sub layers
# Store the weights in list of tuples that looks like [(weight_object, value_of_weight),...]
for layer in model.layers:
# if layer_name from the H5 file belongs to the layers from the instantiated model
if layer.name in saved_h5_model_layers_name:
# Get the H5 layer object from its name
h5_layer_object = f[layer.name]
# Get all the weights as a list from the layer object
symbolic_weights = layer.trainable_weights + layer.non_trainable_weights
saved_weights = {}
# Create a dict from the H5 saved model that looks like {"weight_name": weight_value}
# And a set with only the names
for weight_name in hdf5_format.load_attributes_from_hdf5_group(h5_layer_object, "weight_names"):
# TF names always start with the model name so we ignore it
name = "/".join(weight_name.split("/")[1:])
saved_weights[name] = np.asarray(h5_layer_object[weight_name])
# Add the updated name to the final list for computing missing/unexpected values
saved_weight_names_set.add(name)
# Loop over each weights from the instantiated model and compare with the weights from the H5 file
for symbolic_weight in symbolic_weights:
# TF names always start with the model name so we ignore it
symbolic_weight_name = "/".join(symbolic_weight.name.split("/")[1:])
# here we check if the current weight is among the weights from the H5 file
# If yes, get the weight_value of the corresponding weight from the H5 file
# If not, make the value to None
saved_weight_value = saved_weights.get(symbolic_weight_name, None)
# Add the updated name to the final list for computing missing/unexpected values
symbolic_weights_names.add(symbolic_weight_name)
# If the current weight is found
if saved_weight_value is not None:
# Check if the shape of the current weight and the one from the H5 file are different
if K.int_shape(symbolic_weight) != saved_weight_value.shape:
# If yes we reshape the weight from the H5 file accordingly to the current weight
# If the two shapes are not compatible we raise an issue
try:
array = np.reshape(saved_weight_value, K.int_shape(symbolic_weight))
except AssertionError as e:
e.args += (K.int_shape(symbolic_weight), saved_weight_value.shape)
raise e
else:
array = saved_weight_value
# We create the tuple that will be loaded and add it to the final list
weight_value_tuples.append((symbolic_weight, array))
# Load all the weights
K.batch_set_value(weight_value_tuples)
# Compute the missing and unexpected layers
missing_layers.extend(list(symbolic_weights_names - saved_weight_names_set))
unexpected_layers.extend(list(saved_weight_names_set - symbolic_weights_names))
return missing_layers, unexpected_layers
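# Hedged illustration (not part of the original file): typical call pattern for
# load_tf_weights; "tf_model.h5" is a hypothetical checkpoint path and `model`
# must already be built with matching layer names.
#
# missing, unexpected = load_tf_weights(model, "tf_model.h5")
# if missing:
#     logger.warning("Layers not initialized from the checkpoint: %s", missing)
# if unexpected:
#     logger.warning("Checkpoint layers not used by the model: %s", unexpected)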
class TFPreTrainedModel(tf.keras.Model, TFModelUtilsMixin, TFGenerationMixin):
r"""
Base class for all TF models.
:class:`~transformers.TFPreTrainedModel` takes care of storing the configuration of the models and handles methods
for loading, downloading and saving models as well as a few methods common to all models to:
* resize the input embeddings,
* prune heads in the self-attention heads.
Class attributes (overridden by derived classes):
- **config_class** (:class:`~transformers.PretrainedConfig`) -- A subclass of
:class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture.
- **base_model_prefix** (:obj:`str`) -- A string indicating the attribute associated to the base model in
derived classes of the same architecture adding modules on top of the base model.
"""
config_class = None
base_model_prefix = ""
# a list of re pattern of tensor names to ignore from the model when loading the model weights
# (and avoid unnecessary warnings).
_keys_to_ignore_on_load_missing = None
# a list of re pattern of tensor names to ignore from the weights when loading the model weights
# (and avoid unnecessary warnings).
_keys_to_ignore_on_load_unexpected = None
@property
def dummy_inputs(self) -> Dict[str, tf.Tensor]:
"""
Dummy inputs to build the network.
Returns:
:obj:`Dict[str, tf.Tensor]`: The dummy inputs.
"""
return {"input_ids": tf.constant(DUMMY_INPUTS)}
def __init__(self, config, *inputs, **kwargs):
super().__init__(*inputs, **kwargs)
if not isinstance(config, PretrainedConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. "
"To create a model from a pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
)
)
# Save config and origin of the pretrained weights if given in model
self.config = config
self.name_or_path = config.name_or_path
def get_input_embeddings(self) -> tf.keras.layers.Layer:
"""
Returns the model's input embeddings.
Returns:
            :obj:`tf.keras.layers.Layer`: A Keras layer mapping vocabulary to hidden states.
"""
base_model = getattr(self, self.base_model_prefix, self)
if base_model is not self:
return base_model.get_input_embeddings()
else:
raise NotImplementedError
def set_input_embeddings(self, value):
"""
Set model's input embeddings.
Args:
value (:obj:`tf.keras.layers.Layer`):
A module mapping vocabulary to hidden states.
"""
base_model = getattr(self, self.base_model_prefix, self)
if base_model is not self:
base_model.set_input_embeddings(value)
else:
raise NotImplementedError
def get_output_embeddings(self) -> tf.keras.layers.Layer:
"""
Returns the model's output embeddings
Returns:
            :obj:`tf.keras.layers.Layer`: A Keras layer mapping hidden states to vocabulary.
"""
return None # Overwrite for models with output embeddings
def get_output_layer_with_bias(self) -> Union[None, tf.keras.layers.Layer]:
"""
Get the layer that handles a bias attribute in case the model has an LM head with weights tied to the
embeddings.
Return:
:obj:`tf.keras.layers.Layer`: The layer that handles the bias, None if not an LM model.
"""
return None
def get_prefix_bias_name(self) -> Union[None, str]:
"""
Get the concatenated prefix name of the bias from the model name to the parent layer.
Return:
:obj:`str`: The prefix name of the bias.
"""
return None
def resize_token_embeddings(self, new_num_tokens=None) -> tf.Variable:
"""
Resizes input token embeddings matrix of the model if :obj:`new_num_tokens != config.vocab_size`.
Takes care of tying weights embeddings afterwards if the model class has a :obj:`tie_weights()` method.
Arguments:
new_num_tokens (:obj:`int`, `optional`):
The number of new tokens in the embedding matrix. Increasing the size will add newly initialized
vectors at the end. Reducing the size will remove vectors from the end. If not provided or :obj:`None`,
just returns a pointer to the input tokens :obj:`tf.Variable` module of the model without doing
anything.
Return:
:obj:`tf.Variable`: Pointer to the input tokens Embeddings Module of the model.
"""
model_embeds = self._resize_token_embeddings(new_num_tokens)
if new_num_tokens is None:
return model_embeds
return model_embeds
def _resize_token_embeddings(self, new_num_tokens):
# get_input_embeddings and set_input_embeddings need to be implemented in base layer.
base_model = getattr(self, self.base_model_prefix, self)
old_embeddings = base_model.get_input_embeddings()
new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
base_model.set_input_embeddings(new_embeddings)
# Update base model and current model config
self.config.vocab_size = new_num_tokens
base_model.vocab_size = new_num_tokens
return base_model.get_input_embeddings()
def _get_word_embeddings(self, embeddings):
if hasattr(embeddings, "word_embeddings"):
# TFBertEmbeddings, TFAlbertEmbeddings, TFElectraEmbeddings
return embeddings.word_embeddings
elif hasattr(embeddings, "weight"):
# TFSharedEmbeddings
return embeddings.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
embeddings.build([])
if hasattr(embeddings, "word_embeddings"):
# TFBertEmbeddings, TFAlbertEmbeddings, TFElectraEmbeddings
return embeddings.word_embeddings
elif hasattr(embeddings, "weight"):
# TFSharedEmbeddings
return embeddings.weight
else:
raise ValueError("word embedding is not defined.")
def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None) -> tf.Variable:
"""
Build a resized Embedding Module from a provided token Embedding Module. Increasing the size will add newly
initialized vectors at the end. Reducing the size will remove vectors from the end
Args:
old_embeddings (:obj:`tf.Variable`):
Old embeddings to be resized.
new_num_tokens (:obj:`int`, `optional`):
New number of tokens in the embedding matrix.
Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
vectors from the end. If not provided or :obj:`None`, just returns a pointer to the input tokens
                :obj:`tf.Variable` module of the model without doing anything.
Return:
:obj:`tf.Variable`: Pointer to the resized Embedding Module or the old Embedding Module if
:obj:`new_num_tokens` is :obj:`None`
"""
word_embeddings = self._get_word_embeddings(old_embeddings)
bias_layer = self.get_output_layer_with_bias()
if new_num_tokens is None:
return word_embeddings
old_num_tokens, old_embedding_dim = word_embeddings.shape
if old_num_tokens == new_num_tokens:
return word_embeddings
# initialize new embeddings
# todo: initializer range is not always passed in config.
init_range = getattr(self.config, "initializer_range", 0.02)
name = (
self.name
+ "/"
+ self.base_model_prefix
+ "/"
+ old_embeddings.name
+ "/"
+ word_embeddings.name.split(":")[0]
)
new_embeddings = self.add_weight(
name=name,
shape=[new_num_tokens, old_embedding_dim],
initializer=get_initializer(init_range),
dtype=tf.float32,
)
init_weights = tf.make_ndarray(tf.make_tensor_proto(new_embeddings.value()))
# Copy token embeddings from the previous weights
num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
init_weights[:num_tokens_to_copy] = word_embeddings.value()[:num_tokens_to_copy, :]
new_embeddings.assign(init_weights)
if bias_layer is not None:
if not hasattr(bias_layer, "bias"):
bias_layer.build([])
# Second check in order to be sure the attribute has been properly created
if not hasattr(bias_layer, "bias"):
raise ValueError("bias is not defined.")
# initialize bias
init_bias = np.zeros((new_num_tokens,))
init_bias[:num_tokens_to_copy] = bias_layer.bias.value()[
:num_tokens_to_copy
] # tf.make_ndarray(tf.make_tensor_proto(bias_layer.bias.value()))[:num_tokens_to_copy]
bias_layer.bias = self.add_weight(
shape=(new_num_tokens,),
initializer="zeros",
trainable=True,
name=self.get_prefix_bias_name() + "/bias",
)
bias_layer.bias.assign(init_bias)
output_embeddings = self.get_output_embeddings()
if output_embeddings is not None:
if self.get_input_embeddings() != output_embeddings:
if not hasattr(output_embeddings, "decoder"):
output_embeddings.build([])
# Second check in order to be sure the attribute has been properly created
if not hasattr(output_embeddings, "decoder"):
raise ValueError("decoder is not defined.")
# initialize decoder
init_weights = np.zeros((new_num_tokens, old_embedding_dim))
init_weights[:num_tokens_to_copy] = output_embeddings.decoder.value()[:num_tokens_to_copy, :]
output_embeddings.decoder = self.add_weight(
shape=(new_num_tokens, old_embedding_dim),
initializer="zeros",
trainable=True,
name=self.get_prefix_bias_name() + "/decoder/weight",
)
output_embeddings.decoder.assign(init_weights)
return new_embeddings
def prune_heads(self, heads_to_prune):
"""
Prunes heads of the base model.
Arguments:
heads_to_prune (:obj:`Dict[int, List[int]]`):
Dictionary with keys being selected layer indices (:obj:`int`) and associated values being the list of
heads to prune in said layer (list of :obj:`int`). For instance {1: [0, 2], 2: [2, 3]} will prune heads
0 and 2 on layer 1 and heads 2 and 3 on layer 2.
"""
raise NotImplementedError
def save_pretrained(self, save_directory):
"""
Save a model and its configuration file to a directory, so that it can be re-loaded using the
:func:`~transformers.TFPreTrainedModel.from_pretrained` class method.
Arguments:
save_directory (:obj:`str`):
Directory to which to save. Will be created if it doesn't exist.
"""
if os.path.isfile(save_directory):
logger.error("Provided path ({}) should be a directory, not a file".format(save_directory))
return
os.makedirs(save_directory, exist_ok=True)
# Save configuration file
self.config.save_pretrained(save_directory)
# If we save using the predefined names, we can load using `from_pretrained`
output_model_file = os.path.join(save_directory, TF2_WEIGHTS_NAME)
self.save_weights(output_model_file)
logger.info("Model weights saved in {}".format(output_model_file))
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""
Instantiate a pretrained TF 2.0 model from a pre-trained model configuration.
The warning `Weights from XXX not initialized from pretrained model` means that the weights of XXX do not come
pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning
task.
The warning `Weights from XXX not used in YYY` means that the layer XXX is not used by YYY, therefore those
weights are discarded.
Parameters:
pretrained_model_name_or_path (:obj:`str`, `optional`):
Can be either:
- A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co.
Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under
a user or organization name, like ``dbmdz/bert-base-german-cased``.
- A path to a `directory` containing model weights saved using
                  :func:`~transformers.TFPreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``.
- A path or url to a `PyTorch state_dict save file` (e.g, ``./pt_model/pytorch_model.bin``). In
this case, ``from_pt`` should be set to :obj:`True` and a configuration object should be provided
as ``config`` argument. This loading path is slower than converting the PyTorch model in a
TensorFlow model using the provided conversion scripts and loading the TensorFlow model
afterwards.
- :obj:`None` if you are both providing the configuration and state dictionary (resp. with keyword
arguments ``config`` and ``state_dict``).
model_args (sequence of positional arguments, `optional`):
                All remaining positional arguments will be passed to the underlying model's ``__init__`` method.
config (:obj:`Union[PretrainedConfig, str]`, `optional`):
Can be either:
- an instance of a class derived from :class:`~transformers.PretrainedConfig`,
- a string valid as input to :func:`~transformers.PretrainedConfig.from_pretrained`.
Configuration for the model to use instead of an automatically loaded configuation. Configuration can
be automatically loaded when:
- The model is a model provided by the library (loaded with the `model id` string of a pretrained
model).
- The model was saved using :func:`~transformers.TFPreTrainedModel.save_pretrained` and is reloaded
by supplying the save directory.
- The model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a
configuration JSON file named `config.json` is found in the directory.
from_pt: (:obj:`bool`, `optional`, defaults to :obj:`False`):
Load the model weights from a PyTorch state_dict save file (see docstring of
``pretrained_model_name_or_path`` argument).
cache_dir (:obj:`str`, `optional`):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to delete incompletely received files. Will attempt to resume the download if such a
file exists.
            proxies (:obj:`Dict[str, str]`, `optional`):
A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
output_loading_info(:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
local_files_only(:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to only look at local files (i.e., do not try to download the model).
use_auth_token (:obj:`str` or `bool`, `optional`):
The token to use as HTTP bearer authorization for remote files. If :obj:`True`, will use the token
generated when running :obj:`transformers-cli login` (stored in :obj:`~/.huggingface`).
revision(:obj:`str`, `optional`, defaults to :obj:`"main"`):
                The specific model version to use. It can be a branch name, a tag name, or a commit id. Since we use
                a git-based system for storing models and other artifacts on huggingface.co, ``revision`` can be any
                identifier allowed by git.
mirror(:obj:`str`, `optional`, defaults to :obj:`None`):
Mirror source to accelerate downloads in China. If you are from China and have an accessibility
                problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or
                safety of the mirror. Please refer to the mirror site for more information.
kwargs (remaining dictionary of keyword arguments, `optional`):
                Can be used to update the configuration object (after it has been loaded) and initialize the model (e.g.,
:obj:`output_attentions=True`). Behaves differently depending on whether a ``config`` is provided or
automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the
underlying model's ``__init__`` method (we assume all relevant updates to the configuration have
already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class
initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of
``kwargs`` that corresponds to a configuration attribute will be used to override said attribute
with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration
attribute will be passed to the underlying model's ``__init__`` function.
.. note::
Passing :obj:`use_auth_token=True` is required when you want to use a private model.
Examples::
>>> from transformers import BertConfig, TFBertModel
>>> # Download model and configuration from huggingface.co and cache.
>>> model = TFBertModel.from_pretrained('bert-base-uncased')
>>> # Model was saved using `save_pretrained('./test/saved_model/')` (for example purposes, not runnable).
>>> model = TFBertModel.from_pretrained('./test/saved_model/')
>>> # Update configuration during loading.
>>> model = TFBertModel.from_pretrained('bert-base-uncased', output_attentions=True)
>>> assert model.config.output_attentions == True
>>> # Loading from a Pytorch model file instead of a TensorFlow checkpoint (slower, for example purposes, not runnable).
>>> config = BertConfig.from_json_file('./pt_model/my_pt_model_config.json')
>>> model = TFBertModel.from_pretrained('./pt_model/my_pytorch_model.bin', from_pt=True, config=config)
"""
config = kwargs.pop("config", None)
cache_dir = kwargs.pop("cache_dir", None)
from_pt = kwargs.pop("from_pt", False)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
output_loading_info = kwargs.pop("output_loading_info", False)
local_files_only = kwargs.pop("local_files_only", False)
use_auth_token = kwargs.pop("use_auth_token", None)
revision = kwargs.pop("revision", None)
mirror = kwargs.pop("mirror", None)
# Load config if we don't provide a configuration
if not isinstance(config, PretrainedConfig):
config_path = config if config is not None else pretrained_model_name_or_path
config, model_kwargs = cls.config_class.from_pretrained(
config_path,
*model_args,
cache_dir=cache_dir,
return_unused_kwargs=True,
force_download=force_download,
resume_download=resume_download,
proxies=proxies,
local_files_only=local_files_only,
use_auth_token=use_auth_token,
revision=revision,
**kwargs,
)
else:
model_kwargs = kwargs
# Load model
if pretrained_model_name_or_path is not None:
if os.path.isdir(pretrained_model_name_or_path):
if from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
# Load from a PyTorch checkpoint in priority if from_pt
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
elif os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)):
# Load from a TF 2.0 checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)
else:
raise EnvironmentError(
"Error no file named {} found in directory {} or `from_pt` set to False".format(
[WEIGHTS_NAME, TF2_WEIGHTS_NAME], pretrained_model_name_or_path
)
)
elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
archive_file = pretrained_model_name_or_path
elif os.path.isfile(pretrained_model_name_or_path + ".index"):
archive_file = pretrained_model_name_or_path + ".index"
else:
archive_file = hf_bucket_url(
pretrained_model_name_or_path,
filename=(WEIGHTS_NAME if from_pt else TF2_WEIGHTS_NAME),
revision=revision,
mirror=mirror,
)
try:
# Load from URL or cache if already cached
resolved_archive_file = cached_path(
archive_file,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
use_auth_token=use_auth_token,
)
except EnvironmentError as err:
logger.error(err)
msg = (
f"Can't load weights for '{pretrained_model_name_or_path}'. Make sure that:\n\n"
f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n"
f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a file named one of {TF2_WEIGHTS_NAME}, {WEIGHTS_NAME}.\n\n"
)
raise EnvironmentError(msg)
if resolved_archive_file == archive_file:
logger.info("loading weights file {}".format(archive_file))
else:
logger.info("loading weights file {} from cache at {}".format(archive_file, resolved_archive_file))
else:
resolved_archive_file = None
config.name_or_path = pretrained_model_name_or_path
# Instantiate model.
model = cls(config, *model_args, **model_kwargs)
if from_pt:
from .modeling_tf_pytorch_utils import load_pytorch_checkpoint_in_tf2_model
# Load from a PyTorch checkpoint
return load_pytorch_checkpoint_in_tf2_model(model, resolved_archive_file, allow_missing_keys=True)
model(model.dummy_inputs, training=False) # build the network with dummy inputs
assert os.path.isfile(resolved_archive_file), "Error retrieving file {}".format(resolved_archive_file)
# 'by_name' allow us to do transfer learning by skipping/adding layers
# see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1339-L1357
try:
missing_keys, unexpected_keys = load_tf_weights(model, resolved_archive_file)
except OSError:
raise OSError(
"Unable to load weights from h5 file. "
"If you tried to load a TF 2.0 model from a PyTorch checkpoint, please set from_pt=True. "
)
model(model.dummy_inputs, training=False) # Make sure restore ops are run
if cls._keys_to_ignore_on_load_missing is not None:
for pat in cls._keys_to_ignore_on_load_missing:
missing_keys = [k for k in missing_keys if re.search(pat, k) is None]
if cls._keys_to_ignore_on_load_unexpected is not None:
for pat in cls._keys_to_ignore_on_load_unexpected:
unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]
if len(unexpected_keys) > 0:
logger.warning(
f"Some layers from the model checkpoint at {pretrained_model_name_or_path} were not used when "
f"initializing {model.__class__.__name__}: {unexpected_keys}\n"
f"- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task "
f"or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n"
f"- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect "
f"to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)."
)
else:
logger.warning(f"All model checkpoint layers were used when initializing {model.__class__.__name__}.\n")
if len(missing_keys) > 0:
logger.warning(
f"Some layers of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} "
f"and are newly initialized: {missing_keys}\n"
f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
)
else:
logger.warning(
f"All the layers of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.\n"
f"If your task is similar to the task the model of the checkpoint was trained on, "
f"you can already use {model.__class__.__name__} for predictions without further training."
)
if output_loading_info:
loading_info = {"missing_keys": missing_keys, "unexpected_keys": unexpected_keys}
return model, loading_info
return model
class TFConv1D(tf.keras.layers.Layer):
"""
1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2).
Basically works like a linear layer but the weights are transposed.
Args:
nf (:obj:`int`):
The number of output features.
nx (:obj:`int`):
The number of input features.
initializer_range (:obj:`float`, `optional`, defaults to 0.02):
The standard deviation to use to initialize the weights.
kwargs:
Additional keyword arguments passed along to the :obj:`__init__` of :obj:`tf.keras.layers.Layer`.
"""
def __init__(self, nf, nx, initializer_range=0.02, **kwargs):
super().__init__(**kwargs)
self.nf = nf
self.nx = nx
self.initializer_range = initializer_range
def build(self, input_shape):
self.weight = self.add_weight(
"weight", shape=[self.nx, self.nf], initializer=get_initializer(self.initializer_range)
)
self.bias = self.add_weight("bias", shape=[1, self.nf], initializer=tf.zeros_initializer())
def call(self, x):
bz, sl = shape_list(x)[:2]
x = tf.reshape(x, [-1, self.nx])
x = tf.matmul(x, self.weight) + self.bias
x = tf.reshape(x, [bz, sl, self.nf])
return x
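# Illustrative usage sketch (assumes eager TF2; the sizes below are examples only):
# TFConv1D behaves like a dense layer whose kernel is stored transposed, mapping the
# last dimension from ``nx`` to ``nf``.
#
#   >>> conv = TFConv1D(nf=8, nx=4)
#   >>> x = tf.random.uniform([2, 3, 4])   # [batch, seq_len, nx]
#   >>> y = conv(x)                        # [batch, seq_len, nf] -> shape [2, 3, 8]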
class TFSharedEmbeddings(tf.keras.layers.Layer):
r"""
Construct shared token embeddings.
    The weights of the embedding layer are usually shared with the weights of the linear decoder when doing language
modeling.
Args:
vocab_size (:obj:`int`):
The size of the vocabulary, e.g., the number of unique tokens.
hidden_size (:obj:`int`):
The size of the embedding vectors.
initializer_range (:obj:`float`, `optional`):
The standard deviation to use when initializing the weights. If no value is provided, it will default to
:math:`1/\sqrt{hidden\_size}`.
kwargs:
Additional keyword arguments passed along to the :obj:`__init__` of :obj:`tf.keras.layers.Layer`.
"""
def __init__(self, vocab_size: int, hidden_size: int, initializer_range: Optional[float] = None, **kwargs):
super().__init__(**kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.initializer_range = hidden_size ** -0.5 if initializer_range is None else initializer_range
def build(self, input_shape):
"""
Build shared token embedding layer Shared weights logic adapted from
https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24
"""
self.weight = self.add_weight(
"weight", shape=[self.vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range)
)
super().build(input_shape)
def get_config(self):
config = {
"vocab_size": self.vocab_size,
"hidden_size": self.hidden_size,
"initializer_range": self.initializer_range,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs: tf.Tensor, mode: str = "embedding") -> tf.Tensor:
"""
Get token embeddings of inputs or decode final hidden state.
Args:
inputs (:obj:`tf.Tensor`):
In embedding mode, should be an int64 tensor with shape :obj:`[batch_size, length]`.
In linear mode, should be a float tensor with shape :obj:`[batch_size, length, hidden_size]`.
mode (:obj:`str`, defaults to :obj:`"embedding"`):
A valid value is either :obj:`"embedding"` or :obj:`"linear"`, the first one indicates that the layer
should be used as an embedding layer, the second one that the layer should be used as a linear decoder.
Returns:
:obj:`tf.Tensor`: In embedding mode, the output is a float32 embedding tensor, with shape
:obj:`[batch_size, length, embedding_size]`.
In linear mode, the output is a float32 with shape :obj:`[batch_size, length, vocab_size]`.
Raises:
ValueError: if :obj:`mode` is not valid.
Shared weights logic is adapted from `here
<https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24>`__.
"""
if mode == "embedding":
return self._embedding(inputs)
elif mode == "linear":
return self._linear(inputs)
else:
raise ValueError("mode {} is not valid.".format(mode))
def _embedding(self, input_ids):
"""Applies embedding based on inputs tensor."""
return tf.gather(self.weight, input_ids)
def _linear(self, inputs):
"""
Computes logits by running inputs through a linear layer.
Args:
inputs: A float32 tensor with shape [..., hidden_size]
Returns:
float32 tensor with shape [..., vocab_size].
"""
first_dims = shape_list(inputs)[:-1]
x = tf.reshape(inputs, [-1, self.hidden_size])
logits = tf.matmul(x, self.weight, transpose_b=True)
return tf.reshape(logits, first_dims + [self.vocab_size])
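# Illustrative usage sketch (sizes are examples only): the same weight matrix is used
# for the embedding lookup and, transposed, for projecting hidden states back to
# vocabulary logits.
#
#   >>> emb = TFSharedEmbeddings(vocab_size=100, hidden_size=16)
#   >>> ids = tf.constant([[1, 2, 3]])         # [batch, length]
#   >>> hidden = emb(ids, mode="embedding")    # shape [1, 3, 16]
#   >>> logits = emb(hidden, mode="linear")    # shape [1, 3, 100]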
class TFSequenceSummary(tf.keras.layers.Layer):
"""
Compute a single vector summary of a sequence hidden states.
Args:
config (:class:`~transformers.PretrainedConfig`):
The config used by the model. Relevant arguments in the config class of the model are (refer to the actual
config class of your model for the default values it uses):
- **summary_type** (:obj:`str`) -- The method to use to make this summary. Accepted values are:
- :obj:`"last"` -- Take the last token hidden state (like XLNet)
- :obj:`"first"` -- Take the first token hidden state (like Bert)
- :obj:`"mean"` -- Take the mean of all tokens hidden states
- :obj:`"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2)
- :obj:`"attn"` -- Not implemented now, use multi-head attention
- **summary_use_proj** (:obj:`bool`) -- Add a projection after the vector extraction.
- **summary_proj_to_labels** (:obj:`bool`) -- If :obj:`True`, the projection outputs to
:obj:`config.num_labels` classes (otherwise to :obj:`config.hidden_size`).
- **summary_activation** (:obj:`Optional[str]`) -- Set to :obj:`"tanh"` to add a tanh activation to the
output, another string or :obj:`None` will add no activation.
- **summary_first_dropout** (:obj:`float`) -- Optional dropout probability before the projection and
activation.
            - **summary_last_dropout** (:obj:`float`) -- Optional dropout probability after the projection and
activation.
initializer_range (:obj:`float`, defaults to 0.02): The standard deviation to use to initialize the weights.
kwargs:
Additional keyword arguments passed along to the :obj:`__init__` of :obj:`tf.keras.layers.Layer`.
"""
def __init__(self, config: PretrainedConfig, initializer_range: float = 0.02, **kwargs):
super().__init__(**kwargs)
        self.summary_type = config.summary_type if hasattr(config, "summary_type") else "last"
if self.summary_type == "attn":
# We should use a standard multi-head attention module with absolute positional embedding for that.
# Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276
# We can probably just use the multi-head attention module of PyTorch >=1.1.0
raise NotImplementedError
self.has_summary = hasattr(config, "summary_use_proj") and config.summary_use_proj
if self.has_summary:
if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0:
num_classes = config.num_labels
else:
num_classes = config.hidden_size
self.summary = tf.keras.layers.Dense(
num_classes, kernel_initializer=get_initializer(initializer_range), name="summary"
)
self.has_activation = hasattr(config, "summary_activation") and config.summary_activation == "tanh"
if self.has_activation:
self.activation = tf.keras.activations.tanh
self.has_first_dropout = hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0
if self.has_first_dropout:
self.first_dropout = tf.keras.layers.Dropout(config.summary_first_dropout)
self.has_last_dropout = hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0
if self.has_last_dropout:
self.last_dropout = tf.keras.layers.Dropout(config.summary_last_dropout)
def call(self, inputs, cls_index=None, training=False):
if not isinstance(inputs, (dict, tuple, list)):
hidden_states = inputs
elif isinstance(inputs, (tuple, list)):
hidden_states = inputs[0]
cls_index = inputs[1] if len(inputs) > 1 else None
assert len(inputs) <= 2, "Too many inputs."
else:
hidden_states = inputs.get("hidden_states")
cls_index = inputs.get("cls_index", None)
if self.summary_type == "last":
output = hidden_states[:, -1]
elif self.summary_type == "first":
output = hidden_states[:, 0]
elif self.summary_type == "mean":
output = tf.reduce_mean(hidden_states, axis=1)
elif self.summary_type == "cls_index":
hidden_shape = shape_list(hidden_states) # e.g. [batch, num choices, seq length, hidden dims]
if cls_index is None:
cls_index = tf.fill(
hidden_shape[:-2], hidden_shape[-2] - 1
) # A tensor full of shape [batch] or [batch, num choices] full of sequence length
cls_shape = shape_list(cls_index)
if len(cls_shape) <= len(hidden_shape) - 2:
cls_index = cls_index[..., tf.newaxis]
# else:
# cls_index = cls_index[..., tf.newaxis]
# cls_index = cls_index.expand((-1,) * (cls_index.dim()-1) + (hidden_states.size(-1),))
# shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states
output = tf.gather(hidden_states, cls_index, batch_dims=len(hidden_shape) - 2)
output = tf.squeeze(
output, axis=len(hidden_shape) - 2
) # shape of output: (batch, num choices, hidden_size)
elif self.summary_type == "attn":
raise NotImplementedError
if self.has_first_dropout:
output = self.first_dropout(output, training=training)
if self.has_summary:
output = self.summary(output)
if self.has_activation:
output = self.activation(output)
if self.has_last_dropout:
output = self.last_dropout(output, training=training)
return output
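# Illustrative usage sketch (``config``, ``hidden_states`` and ``cls_index`` are
# placeholders): with ``summary_type="cls_index"`` and ``summary_use_proj=True`` the
# layer gathers the hidden state at the supplied classification-token position and
# projects it to ``config.num_labels`` (or ``config.hidden_size``) features.
#
#   >>> summary = TFSequenceSummary(config, initializer_range=0.02)
#   >>> pooled = summary(hidden_states, cls_index=cls_index)   # [batch, num_labels] or [batch, hidden_size]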
def shape_list(tensor: tf.Tensor) -> List[int]:
"""
Deal with dynamic shape in tensorflow cleanly.
Args:
tensor (:obj:`tf.Tensor`): The tensor we want the shape of.
Returns:
:obj:`List[int]`: The shape of the tensor as a list.
"""
dynamic = tf.shape(tensor)
if tensor.shape == tf.TensorShape(None):
return dynamic
static = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(static)]
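# Illustrative usage sketch: static dimensions come back as Python ints while unknown
# ones fall back to the dynamic ``tf.shape`` entries, so the result is safe to use
# inside ``tf.function``-traced code.
#
#   >>> x = tf.keras.Input(shape=(None, 16))   # symbolic shape [None, None, 16]
#   >>> shape_list(x)                          # [<dynamic batch>, <dynamic length>, 16]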
def get_initializer(initializer_range: float = 0.02) -> tf.initializers.TruncatedNormal:
"""
Creates a :obj:`tf.initializers.TruncatedNormal` with the given range.
Args:
initializer_range (`float`, defaults to 0.02): Standard deviation of the initializer range.
Returns:
:obj:`tf.initializers.TruncatedNormal`: The truncated normal initializer.
"""
return tf.keras.initializers.TruncatedNormal(stddev=initializer_range)
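# Illustrative usage sketch:
#
#   >>> init = get_initializer(0.02)
#   >>> w = init(shape=[4, 4])   # truncated-normal values with stddev 0.02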
class TFWrappedEmbeddings:
"""
    This class wraps the TFSharedEmbeddings layer into a python 'no-keras-layer' class to avoid problems with
    weight restoring. It also makes sure that the layer is called from the correct scope to avoid problems with
    saving/storing the correct weights.
"""
def __init__(self, layer, abs_scope_name=None):
self._layer = layer
self._abs_scope_name = abs_scope_name
def call(self, inputs, mode="embedding"):
if self._abs_scope_name is None:
return self._layer.call(inputs, mode)
# if an abs scope name is given to the embedding variable, call variable from absolute scope
with tf.compat.v1.variable_scope(self._abs_scope_name, auxiliary_name_scope=False) as abs_scope_name:
with tf.name_scope(abs_scope_name.original_name_scope):
return self._layer.call(inputs, mode)
def __call__(self, inputs, mode="embedding"):
if self._abs_scope_name is None:
return self._layer(inputs, mode)
# if an abs scope name is given to the embedding variable, call variable from absolute scope
with tf.compat.v1.variable_scope(self._abs_scope_name, auxiliary_name_scope=False) as abs_scope_name:
with tf.name_scope(abs_scope_name.original_name_scope):
return self._layer(inputs, mode)
|
py | b401a803364ef3b0f2b9aecdc5035c98abddbb0f | #!/usr/bin/env python
from typing import List, Tuple, Optional
import subprocess as sp
import pickle
from pathlib import Path
import sublime
import sublime_plugin
import threading
pkgs_fetch_lock = threading.Lock()
def run_conda_search() -> List[Tuple[str, str, str, str]]:
p = sp.Popen(['conda', 'search', ], stdout=sp.PIPE, stderr=sp.PIPE)
stdout, stderr = p.communicate()
stdout = stdout.decode()
lines = stdout.split('\n')
for i, line in enumerate(lines):
        if line.startswith('#'):
break
return [tuple(line.strip().split()) for line in lines[i+1:] if line.split()]
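# Note on the expected input (based on the parsing above): `conda search` prints a
# header line starting with '#' (roughly "# Name  Version  Build  Channel") followed
# by one whitespace-separated row per package build, which run_conda_search returns
# as (name, version, build, channel) tuples.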
def cache_pkgs_list(pkgs: List[Tuple[str, str, str, str]]) -> None:
cache_dir = Path(sublime.cache_path()) / 'sublime-nextflow'
cache_dir.mkdir(parents=True, exist_ok=True)
cache_path = cache_dir / 'conda_search.pickle'
with open(cache_path, 'wb') as fh:
pickle.dump(pkgs, fh)
def fetch_pkgs() -> None:
pkgs = run_conda_search()
cache_pkgs_list(pkgs)
def get_cached_pkgs_list() -> Optional[List[Tuple[str, str, str, str]]]:
cache_dir = Path(sublime.cache_path()) / 'sublime-nextflow'
cache_path = cache_dir / 'conda_search.pickle'
if not cache_path.exists():
return None
with open(cache_path, 'rb') as fh:
return pickle.load(fh)
class NextflowCondaPackagesInfoFetchCommand(sublime_plugin.WindowCommand):
def run(self):
with pkgs_fetch_lock:
thread = threading.Thread(target=fetch_pkgs)
thread.daemon = True
thread.start()
class NextflowCondaPackagesEventListener(sublime_plugin.EventListener):
def on_query_completions(self, view, prefix, locations):
if view.syntax().name != 'Nextflow':
return
if len(locations) > 1:
return
point = locations[0]
if not view.score_selector(point, 'source.nextflow meta.definition.process.nextflow meta.definition.conda-directive.nextflow string'):
return
pkgs = get_cached_pkgs_list()
if pkgs:
print(f'Retrieved {len(pkgs)} Conda packages from cache')
else:
print('Running nextflow_conda_packages_info_fetch command')
view.run_command('nextflow_conda_packages_info_fetch')
return
pkgs = pkgs[::-1]
flags = sublime.INHIBIT_REORDER | sublime.INHIBIT_WORD_COMPLETIONS
completions = sublime.CompletionList(
completions=[
sublime.CompletionItem(
trigger=f'{name}={version}={build}' if channel.startswith('pkgs/') else f'{channel}::{name}={version}={build}',
annotation=f'{channel}::{name}={version}={build}',
) for name,version,build,channel in pkgs
],
flags=flags
)
return completions
|
py | b401a9f09295cfd5840daa08562a00b847f9fa71 | # Copyright 2011-2013 Colin Scott
# Copyright 2011-2013 Andreas Wundsam
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This is STS's end of the sync protocol. Listens to all controller-specific
syncers and dispatches messages to STS handlers.
'''
from sts.syncproto.base import SyncProtocolSpeaker, SyncMessage, SyncTime, SyncIODelegate
from pox.lib.util import parse_openflow_uri, connect_socket_with_backoff
import logging
log = logging.getLogger("sts_sync_proto")
class STSSyncProtocolSpeaker(SyncProtocolSpeaker):
def __init__(self, controller, state_master, io_delegate):
if state_master is None:
raise ValueError("state_master is null")
self.state_master = state_master
self.controller = controller
handlers = {
("ASYNC", "StateChange"): self._log_async_state_change,
("SYNC", "StateChange"): self._log_sync_state_change,
("REQUEST", "DeterministicValue"): self._get_deterministic_value
}
SyncProtocolSpeaker.__init__(self, handlers, io_delegate)
def _log_async_state_change(self, message):
self.state_master.state_change("ASYNC", message.xid, self.controller, message.time, message.fingerPrint, message.name, message.value)
def _log_sync_state_change(self, message):
# Note: control_flow needs to register a handler on state_master to ACK the
# controller
self.state_master.state_change("SYNC", message.xid, self.controller, message.time, message.fingerPrint, message.name, message.value)
def _get_deterministic_value(self, message):
self.state_master.get_deterministic_value(self.controller, message.name,
message.xid)
class STSSyncConnection(object):
""" A connection to a controller with the sts sync protocol """
def __init__(self, controller, state_master, sync_uri):
self.controller = controller
(self.mode, self.host, self.port) = parse_openflow_uri(sync_uri)
if state_master is None:
raise ValueError("state_master is null")
self.state_master = state_master
self._on_disconnect = []
self.io_delegate = None
self.speaker = None
def on_disconnect(self, func):
self._on_disconnect.append(func)
def connect(self, io_master):
if self.mode != "tcp":
      raise RuntimeError("only tcp (active) mode is supported for now")
socket = connect_socket_with_backoff(self.host, self.port)
self.io_delegate = SyncIODelegate(io_master, socket)
self.speaker = STSSyncProtocolSpeaker(controller=self.controller,
state_master=self.state_master, io_delegate=self.io_delegate)
def disconnect(self):
self.io_delegate.close()
for handler in self._on_disconnect:
handler(self)
def close(self):
self.disconnect()
def get_nom_snapshot(self):
if self.speaker:
return self.speaker.sync_request("NOMSnapshot", "", timeout=10)
else:
log.warn("STSSyncConnection: not connected. cannot handle requests")
def send_link_notification(self, link_attrs):
# Link attrs must be a list of the form:
# [dpid1, port1, dpid2, port2]
if self.speaker:
msg = SyncMessage(type="ASYNC", messageClass="LinkDiscovery",
value=link_attrs)
return self.speaker.send(msg)
else:
log.warn("STSSyncConnection: not connected. cannot send link")
def ack_sync_notification(self, messageClass, xid):
if self.speaker:
return self.speaker.ack_sync_notification(messageClass, xid)
else:
log.warn("STSSyncConnection: not connected. cannot ACK")
def send_deterministic_value(self, xid, value):
if self.speaker:
msg = SyncMessage(type="RESPONSE", messageClass="DeterministicValue",
time=value, xid=xid, value=value)
return self.speaker.send(msg)
else:
      log.warn("STSSyncConnection: not connected. cannot send deterministic value")
class STSSyncConnectionManager(object):
  """The connection manager for the STS sync protocol.
  TODO: finish"""
def __init__(self, io_master, state_master):
self.io_master = io_master
self.sync_connections = []
if state_master is None:
raise ValueError("state_master is null")
self.state_master = state_master
def connect(self, controller, sync_uri):
s = STSSyncConnection(controller=controller, state_master=self.state_master, sync_uri=sync_uri)
s.connect(self.io_master)
s.on_disconnect(self.remove_connection)
self.sync_connections.append(s)
return s
def remove_connection(self, connection):
if connection in self.sync_connections:
self.sync_connections.remove(connection)
class STSSyncCallback(object):
""" override with your favorite functionality """
def state_change(self, type, xid, controller, time, fingerprint, name, value):
log.info("{}: controller: {} time: {} fingerprint: {} name: {} value: {}"\
.format(type, controller, time, fingerprint, name, value))
def get_deterministic_value(self, controller, name, xid):
if name == "gettimeofday":
return SyncTime.now()
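# Rough wiring sketch (the URI format and the variable names io_master/controller are
# assumptions, not taken from this module):
#
#   sync_manager = STSSyncConnectionManager(io_master, state_master=STSSyncCallback())
#   connection = sync_manager.connect(controller, sync_uri="tcp:localhost:18899")
#   snapshot = connection.get_nom_snapshot()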
|
py | b401ab6a4187a53fb81ef86ce4eb0566314ab4e4 | # Copyright (C) 2020 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Cycle Task RBAC Factory."""
from datetime import datetime
from ggrc import access_control
from ggrc.models import all_models
from integration.ggrc import Api, generator
from integration.ggrc.access_control.rbac_factories import base
from integration.ggrc.models import factories
class CycleTaskRBACFactory(base.BaseRBACFactory):
"""Cycle Task RBAC factory class."""
def __init__(self, user_id, acr, parent=None):
"""Set up objects for Cycle Task permission tests.
Args:
user_id: Id of user under which all operations will be run.
acr: Instance of ACR that should be assigned for tested user.
parent: Model name in scope of which objects should be set up.
"""
# pylint: disable=unused-argument
self.setup_workflow_scope(user_id, acr)
self.admin_control_id = {
name: id for id, name
in access_control.role.get_custom_roles_for("Control").items()
}["Admin"]
self.api = Api()
self.objgen = generator.ObjectGenerator()
self.objgen.api = self.api
if user_id:
self.user_id = user_id
user = all_models.Person.query.get(user_id)
self.api.set_user(user)
def create(self):
"""Create new Cycle Task object."""
cycle_tg = all_models.CycleTaskGroup.query.first()
return self.api.post(all_models.CycleTaskGroupObjectTask, {
"cycle_task_group_object_task": {
"title": "New Cycle Task",
"start_date": datetime.now().strftime("%Y-%m-%d"),
"end_date": datetime.now().strftime("%Y-%m-%d"),
"context": None,
"task_type": "text",
"cycle_task_group": {
"id": cycle_tg.id,
"type": "Task Group",
},
"cycle": {
"id": cycle_tg.cycle.id,
"type": "Cycle",
},
}
})
def read(self):
"""Read existing Cycle Task object."""
cycle_task = all_models.CycleTaskGroupObjectTask.query.first()
return self.api.get(cycle_task, cycle_task.id)
def update(self):
"""Update title of existing Cycle Task object."""
cycle_task = all_models.CycleTaskGroupObjectTask.query.first()
return self.api.put(cycle_task, {"title": factories.random_str()})
def delete(self):
"""Delete Cycle Task object."""
cycle_task = all_models.CycleTaskGroupObjectTask.query.first()
return self.api.delete(cycle_task)
def read_revisions(self):
"""Read revisions for Cycle Task object."""
responses = []
cycle_task = all_models.CycleTaskGroupObjectTask.query.first()
for query in ["source_type={}&source_id={}",
"destination_type={}&destination_id={}",
"resource_type={}&resource_id={}"]:
responses.append(
self.api.get_query(
all_models.CycleTaskGroupObjectTask,
query.format("cycle_task_group_object_task", cycle_task.id)
)
)
return responses
def add_comment(self):
"""Map new comment to cycle task."""
cycle_task = all_models.CycleTaskGroupObjectTask.query.get(
self.cycle_task_id
)
_, comment = self.objgen.generate_object(all_models.Comment, {
"description": factories.random_str(),
"context": None,
})
return self.objgen.generate_relationship(source=cycle_task,
destination=comment)[0]
def read_comment(self):
"""Read comments mapped to cycle task"""
cycle_task = all_models.CycleTaskGroupObjectTask.query.get(
self.cycle_task_id
)
with factories.single_commit():
comment = factories.CommentFactory(description=factories.random_str())
factories.RelationshipFactory(source=cycle_task, destination=comment)
query_request_data = [{
"fields": [],
"filters": {
"expression": {
"object_name": "CycleTaskGroupObjectTask",
"op": {
"name": "relevant"
},
"ids": [cycle_task.id]
}
},
"object_name": "Comment",
}]
response = self.api.send_request(
self.api.client.post,
data=query_request_data,
api_link="/query"
)
return response
  def map_control(self):
    """Map a Control on which the user has no rights to the Cycle Task."""
cycle_task = all_models.CycleTaskGroupObjectTask.query.first()
control = factories.ControlFactory()
return self.objgen.generate_relationship(
source=cycle_task,
destination=control,
)[0]
def map_created_control(self):
"""Map Control that was created by user to Cycle Task."""
cycle_task = all_models.CycleTaskGroupObjectTask.query.first()
control = factories.ControlFactory()
# pylint: disable=protected-access
for acl in control._access_control_list:
if acl.ac_role_id == self.admin_control_id:
factories.AccessControlPersonFactory(
person_id=self.user_id,
ac_list=acl,
)
return self.objgen.generate_relationship(
source=cycle_task,
destination=control,
)[0]
def read_mapped_control(self):
"""Read Control object mapped to Cycle Task."""
cycle_task = all_models.CycleTaskGroupObjectTask.query.first()
with factories.single_commit():
control = factories.ControlFactory()
factories.RelationshipFactory(source=cycle_task, destination=control)
return self.api.get(control, control.id)
  def upmap_control(self):
    """Unmap Control from Cycle Task."""
cycle_task = all_models.CycleTaskGroupObjectTask.query.first()
with factories.single_commit():
control = factories.ControlFactory()
rel = factories.RelationshipFactory(
source=cycle_task, destination=control
)
return self.api.delete(rel)
def start(self):
"""Start Cycle Task."""
cycle_task = all_models.CycleTaskGroupObjectTask.query.first()
return self.api.put(cycle_task, {"status": "In Progress"})
def end(self):
"""End Cycle Task."""
cycle_task = all_models.CycleTaskGroupObjectTask.query.first()
return self.api.put(cycle_task, {"status": "Finished"})
def verify(self):
"""Verify Cycle Task."""
cycle_task = all_models.CycleTaskGroupObjectTask.query.first()
return self.api.put(cycle_task, {"status": "Verified"})
def decline(self):
"""Decline Cycle Task."""
cycle_task = all_models.CycleTaskGroupObjectTask.query.first()
return self.api.put(cycle_task, {"status": "Declined"})
def deprecate(self):
"""Deprecate Cycle Task."""
cycle_task = all_models.CycleTaskGroupObjectTask.query.first()
return self.api.put(cycle_task, {"status": "Deprecated"})
def restore(self):
"""Restore Cycle Task."""
cycle_task = all_models.CycleTaskGroupObjectTask.query.first()
return self.api.put(cycle_task, {"status": "Assigned"})
|
py | b401abb2df50a0bebd8aac3024b71cd0e5944e31 | import jax.numpy as jnp
from jax import lax
def calc_rank_jc(flag_contact, nf):
    # Equivalent to: int(np.sum([1 for item in flag_contact if item != 0])) * nf
init_carry = 0.0
def f(carry, flag):
one_hot = jnp.heaviside(flag, 0.0)
new_carry = carry + one_hot
return new_carry, one_hot
rank_jc, one_hot = lax.scan(f, init_carry, flag_contact)
rank_jc = rank_jc * nf
return rank_jc, one_hot
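# Minimal self-check sketch (example values are made up): with two of the four contact
# flags non-zero and nf = 3, the expected rank is 2 * 3 = 6.
if __name__ == "__main__":
    flags = jnp.array([1.0, 0.0, 2.0, 0.0])
    rank_jc, one_hot = calc_rank_jc(flags, 3)
    print(rank_jc)   # expected: 6.0
    print(one_hot)   # expected: [1. 0. 1. 0.]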
|
py | b401aca478b10e7b773cd163fbed17115abecc7d | #!/usr/bin/env python
import os
import sys
import shutil
import fnmatch
import xml.dom.minidom as xmldom
from xml.dom import Node
def find_packages(path, arch=None):
for dirpath, dirnames, filenames in os.walk(path, topdown=False):
for dirname in dirnames:
if os.path.exists(os.path.join(dirpath, dirname, 'include')):
if not arch or dirname.endswith(arch):
yield dirname
def find_vcxproj_files(path):
for dirpath, dirnames, filenames in os.walk(path):
for filename in fnmatch.filter(filenames, '*.vcxproj'):
yield os.path.join(dirpath, filename)
def add_keys_to_node(node, data):
new = [] + data
for child in node.childNodes:
if child.nodeType == node.TEXT_NODE:
new += child.nodeValue.split(';')
new = list(set(new))
new.sort(key=lambda x: ('~'+x) if x[0] in ['$', '%'] else x)
child.nodeValue = ';'.join(new)
def add_to_all_files(element, include_dirs, library_dirs):
name = element.nodeName
if name.endswith('AdditionalIncludeDirectories'):
add_keys_to_node(element, include_dirs)
elif name.endswith('AdditionalLibraryDirectories'):
add_keys_to_node(element, library_dirs)
# elif name.endswith('AdditionalDependencies'):
# add_keys_to_node(element, LIBRARIES)
for child in element.childNodes:
add_to_all_files(child, include_dirs, library_dirs)
def add_vcpkg_dirs(filename, include_dirs, library_dirs):
dom = xmldom.parse(filename)
root = dom.documentElement
add_to_all_files(root, include_dirs, library_dirs)
os.rename(filename, filename+'.bak')
with open(filename, 'wt') as f:
dom.writexml(f, encoding='UTF-8')
def make_vcxproj_patch(filename):
vcpkg = os.environ.get('vcpkg', None)
if not vcpkg:
return 0
packages = [package for package in find_packages(vcpkg)]
include_dirs = [os.path.join(vcpkg, package, 'include') for package in packages]
library_dirs = [os.path.join(vcpkg, package, 'lib') for package in packages]
add_vcpkg_dirs(filename, include_dirs, library_dirs)
def pretty(node):
if node.hasChildNodes():
for child in node.childNodes:
pretty(child)
else:
if node.nodeType == Node.TEXT_NODE:
node.nodeValue = node.nodeValue.strip(' \t\r\n')
def make_vcxproj_template():
vcpkg = os.environ.get('vcpkg', None)
if not vcpkg:
return None
packages = [package for package in find_packages(vcpkg)]
include_dirs = [os.path.join(vcpkg, package, 'include') for package in packages]
library_dirs = [os.path.join(vcpkg, package, 'lib') for package in packages]
execute_dirs = [os.path.join(vcpkg, package, 'bin') for package in packages]
include_dirs.append('$(IncludePath)')
library_dirs.append('$(LibraryPath)')
#execute_dirs.append('$(ExecutablePath)')
local_app_data = os.environ.get('LOCALAPPDATA', '')
user_root_dir = os.path.join(local_app_data, 'Microsoft\\MSBuild\\v4.0')
print(user_root_dir)
user_props_file = os.path.join(user_root_dir, "Microsoft.Cpp.Win32.user.props")
backup_file = user_props_file + '.bak'
    if os.path.exists(backup_file):
        shutil.copy(backup_file, user_props_file)
dom = xmldom.parse(user_props_file)
doc = dom.childNodes[0]
property_group = dom.createElement('PropertyGroup')
include_path = dom.createElement('IncludePath')
library_path = dom.createElement('LibraryPath')
execute_path = dom.createElement('ExecutablePath')
local_environment = dom.createElement('LocalDebuggerEnvironment')
item_definition_group = dom.createElement('ItemDefinitionGroup')
cl_compile = dom.createElement('ClCompile')
additional_inc_dirs = dom.createElement('AdditionalIncludeDirectories')
vcpkg_include_dirs = dom.createTextNode(';'.join(include_dirs))
vcpkg_library_dirs = dom.createTextNode(';'.join(library_dirs))
vcpkg_execute_dirs = dom.createTextNode(';'.join(execute_dirs))
vcpkg_execute_path = dom.createTextNode('PATH='+';'.join(execute_dirs)+';\n$(LocalDebuggerEnvironment)')
solution_dir = dom.createTextNode('$(SolutionDir)')
include_path.appendChild(vcpkg_include_dirs)
library_path.appendChild(vcpkg_library_dirs)
execute_path.appendChild(vcpkg_execute_dirs)
local_environment.appendChild(vcpkg_execute_path)
additional_inc_dirs.appendChild(solution_dir)
property_group.childNodes.append(include_path)
property_group.childNodes.append(library_path)
#property_group.childNodes.append(execute_path)
property_group.childNodes.append(local_environment)
cl_compile.appendChild(additional_inc_dirs)
item_definition_group.childNodes.append(cl_compile)
doc.childNodes.append(property_group)
doc.childNodes.append(item_definition_group)
if not os.path.exists(backup_file):
os.rename(user_props_file, backup_file)
pretty(doc)
dom.normalize()
with open(user_props_file, 'w', encoding='UTF-8') as f:
dom.writexml(f, indent='\n', addindent=' ', encoding='UTF-8')
if __name__ == '__main__':
args = sys.argv[1:]
if not args:
make_vcxproj_template()
else:
        for filename in args:
            make_vcxproj_patch(filename)
sys.exit(0)
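# Usage sketch (the script name below is a placeholder; assumes the ``vcpkg``
# environment variable points at a vcpkg installation):
#   python vcxproj_tool.py                     -> rewrite the MSBuild v4.0 user props with vcpkg paths
#   python vcxproj_tool.py a.vcxproj b.vcxproj -> add vcpkg include/lib dirs to the given project files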
|
py | b401adcb28109063652480af397fd507391c9577 | from skdecide.builders.discrete_optimization.generic_tools.do_problem import Solution, Problem, ObjectiveRegister, EncodingRegister, \
TypeAttribute, ModeOptim, ObjectiveHandling, TypeObjective
from typing import List, Union
import numpy as np
import math
import matplotlib.pyplot as plt
class TTPSolution(Solution):
def __init__(self, tour: Union[List[int], np.array],
packing: Union[List[int], np.array],
**kwargs):
self.tour = tour
self.packing = packing
self.time_trip = kwargs.get("time_trip", 0)
self.distance = kwargs.get("distance", 0)
self.profit = kwargs.get("profit", 0)
self.weight_used = kwargs.get("weight_used", 0)
self.weight_end = kwargs.get("weight_end", 0)
self.objective = kwargs.get("objective", 0)
self.weight_array = kwargs.get("weight_array", 0)
def copy(self):
return TTPSolution(np.copy(self.tour), np.copy(self.packing),
time_trip=self.time_trip, distance=self.distance,
profit=self.profit, weight_used=self.weight_used,
weight_end=self.weight_end, objective=self.objective,
weight_array=self.weight_array)
def __str__(self):
return "Profit :"+str(self.profit)+"\nWeight: "+str(self.weight_used)+\
"\nTime Trip : "+str(self.time_trip)+"\nDistance : "+str(self.distance)+"\n"+\
"Objective : "+str(self.objective)
def change_problem(self, new_problem):
raise NotImplementedError
def distance(point1, point2):
return math.sqrt((point1["x"]-point2["x"])**2+(point1["y"]-point2["y"])**2)
class TTPModel(Problem):
def satisfy(self, variable: Solution) -> bool:
# TODO do it
return True
def get_attribute_register(self) -> EncodingRegister:
dict_attribute = {"tour":
{"name": "tour",
"type": [TypeAttribute.PERMUTATION,
TypeAttribute.PERMUTATION_TSP], # TODO, untrue at the moment..
"range": range(self.numberOfNodes),
"n": self.numberOfNodes}}
return EncodingRegister(dict_attribute)
def get_solution_type(self):
return TTPSolution
def get_objective_register(self) -> ObjectiveRegister:
return ObjectiveRegister(objective_sense=ModeOptim.MAXIMIZATION,
objective_handling=ObjectiveHandling.AGGREGATE,
dict_objective_to_doc={"obj":
{"type": TypeObjective.OBJECTIVE,
"default_weight": 1}})
def __init__(self,
problemName = None,
knapsackDataType = None,
numberOfNodes = None,
numberOfItems = None,
capacityOfKnapsack = None,
minSpeed = None,
maxSpeed = None,
rentingRatio = None,
edgeWeightType = None,
nodes = None,
items = None, nodes_array=None, items_array=None):
self.problemName = problemName
self.knapsackDataType = knapsackDataType
self.numberOfNodes = numberOfNodes
self.numberOfItems = numberOfItems
self.capacityOfKnapsack = capacityOfKnapsack
self.minSpeed = minSpeed
self.maxSpeed = maxSpeed
self.rentingRatio = rentingRatio
self.edgeWeightType = edgeWeightType
self.nodes = nodes
self.items = items
self.weights = np.array([item["weight"] for item in self.items])
self.profits = np.array([item["profit"] for item in self.items])
self.av = np.zeros((self.numberOfItems, self.numberOfNodes))
self.nodes_array = nodes_array
self.items_array = items_array
for i in range(len(self.items)):
self.av[i, self.items[i]["node_index"]] = 1
self.evaluate_function = build_obj_function(self)
self.evaluate_function_details = build_obj_function_details(self)
def evaluate(self, solution: TTPSolution):
objective, profit, distance_tour, time_trip, weight_used, weight_end = self.evaluate_function(solution.tour,
solution.packing)
solution.time_trip = time_trip
solution.distance = distance_tour
solution.profit = profit
solution.weight_used = weight_used
solution.weight_end = weight_end
solution.objective = objective
return {"obj": solution.objective}
def evaluate_details(self, solution: TTPSolution):
objective, profit, distance_tour, time_trip, weight_used, weight_end, weight_array = \
self.evaluate_function_details(solution.tour, solution.packing)
solution.time_trip=time_trip
solution.distance=distance_tour
solution.profit=profit
solution.weight_used=weight_used
solution.weight_end=weight_end
solution.objective=objective
solution.weight_array=weight_array
return solution.objective
def plot(self, solution: TTPSolution, ax=None):
if ax is None:
fig, ax = plt.subplots(1)
pp, = ax.plot([self.nodes_array[jj, 1] for jj in solution.tour]+[self.nodes_array[solution.tour[0], 1]],
[self.nodes_array[jj, 2] for jj in solution.tour]+[self.nodes_array[solution.tour[0], 2]])
return ax
from numba import njit
from functools import partial
def build_obj_function(ttp_model):
return partial(evaluate,
capacityOfKnapsack=ttp_model.capacityOfKnapsack,
rentingRatio=ttp_model.rentingRatio,
minSpeed=ttp_model.minSpeed,
maxSpeed=ttp_model.maxSpeed,
items_array=ttp_model.items_array,
nodes_array=ttp_model.nodes_array,
av=ttp_model.av)
def build_obj_function_details(ttp_model):
return partial(evaluate_with_details,
capacityOfKnapsack=ttp_model.capacityOfKnapsack,
rentingRatio=ttp_model.rentingRatio,
minSpeed=ttp_model.minSpeed,
maxSpeed=ttp_model.maxSpeed,
items_array=ttp_model.items_array,
nodes_array=ttp_model.nodes_array,
av=ttp_model.av)
@njit
def evaluate(tour, packing, capacityOfKnapsack, rentingRatio,
minSpeed, maxSpeed, items_array, nodes_array, av):
z = packing
weightofKnapsack = capacityOfKnapsack
rentRate = rentingRatio
vmin = minSpeed
vmax = maxSpeed
if(tour[0]!=tour[len(tour)-1]):
print("ERROR: The last city must be the same as the first city")
return
wc=0
time_trip=0
profit=0
distance_tour=0
for i in range(len(tour)-1):
selectedItem = [j for j in range(len(z)) if z[j]==tour[i]]
currentcitytemp = tour[i]
currentcity = currentcitytemp-1
availabilityCounter = 0
for p in selectedItem:
if av[p, tour[i]]!=1:
break
if(len(selectedItem)>0):
for item in selectedItem:
wc=wc+items_array[item, 2]
profit=profit+items_array[item, 1]
distance_i = math.sqrt((nodes_array[tour[i], 1]-nodes_array[tour[i+1], 1])**2
+(nodes_array[tour[i], 2]-nodes_array[tour[i+1], 2])**2)
distance_tour += distance_i
time_trip=time_trip+(distance_i/(vmax-wc*(vmax-vmin)/weightofKnapsack))
weight_used = wc
weight_end = weightofKnapsack-wc
objective = profit-time_trip*rentRate
return objective, profit, distance_tour, time_trip, weight_used, weight_end
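# Worked example of the objective above (numbers are made up): with profit = 100,
# time_trip = 20 and rentingRatio = 2.5 the objective is 100 - 20 * 2.5 = 50.
# Each leg's travel time uses the knapsack weight collected so far,
# time_i = distance_i / (vmax - wc * (vmax - vmin) / W), so a heavier knapsack
# slows the thief towards vmin and lowers the objective.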
@njit
def evaluate_with_details(tour, packing, capacityOfKnapsack, rentingRatio,
minSpeed, maxSpeed, items_array, nodes_array, av):
z = packing
weightofKnapsack = capacityOfKnapsack
rentRate = rentingRatio
vmin = minSpeed
vmax = maxSpeed
if(tour[0]!=tour[len(tour)-1]):
print("ERROR: The last city must be the same as the first city")
return
wc=0
time_trip=0
profit=0
distance_tour=0
lll = len(tour)-1
weight_array = np.zeros((lll), dtype=np.int32)
for i in range(lll):
selectedItem = [j for j in range(len(z)) if z[j]==tour[i]]
currentcitytemp = tour[i]
currentcity = currentcitytemp-1
availabilityCounter = 0
for p in selectedItem:
if av[p, tour[i]]!=1:
break
if(len(selectedItem)>0):
for item in selectedItem:
wc=wc+items_array[item, 2]
profit=profit+items_array[item, 1]
weight_array[i] = wc
distance_i = math.sqrt((nodes_array[tour[i], 1]-nodes_array[tour[i+1], 1])**2
+(nodes_array[tour[i], 2]-nodes_array[tour[i+1], 2])**2)
distance_tour += distance_i
time_trip=time_trip+(distance_i/(vmax-wc*(vmax-vmin)/weightofKnapsack))
weight_used = wc
weight_end = weightofKnapsack-wc
objective = profit-time_trip*rentRate
return objective, profit, distance_tour, time_trip, weight_used, weight_end, weight_array
|
py | b401add5cf5263d9cd1471d31ca7372ec972d535 | #!/usr/bin/env python
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from torch.utils.data import DataLoader
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeq2SeqDataset,
Seq2SeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
mode = "summarization"
loss_names = ["loss"]
metric_names = ROUGE_KEYS
default_val_metric = "rouge2"
def __init__(self, hparams, **kwargs):
if hparams.sortish_sampler and hparams.gpus > 1:
hparams.replace_sampler_ddp = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
if hparams.sortish_sampler:
raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")
super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
use_task_specific_params(self.model, "summarization")
save_git_info(self.hparams.output_dir)
self.metrics_save_path = Path(self.output_dir) / "metrics.json"
self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
pickle_save(self.hparams, self.hparams_save_path)
self.step_count = 0
self.metrics = defaultdict(list)
self.model_type = self.config.model_type
self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size
self.dataset_kwargs: dict = dict(
data_dir=self.hparams.data_dir,
max_source_length=self.hparams.max_source_length,
prefix=self.model.config.prefix or "",
)
n_observations_per_split = {
"train": self.hparams.n_train,
"val": self.hparams.n_val,
"test": self.hparams.n_test,
}
self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
self.target_lens = {
"train": self.hparams.max_target_length,
"val": self.hparams.val_max_target_length,
"test": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
if self.hparams.freeze_embeds:
freeze_embeds(self.model)
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder())
assert_all_frozen(self.model.get_encoder())
self.hparams.git_sha = get_git_info()["repo_sha"]
self.num_workers = hparams.num_workers
self.decoder_start_token_id = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
self.model.config.decoder_start_token_id = self.decoder_start_token_id
self.dataset_class = (
Seq2SeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeq2SeqDataset
)
self.already_saved_batch = False
self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
self.eval_max_length = self.hparams.eval_max_gen_length
else:
self.eval_max_length = self.model.config.max_length
self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
"""A debugging utility"""
readable_batch = {
k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
}
save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")
self.already_saved_batch = True
return readable_batch
def forward(self, input_ids, **kwargs):
return self.model(input_ids, **kwargs)
def ids_to_clean_text(self, generated_ids: List[int]):
gen_text = self.tokenizer.batch_decode(
generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
)
return lmap(str.strip, gen_text)
def _step(self, batch: dict) -> Tuple:
pad_token_id = self.tokenizer.pad_token_id
src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
tgt_ids = batch["labels"]
if isinstance(self.model, T5ForConditionalGeneration):
decoder_input_ids = self.model._shift_right(tgt_ids)
else:
decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
batch["decoder_input_ids"] = decoder_input_ids
self.save_readable_batch(batch)
outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
lm_logits = outputs[0]
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
ce_loss_fct = torch.nn.CrossEntropyLoss(ignore_index=pad_token_id)
assert lm_logits.shape[-1] == self.vocab_size
loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
else:
lprobs = torch.nn.functional.log_softmax(lm_logits, dim=-1)
loss, nll_loss = label_smoothed_nll_loss(
lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
)
return (loss,)
@property
def pad(self) -> int:
return self.tokenizer.pad_token_id
def training_step(self, batch, batch_idx) -> Dict:
loss_tensors = self._step(batch)
logs = {name: loss for name, loss in zip(self.loss_names, loss_tensors)}
# tokens per batch
logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
logs["bs"] = batch["input_ids"].shape[0]
logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def validation_step(self, batch, batch_idx) -> Dict:
return self._generative_step(batch)
def validation_epoch_end(self, outputs, prefix="val") -> Dict:
self.step_count += 1
losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
loss = losses["loss"]
generative_metrics = {
k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
}
metric_val = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
generative_metrics.update({k: v.item() for k, v in losses.items()})
losses.update(generative_metrics)
all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
all_metrics["step_count"] = self.step_count
self.metrics[prefix].append(all_metrics) # callback writes this to self.metrics_save_path
preds = flatten_list([x["preds"] for x in outputs])
return {
"log": all_metrics,
"preds": preds,
f"{prefix}_loss": loss,
f"{prefix}_{self.val_metric}": metric_tensor,
}
def calc_generative_metrics(self, preds, target) -> Dict:
return calculate_rouge(preds, target)
def _generative_step(self, batch: dict) -> dict:
t0 = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
generated_ids = self.model.generate(
batch["input_ids"],
attention_mask=batch["attention_mask"],
use_cache=True,
decoder_start_token_id=self.decoder_start_token_id,
num_beams=self.eval_beams,
max_length=self.eval_max_length,
)
gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
preds: List[str] = self.ids_to_clean_text(generated_ids)
target: List[str] = self.ids_to_clean_text(batch["labels"])
loss_tensors = self._step(batch)
base_metrics = {name: loss for name, loss in zip(self.loss_names, loss_tensors)}
rouge: Dict = self.calc_generative_metrics(preds, target)
summ_len = np.mean(lmap(len, generated_ids))
base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
return base_metrics
def test_step(self, batch, batch_idx):
return self._generative_step(batch)
def test_epoch_end(self, outputs):
return self.validation_epoch_end(outputs, prefix="test")
def get_dataset(self, type_path) -> Seq2SeqDataset:
n_obs = self.n_obs[type_path]
max_target_length = self.target_lens[type_path]
dataset = self.dataset_class(
self.tokenizer,
type_path=type_path,
n_obs=n_obs,
max_target_length=max_target_length,
**self.dataset_kwargs,
)
return dataset
def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
dataset = self.get_dataset(type_path)
if self.hparams.sortish_sampler and type_path != "test":
sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
return DataLoader(
dataset,
batch_size=batch_size,
collate_fn=dataset.collate_fn,
shuffle=False,
num_workers=self.num_workers,
sampler=sampler,
)
elif self.hparams.max_tokens_per_batch is not None and type_path != "test":
batch_sampler = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
)
return DataLoader(
dataset,
batch_sampler=batch_sampler,
collate_fn=dataset.collate_fn,
# shuffle=False,
num_workers=self.num_workers,
# batch_size=None,
)
else:
return DataLoader(
dataset,
batch_size=batch_size,
collate_fn=dataset.collate_fn,
shuffle=shuffle,
num_workers=self.num_workers,
sampler=None,
)
def train_dataloader(self) -> DataLoader:
dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
return dataloader
def val_dataloader(self) -> DataLoader:
return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)
def test_dataloader(self) -> DataLoader:
return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)
@staticmethod
def add_model_specific_args(parser, root_dir):
BaseTransformer.add_model_specific_args(parser, root_dir)
add_generic_args(parser, root_dir)
parser.add_argument(
"--max_source_length",
default=1024,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--max_target_length",
default=56,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--val_max_target_length",
default=142, # these defaults are optimized for CNNDM. For xsum, see README.md.
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--test_max_target_length",
default=142,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument("--freeze_encoder", action="store_true")
parser.add_argument("--freeze_embeds", action="store_true")
parser.add_argument("--sortish_sampler", action="store_true", default=False)
parser.add_argument("--max_tokens_per_batch", type=int, default=None)
parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
parser.add_argument(
"--task", type=str, default="summarization", required=False, help="# examples. -1 means use all."
)
parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
parser.add_argument("--src_lang", type=str, default="", required=False)
parser.add_argument("--tgt_lang", type=str, default="", required=False)
parser.add_argument("--eval_beams", type=int, default=None, required=False)
parser.add_argument(
"--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
)
parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
parser.add_argument(
"--early_stopping_patience",
type=int,
default=-1,
required=False,
help="-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So val_check_interval will effect it.",
)
return parser
class TranslationModule(SummarizationModule):
mode = "translation"
loss_names = ["loss"]
metric_names = ["bleu"]
default_val_metric = "bleu"
def __init__(self, hparams, **kwargs):
super().__init__(hparams, **kwargs)
self.dataset_kwargs["src_lang"] = hparams.src_lang
self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang
def calc_generative_metrics(self, preds, target) -> dict:
return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
Path(args.output_dir).mkdir(exist_ok=True)
if len(os.listdir(args.output_dir)) > 3 and args.do_train:
raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
if model is None:
if "summarization" in args.task:
model: SummarizationModule = SummarizationModule(args)
else:
model: SummarizationModule = TranslationModule(args)
dataset = Path(args.data_dir).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir).startswith("/tmp")
or str(args.output_dir).startswith("/var")
):
logger = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
project = os.environ.get("WANDB_PROJECT", dataset)
logger = WandbLogger(name=model.output_dir.name, project=project)
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")
if args.early_stopping_patience >= 0:
es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
else:
es_callback = False
lower_is_better = args.val_metric == "loss"
trainer: pl.Trainer = generic_train(
model,
args,
logging_callback=Seq2SeqLoggingCallback(),
checkpoint_callback=get_checkpoint_callback(
args.output_dir, model.val_metric, args.save_top_k, lower_is_better
),
early_stopping_callback=es_callback,
logger=logger,
)
pickle_save(model.hparams, model.output_dir / "hparams.pkl")
if not args.do_predict:
return model
model.hparams.test_checkpoint = ""
checkpoints = list(sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True)))
if checkpoints:
model.hparams.test_checkpoint = checkpoints[-1]
trainer.resume_from_checkpoint = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams)
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
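# Illustrative invocation sketch, not part of the original script. The script filename
# and any flags defined outside this file (e.g. by add_generic_args / BaseTransformer)
# are assumptions; only --task, --val_metric and --n_val are defined above.
#
#   python finetune.py \
#       --data_dir $DATA_DIR --output_dir $OUTPUT_DIR \
#       --do_train --do_predict \
#       --task summarization --val_metric rouge2 --n_val 500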
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser = pl.Trainer.add_argparse_args(parser)
parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
args = parser.parse_args()
main(args)
|
py | b401ae60b4645d7c46765a255add8f652f1cf0cc | import imaplib
import email
import re
from dateutil import parser
from app_utils import *
WORDS = ["EMAIL", "INBOX"]
def getSender(email):
"""
Returns the best-guess sender of an email.
Arguments:
email -- the email whose sender is desired
Returns:
Sender of the email.
"""
sender = email['From']
m = re.match(r'(.*)\s<.*>', sender)
if m:
return m.group(1)
return sender
def getDate(email):
return parser.parse(email.get('date'))
def getMostRecentDate(emails):
"""
Returns the most recent date of any email in the list provided.
Arguments:
emails -- a list of emails to check
Returns:
Date of the most recent email.
"""
dates = [getDate(e) for e in emails]
dates.sort(reverse=True)
if dates:
return dates[0]
return None
def fetchUnreadEmails(profile, since=None, markRead=False, limit=None):
"""
Fetches a list of unread email objects from a user's Gmail inbox.
Arguments:
profile -- contains information related to the user (e.g., Gmail address)
since -- if provided, no emails before this date will be returned
markRead -- if True, marks all returned emails as read in target inbox
Returns:
A list of unread email objects.
"""
conn = imaplib.IMAP4_SSL('imap.gmail.com')
conn.debug = 0
conn.login(profile['gmail_address'], profile['gmail_password'])
conn.select(readonly=(not markRead))
msgs = []
(retcode, messages) = conn.search(None, '(UNSEEN)')
if retcode == 'OK' and messages != ['']:
numUnread = len(messages[0].split(' '))
if limit and numUnread > limit:
return numUnread
for num in messages[0].split(' '):
# parse email RFC822 format
ret, data = conn.fetch(num, '(RFC822)')
msg = email.message_from_string(data[0][1])
if not since or getDate(msg) > since:
msgs.append(msg)
conn.close()
conn.logout()
return msgs
def handle(text, mic, profile):
"""
Responds to user-input, typically speech text, with a summary of
the user's Gmail inbox, reporting on the number of unread emails
in the inbox, as well as their senders.
Arguments:
text -- user-input, typically transcribed speech
mic -- used to interact with the user (for both input and output)
profile -- contains information related to the user (e.g., Gmail address)
"""
try:
msgs = fetchUnreadEmails(profile, limit=5)
if isinstance(msgs, int):
response = "You have %d unread emails." % msgs
mic.say(response)
return
senders = [getSender(e) for e in msgs]
except imaplib.IMAP4.error:
mic.say(
"I'm sorry. I'm not authenticated to work with your Gmail.")
return
if not senders:
mic.say("You have no unread emails.")
elif len(senders) == 1:
mic.say("You have one unread email from " + senders[0] + ".")
else:
response = "You have %d unread emails" % len(
senders)
unique_senders = list(set(senders))
if len(unique_senders) > 1:
unique_senders[-1] = 'and ' + unique_senders[-1]
response += ". Senders include: "
response += '...'.join(senders)
else:
response += " from " + unittest[0]
mic.say(response)
def isValid(text):
"""
Returns True if the input is related to email.
Arguments:
text -- user-input, typically transcribed speech
"""
return bool(re.search(r'\bemail\b', text, re.IGNORECASE))
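# Illustrative only, not part of the original module: the `profile` mapping passed to
# fetchUnreadEmails()/handle() only needs the two keys read above.
# example_profile = {
#     "gmail_address": "someone@gmail.com",
#     "gmail_password": "an-app-specific-password",
# }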
|
py | b401ae6313270503c9d35614725a6d1a66ac2c2a | import numpy as np
class NormalizeOne:
r"""
Normalizes the node attributes by dividing each row by its sum, so that it
sums to 1:
$$
\X_i \leftarrow \frac{\X_i}{\sum_{j=1}^{N} \X_{ij}}
$$
"""
def __call__(self, graph):
x_sum = np.sum(graph.x, -1)
x_sum[x_sum == 0] = 1
graph.x = graph.x / x_sum[..., None]
return graph
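# Illustrative usage sketch, not part of the original module. `graph` is assumed to be
# any object exposing a 2-D numpy array of node features as `x`, which is all the
# transform above relies on.
if __name__ == "__main__":
    class _Graph:
        # minimal stand-in for a graph container with an `x` attribute
        x = None
    g = _Graph()
    g.x = np.array([[1.0, 3.0], [0.0, 0.0]])
    g = NormalizeOne()(g)
    print(g.x)  # first row becomes [0.25, 0.75]; all-zero rows are left unchanged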
|
py | b401afc2d3e189ae82a792595eb62579455010b5 | from ...error import GraphQLError
from ...language.printer import print_ast
from ...type.definition import GraphQLNonNull
from ...utils.is_valid_literal_value import is_valid_literal_value
from .base import ValidationRule
class DefaultValuesOfCorrectType(ValidationRule):
def enter_VariableDefinition(self, node, key, parent, path, ancestors):
name = node.variable.name.value
default_value = node.default_value
type = self.context.get_input_type()
if isinstance(type, GraphQLNonNull) and default_value:
return GraphQLError(
self.default_for_non_null_arg_message(name, type, type.of_type),
[default_value]
)
if type and default_value:
errors = is_valid_literal_value(type, default_value)
if errors:
return GraphQLError(
self.bad_value_for_default_arg_message(name, type, print_ast(default_value), errors),
[default_value]
)
@staticmethod
def default_for_non_null_arg_message(var_name, type, guess_type):
return u'Variable "${}" of type "{}" is required and will not use the default value. ' \
u'Perhaps you meant to use type "{}".'.format(var_name, type, guess_type)
@staticmethod
def bad_value_for_default_arg_message(var_name, type, value, verbose_errors):
message = (u'\n' + u'\n'.join(verbose_errors)) if verbose_errors else u''
return u'Variable "${}" of type "{}" has invalid default value: {}.{}'.format(var_name, type, value, message)
|
py | b401b0067fb69f15aa3e675a767a742a79458727 | #!/usr/bin/python
# ==============================================================================
# Author: Tao Li ([email protected])
# Date: May 30, 2015
# Question: 080-Remove-Duplicates-from-Sorted-Array-II
# Link: https://leetcode.com/problems/remove-duplicates-from-sorted-array-ii/
# ==============================================================================
# Follow up for "Remove Duplicates":
# What if duplicates are allowed at most twice?
#
# For example,
# Given sorted array nums = [1,1,1,2,2,3],
#
# Your function should return length = 5, with the first five elements of nums
# being 1, 1, 2, 2 and 3. It doesn't matter what you leave beyond the new length.
# ==============================================================================
# Method: hash table; pointer
# Time Complexity: O(n)
# Space Complexity: O(n)
# Note: Try not to use a hash table next time; try a new method with O(1) space complexity
# ==============================================================================
class Solution:
# @param {integer[]} nums
# @return {integer}
def removeDuplicates(self, nums):
size = len(nums)
if size <= 2:
return size
dic = {}
counter = 0
ptr = 0
for i in xrange(size):
if dic.get(nums[i]) >= 2:
continue
elif dic.get(nums[i]) is None:
dic[nums[i]] = 1
elif dic.get(nums[i]) == 1:
dic[nums[i]] = 2
nums[ptr] = nums[i]
ptr += 1
counter += 1
return counter
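# Not part of the original file: a sketch of the O(1)-space approach suggested in the
# note above. It relies only on the array being sorted: keep a write index and copy an
# element whenever it differs from the value two positions behind the write index.
def removeDuplicatesConstantSpace(nums):
    write = 0
    for x in nums:
        if write < 2 or x != nums[write - 2]:
            nums[write] = x
            write += 1
    return write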
if __name__ == '__main__':
nums = [1,1,1,2,2,3]
print Solution().removeDuplicates(nums)
print nums |
py | b401b02c20980ea7698ee5c18e9a3cced2f33bf9 | """
This file offers the methods to automatically retrieve the graph Puniceicoccales bacterium Verruco_02.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def PuniceicoccalesBacteriumVerruco02(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Puniceicoccales bacterium Verruco_02 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
Wether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
Wether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
Instace of Puniceicoccales bacterium Verruco_02 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="PuniceicoccalesBacteriumVerruco02",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
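# Illustrative only, not part of the original module: the retrieval function above is
# typically just called directly; the exact import path depends on the installed
# package layout and is assumed here.
# graph = PuniceicoccalesBacteriumVerruco02(directed=False, verbose=2)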
|
py | b401b0a11655fe2920062bf23d6da24fb9572b5d | import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from nltk import wordpunct_tokenize
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
import nltk
from nltk.stem.snowball import FrenchStemmer
from nltk.stem.snowball import PorterStemmer
from nltk.stem.snowball import SnowballStemmer
import re
import csv
# set the font size of plots
plt.rcParams['font.size'] = 14
np.set_printoptions(precision=3)
df = pd.read_csv(r"E:\Workspace\pfe-backend-project\TalentHunter\Job-Matching-master\data.csv",
encoding='utf-8-sig')
'''df = df.drop(columns="headquaters")
df = df.drop(columns="employees")
df = df.drop(columns="founded")
df = df.drop(columns="industry")'''
Q1_corpus = df.iloc[:, 5].tolist()
tokenizer = RegexpTokenizer('[^_\W]+')
corpus = [tokenizer.tokenize((str(doc)).lower()) for doc in Q1_corpus]
# identify language:
languages_ratios = {}
tokens = wordpunct_tokenize(str(corpus))
words = [word.lower() for word in tokens]
for language in stopwords.fileids():
stopwords_set = set(stopwords.words(language))
words_set = set(words)
common_elements = words_set.intersection(stopwords_set)
languages_ratios[language] = len(common_elements) # language "score"
language = max(languages_ratios, key=languages_ratios.get)
print("OKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK", language)
|
py | b401b136f575bf703ba93daf4963e7bda6ef018b | # ---------------------------------------------------------------
# caption_eval_hooks.py
# Set-up time: 2021/1/6 22:04
# Copyright (c) 2020 ICT
# Licensed under The MIT License [see LICENSE for details]
# Written by Kenneth-Wong (Wenbin-Wang) @ VIPL.ICT
# Contact: [email protected] [OR] [email protected]
# ---------------------------------------------------------------
import os.path as osp
import mmcv
from mmcv.runner import Hook
from torch.utils.data import DataLoader
import os.path as osp
class CaptionEvalHook(Hook):
"""Evaluation hook.
Attributes:
dataloader (DataLoader): A PyTorch dataloader.
interval (int): Evaluation interval (by epochs). Default: 1.
"""
def __init__(self, dataloader, interval=1, **eval_kwargs):
if not isinstance(dataloader, DataLoader):
raise TypeError(
'dataloader must be a pytorch DataLoader, but got {}'.format(
type(dataloader)))
self.dataloader = dataloader
self.interval = interval
self.eval_kwargs = eval_kwargs
def after_train_epoch(self, runner):
if not self.every_n_epochs(runner, self.interval):
return
from mmdet.apis import caption_single_gpu_test
results = caption_single_gpu_test(runner.model, self.dataloader, **self.eval_kwargs)
# save the results:
res_dir = osp.join(runner.work_dir, 'eval_results')
mmcv.mkdir_or_exist(res_dir)
mmcv.dump(results, osp.join(res_dir, self.dataloader.dataset.split + '_' + str(runner.epoch + 1) + '.json'))
self.evaluate(runner, results)
def evaluate(self, runner, results):
eval_res = self.dataloader.dataset.evaluate(
results, epoch=runner.epoch, logger=runner.logger)
# NOTE: Add suffix to distinguish evaluation on test set or val set.
for name, val in eval_res.items():
runner.log_buffer.output[name+'/'+self.dataloader.dataset.split] = val
runner.log_buffer.ready = True
class CaptionDistEvalHook(Hook):
"""Distributed evaluation hook.
Attributes:
dataloader (DataLoader): A PyTorch dataloader.
interval (int): Evaluation interval (by epochs). Default: 1.
tmpdir (str | None): Temporary directory to save the results of all
processes. Default: None.
gpu_collect (bool): Whether to use gpu or cpu to collect results.
Default: False.
"""
def __init__(self,
dataloader,
interval=1,
gpu_collect=False,
**eval_kwargs):
if not isinstance(dataloader, DataLoader):
raise TypeError(
'dataloader must be a pytorch DataLoader, but got {}'.format(
type(dataloader)))
self.dataloader = dataloader
self.interval = interval
self.gpu_collect = gpu_collect
self.eval_kwargs = eval_kwargs
def after_train_epoch(self, runner):
if not self.every_n_epochs(runner, self.interval):
return
from mmdet.apis import caption_multi_gpu_test
results = caption_multi_gpu_test(
runner.model,
self.dataloader,
tmpdir=osp.join(runner.work_dir, '.eval_hook'),
gpu_collect=self.gpu_collect,
**self.eval_kwargs)
if runner.rank == 0:
print('\n')
# save the results:
res_dir = osp.join(runner.work_dir, 'eval_results')
mmcv.mkdir_or_exist(res_dir)
mmcv.dump(results, osp.join(res_dir, self.dataloader.dataset.split + '_' + str(runner.epoch + 1) + '.json'))
self.evaluate(runner, results)
def evaluate(self, runner, results):
eval_res = self.dataloader.dataset.evaluate(
results, epoch=runner.epoch, logger=runner.logger)
for name, val in eval_res.items():
runner.log_buffer.output[name+'/'+self.dataloader.dataset.split] = val
runner.log_buffer.ready = True |
py | b401b195fec10c1a1e45a3796c3605b947999fed | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
from progress.bar import Bar
import time
import torch
from models.model import create_model, load_model
from utils.image import get_affine_transform
from utils.debugger import Debugger
class BaseDetector(object):
def __init__(self, opt):
if opt.gpus[0] >= 0:
opt.device = torch.device('cuda')
else:
opt.device = torch.device('cpu')
print('Creating model...')
self.model = create_model(opt.arch, opt.heads, opt.head_conv)
print(opt.load_model)
self.model = load_model(self.model, opt.load_model)
self.model = self.model.to(opt.device)
self.model.eval()
self.mean = np.array(opt.mean, dtype=np.float32).reshape(1, 1, 3)
self.std = np.array(opt.std, dtype=np.float32).reshape(1, 1, 3)
self.max_per_image = 100
self.num_classes = opt.num_classes
self.scales = opt.test_scales
self.opt = opt
self.pause = True
def pre_process(self, image, scale, meta=None):
height, width = image.shape[0:2]
new_height = int(height * scale)
new_width = int(width * scale)
if self.opt.fix_res:
inp_height, inp_width = self.opt.input_h, self.opt.input_w
c = np.array([new_width / 2., new_height / 2.], dtype=np.float32)
s = max(height, width) * 1.0
else:
inp_height = (new_height | self.opt.pad) + 1
inp_width = (new_width | self.opt.pad) + 1
c = np.array([new_width // 2, new_height // 2], dtype=np.float32)
s = np.array([inp_width, inp_height], dtype=np.float32)
trans_input = get_affine_transform(c, s, 0, [inp_width, inp_height])
resized_image = cv2.resize(image, (new_width, new_height))
inp_image = cv2.warpAffine(
resized_image, trans_input, (inp_width, inp_height),
flags=cv2.INTER_LINEAR)
inp_image = ((inp_image / 255. - self.mean) / self.std).astype(np.float32)
images = inp_image.transpose(2, 0, 1).reshape(1, 3, inp_height, inp_width)
if self.opt.flip_test:
images = np.concatenate((images, images[:, :, :, ::-1]), axis=0)
images = torch.from_numpy(images)
meta = {'c': c, 's': s,
'out_height': inp_height // self.opt.down_ratio,
'out_width': inp_width // self.opt.down_ratio}
return images, meta
def process(self, images, return_time=False):
raise NotImplementedError
def post_process(self, dets, meta, scale=1):
raise NotImplementedError
def merge_outputs(self, detections):
raise NotImplementedError
def debug(self, debugger, images, dets, output, scale=1):
raise NotImplementedError
def show_results(self, debugger, image, results):
raise NotImplementedError
def run_from_trainer(self, batch):
load_time, pre_time, net_time, dec_time, post_time = 0, 0, 0, 0, 0
merge_time, tot_time = 0, 0
# debugger = Debugger(dataset=self.opt.dataset, ipynb=(self.opt.debug==3),
# theme=self.opt.debugger_theme)
start_time = time.time()
loaded_time = time.time()
load_time += (loaded_time - start_time)
detections = []
for scale in self.scales:
scale_start_time = time.time()
images = batch['input']
meta = {k: v.cpu().numpy()[0] for k, v in batch['meta'].items()}
images = images.to(self.opt.device)
torch.cuda.synchronize()
pre_process_time = time.time()
pre_time += pre_process_time - scale_start_time
output, dets, forward_time = self.process(images, return_time=True)
torch.cuda.synchronize()
net_time += forward_time - pre_process_time
decode_time = time.time()
dec_time += decode_time - forward_time
if self.opt.debug >= 2:
self.debug(debugger, images, dets, output, scale)
dets = self.post_process(dets, meta, scale)
torch.cuda.synchronize()
post_process_time = time.time()
post_time += post_process_time - decode_time
detections.append(dets)
results = self.merge_outputs(detections)
torch.cuda.synchronize()
end_time = time.time()
merge_time += end_time - post_process_time
tot_time += end_time - start_time
if self.opt.debug >= 1:
self.show_results(debugger, image, results)
return {'results': results, 'tot': tot_time, 'load': load_time,
'pre': pre_time, 'net': net_time, 'dec': dec_time,
'post': post_time, 'merge': merge_time}
def run(self, image_or_path_or_tensor, meta=None):
load_time, pre_time, net_time, dec_time, post_time = 0, 0, 0, 0, 0
merge_time, tot_time = 0, 0
debugger = Debugger(dataset=self.opt.dataset, ipynb=(self.opt.debug==3),
theme=self.opt.debugger_theme)
start_time = time.time()
pre_processed = False
if isinstance(image_or_path_or_tensor, np.ndarray):
image = image_or_path_or_tensor
elif type(image_or_path_or_tensor) == type (''):
image = cv2.imread(image_or_path_or_tensor)
else:
image = image_or_path_or_tensor['image'][0].numpy()
pre_processed_images = image_or_path_or_tensor
pre_processed = True
loaded_time = time.time()
load_time += (loaded_time - start_time)
detections = []
for scale in self.scales:
scale_start_time = time.time()
if not pre_processed:
images, meta = self.pre_process(image, scale, meta)
else:
# import pdb; pdb.set_trace()
images = pre_processed_images['images'][scale][0]
meta = pre_processed_images['meta'][scale]
meta = {k: v.numpy()[0] for k, v in meta.items()}
images = images.to(self.opt.device)
torch.cuda.synchronize()
pre_process_time = time.time()
pre_time += pre_process_time - scale_start_time
output, dets, forward_time = self.process(images, return_time=True)
torch.cuda.synchronize()
net_time += forward_time - pre_process_time
decode_time = time.time()
dec_time += decode_time - forward_time
if self.opt.debug >= 2:
self.debug(debugger, images, dets, output, scale)
dets = self.post_process(dets, meta, scale)
torch.cuda.synchronize()
post_process_time = time.time()
post_time += post_process_time - decode_time
detections.append(dets)
results = self.merge_outputs(detections)
torch.cuda.synchronize()
end_time = time.time()
merge_time += end_time - post_process_time
tot_time += end_time - start_time
if self.opt.debug >= 1:
self.show_results(debugger, image, results)
return {'results': results, 'tot': tot_time, 'load': load_time,
'pre': pre_time, 'net': net_time, 'dec': dec_time,
'post': post_time, 'merge': merge_time}
|
py | b401b21982d34be2d3620ed704cef582e3c8a90f | import logging
import random
from json import load
from pathlib import Path
import discord
from redbot.core import commands
log = logging.getLogger(__name__)
with open(Path(__file__).parent / "pickup_lines.json", "r", encoding="utf8") as f:
pickup_lines = load(f)
class PickupLine(commands.Cog):
"""A cog that gives random cheesy pickup lines."""
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.command()
async def pickupline(self, ctx: commands.Context) -> None:
"""
Gives you a random pickup line.
Note that most of them are very cheesy.
"""
random_line = random.choice(pickup_lines["lines"])
embed = discord.Embed(
title=":cheese: Your pickup line :cheese:",
description=random_line["line"],
color=0xFFC2FF,
)
embed.set_thumbnail(url=random_line.get("image", pickup_lines["placeholder"]))
await ctx.send(embed=embed)
|
py | b401b27655c76549c21feb7984c9e5a29e53bca8 | # encoding: UTF-8
# 系统模块
from queue import Queue, Empty
from threading import Thread
from time import sleep
from collections import defaultdict
# Third-party modules
from qtpy.QtCore import QTimer
# Modules developed in this project
from .eventType import *
########################################################################
class EventEngine(object):
"""
    Event-driven engine.
    All attributes of the engine are declared private to keep them from being
    modified accidentally from the outside, which could introduce bugs.
    Attributes:
    __queue: private, the event queue
    __active: private, the on/off switch of the engine
    __thread: private, the event-processing thread
    __timer: private, the timer
    __handlers: private, dict of registered event handlers
    Methods:
    __run: private, main loop of the event-processing thread
    __process: private, dispatches an event to the listeners registered for it
    __onTimer: private, puts a timer event into the queue each time the timer fires
    start: public, starts the engine
    stop: public, stops the engine
    register: public, registers a listener with the engine
    unregister: public, unregisters a listener from the engine
    put: public, puts a new event into the event queue
    An event listener must take a single event object as its only argument, i.e.:
    a function
    def func(event)
        ...
    or a method
    def method(self, event)
        ...
"""
#----------------------------------------------------------------------
def __init__(self):
"""初始化事件引擎"""
# 事件队列
self.__queue = Queue()
# 事件引擎开关
self.__active = False
# 事件处理线程
self.__thread = Thread(target = self.__run)
# 计时器,用于触发计时器事件
self.__timer = QTimer()
self.__timer.timeout.connect(self.__onTimer)
# 这里的__handlers是一个字典,用来保存对应的事件调用关系
# 其中每个键对应的值是一个列表,列表中保存了对该事件进行监听的函数功能
self.__handlers = defaultdict(list)
# __generalHandlers是一个列表,用来保存通用回调函数(所有事件均调用)
self.__generalHandlers = []
#----------------------------------------------------------------------
def __run(self):
"""引擎运行"""
while self.__active == True:
try:
                event = self.__queue.get(block = True, timeout = 1)  # block for at most 1 second when fetching an event
self.__process(event)
except Empty:
pass
#----------------------------------------------------------------------
def __process(self, event):
"""处理事件"""
# 检查是否存在对该事件进行监听的处理函数
if event.type_ in self.__handlers:
# 若存在,则按顺序将事件传递给处理函数执行
[handler(event) for handler in self.__handlers[event.type_]]
# 以上语句为Python列表解析方式的写法,对应的常规循环写法为:
#for handler in self.__handlers[event.type_]:
#handler(event)
# 调用通用处理函数进行处理
if self.__generalHandlers:
[handler(event) for handler in self.__generalHandlers]
#----------------------------------------------------------------------
def __onTimer(self):
"""向事件队列中存入计时器事件"""
# 创建计时器事件
event = Event(type_=EVENT_TIMER)
# 向队列中存入计时器事件
self.put(event)
#----------------------------------------------------------------------
def start(self, timer=True):
"""
        Start the engine
        timer: whether to start the timer as well
        """
        # Mark the engine as running
        self.__active = True
        # Start the event-processing thread
        self.__thread.start()
        # Start the timer; the timer event interval defaults to 1 second
if timer:
self.__timer.start(1000)
#----------------------------------------------------------------------
def stop(self):
"""停止引擎"""
# 将引擎设为停止
self.__active = False
# 停止计时器
self.__timer.stop()
# 等待事件处理线程退出
self.__thread.join()
#----------------------------------------------------------------------
def register(self, type_, handler):
"""注册事件处理函数监听"""
# 尝试获取该事件类型对应的处理函数列表,若无defaultDict会自动创建新的list
handlerList = self.__handlers[type_]
# 若要注册的处理器不在该事件的处理器列表中,则注册该事件
if handler not in handlerList:
handlerList.append(handler)
#----------------------------------------------------------------------
def unregister(self, type_, handler):
"""注销事件处理函数监听"""
# 尝试获取该事件类型对应的处理函数列表,若无则忽略该次注销请求
handlerList = self.__handlers[type_]
# 如果该函数存在于列表中,则移除
if handler in handlerList:
handlerList.remove(handler)
# 如果函数列表为空,则从引擎中移除该事件类型
if not handlerList:
del self.__handlers[type_]
#----------------------------------------------------------------------
def put(self, event):
"""向事件队列中存入事件"""
self.__queue.put(event)
#----------------------------------------------------------------------
def registerGeneralHandler(self, handler):
"""注册通用事件处理函数监听"""
if handler not in self.__generalHandlers:
self.__generalHandlers.append(handler)
#----------------------------------------------------------------------
def unregisterGeneralHandler(self, handler):
"""注销通用事件处理函数监听"""
if handler in self.__generalHandlers:
self.__generalHandlers.remove(handler)
########################################################################
class EventEngine2(object):
"""
    Event-driven engine whose timer runs in a plain Python thread
"""
#----------------------------------------------------------------------
def __init__(self):
"""初始化事件引擎"""
# 事件队列
self.__queue = Queue()
# 事件引擎开关
self.__active = False
# 事件处理线程
self.__thread = Thread(target = self.__run)
# 计时器,用于触发计时器事件
self.__timer = Thread(target = self.__runTimer)
self.__timerActive = False # 计时器工作状态
self.__timerSleep = 1 # 计时器触发间隔(默认1秒)
# 这里的__handlers是一个字典,用来保存对应的事件调用关系
# 其中每个键对应的值是一个列表,列表中保存了对该事件进行监听的函数功能
self.__handlers = defaultdict(list)
# __generalHandlers是一个列表,用来保存通用回调函数(所有事件均调用)
self.__generalHandlers = []
#----------------------------------------------------------------------
def __run(self):
"""引擎运行"""
while self.__active == True:
try:
                event = self.__queue.get(block = True, timeout = 1)  # block for at most 1 second when fetching an event
self.__process(event)
except Empty:
pass
#----------------------------------------------------------------------
def __process(self, event):
"""处理事件"""
# 检查是否存在对该事件进行监听的处理函数
if event.type_ in self.__handlers:
# 若存在,则按顺序将事件传递给处理函数执行
[handler(event) for handler in self.__handlers[event.type_]]
# 以上语句为Python列表解析方式的写法,对应的常规循环写法为:
#for handler in self.__handlers[event.type_]:
#handler(event)
# 调用通用处理函数进行处理
if self.__generalHandlers:
[handler(event) for handler in self.__generalHandlers]
#----------------------------------------------------------------------
def __runTimer(self):
"""运行在计时器线程中的循环函数"""
while self.__timerActive:
# 创建计时器事件
event = Event(type_=EVENT_TIMER)
# 向队列中存入计时器事件
self.put(event)
# 等待
sleep(self.__timerSleep)
#----------------------------------------------------------------------
def start(self, timer=True):
"""
        Start the engine
        timer: whether to start the timer as well
        """
        # Mark the engine as running
        self.__active = True
        # Start the event-processing thread
        self.__thread.start()
        # Start the timer; the timer event interval defaults to 1 second
if timer:
self.__timerActive = True
self.__timer.start()
#----------------------------------------------------------------------
def stop(self):
"""停止引擎"""
# 将引擎设为停止
self.__active = False
# 停止计时器
self.__timerActive = False
self.__timer.join()
# 等待事件处理线程退出
self.__thread.join()
#----------------------------------------------------------------------
def register(self, type_, handler):
"""注册事件处理函数监听"""
# 尝试获取该事件类型对应的处理函数列表,若无defaultDict会自动创建新的list
handlerList = self.__handlers[type_]
# 若要注册的处理器不在该事件的处理器列表中,则注册该事件
if handler not in handlerList:
handlerList.append(handler)
#----------------------------------------------------------------------
def unregister(self, type_, handler):
"""注销事件处理函数监听"""
# 尝试获取该事件类型对应的处理函数列表,若无则忽略该次注销请求
handlerList = self.__handlers[type_]
# 如果该函数存在于列表中,则移除
if handler in handlerList:
handlerList.remove(handler)
# 如果函数列表为空,则从引擎中移除该事件类型
if not handlerList:
del self.__handlers[type_]
#----------------------------------------------------------------------
def put(self, event):
"""向事件队列中存入事件"""
self.__queue.put(event)
#----------------------------------------------------------------------
def registerGeneralHandler(self, handler):
"""注册通用事件处理函数监听"""
if handler not in self.__generalHandlers:
self.__generalHandlers.append(handler)
#----------------------------------------------------------------------
def unregisterGeneralHandler(self, handler):
"""注销通用事件处理函数监听"""
if handler in self.__generalHandlers:
self.__generalHandlers.remove(handler)
########################################################################
class Event:
"""事件对象"""
#----------------------------------------------------------------------
def __init__(self, type_=None):
"""Constructor"""
        self.type_ = type_      # event type
        self.dict_ = {}         # dict holding the event's payload data
#----------------------------------------------------------------------
def test():
"""测试函数"""
import sys
from datetime import datetime
from PyQt4.QtCore import QCoreApplication
def simpletest(event):
        print('Handling the timer event fired every second: %s' % str(datetime.now()))
app = QCoreApplication(sys.argv)
ee = EventEngine2()
#ee.register(EVENT_TIMER, simpletest)
ee.registerGeneralHandler(simpletest)
ee.start()
app.exec_()
# Run this script directly to run the test
if __name__ == '__main__':
test() |
py | b401b28704f5f96a18704819a812cedef66b4357 | # Module doctest.
# Released to the public domain 16-Jan-2001, by Tim Peters ([email protected]).
# Major enhancements and refactoring by:
# Jim Fulton
# Edward Loper
# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
r"""Module doctest -- a framework for running examples in docstrings.
In simplest use, end each module M to be tested with:
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
Then running the module as a script will cause the examples in the
docstrings to get executed and verified:
python M.py
This won't display anything unless an example fails, in which case the
failing example(s) and the cause(s) of the failure(s) are printed to stdout
(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
line of output is "Test failed.".
Run it with the -v switch instead:
python M.py -v
and a detailed report of all examples tried is printed to stdout, along
with assorted summaries at the end.
You can force verbose mode by passing "verbose=True" to testmod, or prohibit
it by passing "verbose=False". In either of those cases, sys.argv is not
examined by testmod.
There are a variety of other ways to run doctests, including integration
with the unittest framework, and support for running non-Python text
files containing doctests. There are also many ways to override parts
of doctest's default behaviors. See the Library Reference Manual for
details.
"""
__docformat__ = 'reStructuredText en'
__all__ = [
# 0, Option Flags
'register_optionflag',
'DONT_ACCEPT_TRUE_FOR_1',
'DONT_ACCEPT_BLANKLINE',
'NORMALIZE_WHITESPACE',
'ELLIPSIS',
'SKIP',
'IGNORE_EXCEPTION_DETAIL',
'COMPARISON_FLAGS',
'REPORT_UDIFF',
'REPORT_CDIFF',
'REPORT_NDIFF',
'REPORT_ONLY_FIRST_FAILURE',
'REPORTING_FLAGS',
'FAIL_FAST',
# 1. Utility Functions
# 2. Example & DocTest
'Example',
'DocTest',
# 3. Doctest Parser
'DocTestParser',
# 4. Doctest Finder
'DocTestFinder',
# 5. Doctest Runner
'DocTestRunner',
'OutputChecker',
'DocTestFailure',
'UnexpectedException',
'DebugRunner',
# 6. Test Functions
'testmod',
'testfile',
'run_docstring_examples',
# 7. Unittest Support
'DocTestSuite',
'DocFileSuite',
'set_unittest_reportflags',
# 8. Debugging Support
'script_from_examples',
'testsource',
'debug_src',
'debug',
]
import __future__
import difflib
import inspect
import linecache
import os
import pdb
import re
import sys
import traceback
import unittest
from io import StringIO
from collections import namedtuple
TestResults = namedtuple('TestResults', 'failed attempted')
# There are 4 basic classes:
# - Example: a <source, want> pair, plus an intra-docstring line number.
# - DocTest: a collection of examples, parsed from a docstring, plus
# info about where the docstring came from (name, filename, lineno).
# - DocTestFinder: extracts DocTests from a given object's docstring and
# its contained objects' docstrings.
# - DocTestRunner: runs DocTest cases, and accumulates statistics.
#
# So the basic picture is:
#
# list of:
# +------+ +---------+ +-------+
# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
# +------+ +---------+ +-------+
# | Example |
# | ... |
# | Example |
# +---------+
# Option constants.
OPTIONFLAGS_BY_NAME = {}
def register_optionflag(name):
# Create a new flag unless `name` is already known.
return OPTIONFLAGS_BY_NAME.setdefault(name, 1 << len(OPTIONFLAGS_BY_NAME))
DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
ELLIPSIS = register_optionflag('ELLIPSIS')
SKIP = register_optionflag('SKIP')
IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')
COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
DONT_ACCEPT_BLANKLINE |
NORMALIZE_WHITESPACE |
ELLIPSIS |
SKIP |
IGNORE_EXCEPTION_DETAIL)
REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')
FAIL_FAST = register_optionflag('FAIL_FAST')
REPORTING_FLAGS = (REPORT_UDIFF |
REPORT_CDIFF |
REPORT_NDIFF |
REPORT_ONLY_FIRST_FAILURE |
FAIL_FAST)
# Special string markers for use in `want` strings:
BLANKLINE_MARKER = '<BLANKLINE>'
ELLIPSIS_MARKER = '...'
######################################################################
## Table of Contents
######################################################################
# 1. Utility Functions
# 2. Example & DocTest -- store test cases
# 3. DocTest Parser -- extracts examples from strings
# 4. DocTest Finder -- extracts test cases from objects
# 5. DocTest Runner -- runs test cases
# 6. Test Functions -- convenient wrappers for testing
# 7. Unittest Support
# 8. Debugging Support
# 9. Example Usage
######################################################################
## 1. Utility Functions
######################################################################
def _extract_future_flags(globs):
"""
Return the compiler-flags associated with the future features that
have been imported into the given namespace (globs).
"""
flags = 0
for fname in __future__.all_feature_names:
feature = globs.get(fname, None)
if feature is getattr(__future__, fname):
flags |= feature.compiler_flag
return flags
def _normalize_module(module, depth=2):
"""
Return the module specified by `module`. In particular:
- If `module` is a module, then return module.
- If `module` is a string, then import and return the
module with that name.
- If `module` is None, then return the calling module.
The calling module is assumed to be the module of
the stack frame at the given depth in the call stack.
"""
if inspect.ismodule(module):
return module
elif isinstance(module, str):
return __import__(module, globals(), locals(), ["*"])
elif module is None:
return sys.modules[sys._getframe(depth).f_globals['__name__']]
else:
raise TypeError("Expected a module, string, or None")
def _load_testfile(filename, package, module_relative, encoding):
if module_relative:
package = _normalize_module(package, 3)
filename = _module_relative_path(package, filename)
if getattr(package, '__loader__', None) is not None:
if hasattr(package.__loader__, 'get_data'):
file_contents = package.__loader__.get_data(filename)
file_contents = file_contents.decode(encoding)
# get_data() opens files as 'rb', so one must do the equivalent
# conversion as universal newlines would do.
return file_contents.replace(os.linesep, '\n'), filename
with open(filename, encoding=encoding) as f:
return f.read(), filename
def _indent(s, indent=4):
"""
Add the given number of space characters to the beginning of
every non-blank line in `s`, and return the result.
"""
# This regexp matches the start of non-blank lines:
return re.sub('(?m)^(?!$)', indent*' ', s)
def _exception_traceback(exc_info):
"""
Return a string containing a traceback message for the given
exc_info tuple (as returned by sys.exc_info()).
"""
# Get a traceback message.
excout = StringIO()
exc_type, exc_val, exc_tb = exc_info
traceback.print_exception(exc_type, exc_val, exc_tb, file=excout)
return excout.getvalue()
# Override some StringIO methods.
class _SpoofOut(StringIO):
def getvalue(self):
result = StringIO.getvalue(self)
# If anything at all was written, make sure there's a trailing
# newline. There's no way for the expected output to indicate
# that a trailing newline is missing.
if result and not result.endswith("\n"):
result += "\n"
return result
def truncate(self, size=None):
self.seek(size)
StringIO.truncate(self)
# Worst-case linear-time ellipsis matching.
def _ellipsis_match(want, got):
"""
Essentially the only subtle case:
>>> _ellipsis_match('aa...aa', 'aaa')
False
"""
if ELLIPSIS_MARKER not in want:
return want == got
# Find "the real" strings.
ws = want.split(ELLIPSIS_MARKER)
assert len(ws) >= 2
# Deal with exact matches possibly needed at one or both ends.
startpos, endpos = 0, len(got)
w = ws[0]
if w: # starts with exact match
if got.startswith(w):
startpos = len(w)
del ws[0]
else:
return False
w = ws[-1]
if w: # ends with exact match
if got.endswith(w):
endpos -= len(w)
del ws[-1]
else:
return False
if startpos > endpos:
# Exact end matches required more characters than we have, as in
# _ellipsis_match('aa...aa', 'aaa')
return False
# For the rest, we only need to find the leftmost non-overlapping
# match for each piece. If there's no overall match that way alone,
# there's no overall match period.
for w in ws:
# w may be '' at times, if there are consecutive ellipses, or
# due to an ellipsis at the start or end of `want`. That's OK.
# Search for an empty string succeeds, and doesn't change startpos.
startpos = got.find(w, startpos, endpos)
if startpos < 0:
return False
startpos += len(w)
return True
def _comment_line(line):
"Return a commented form of the given line"
line = line.rstrip()
if line:
return '# '+line
else:
return '#'
def _strip_exception_details(msg):
# Support for IGNORE_EXCEPTION_DETAIL.
# Get rid of everything except the exception name; in particular, drop
# the possibly dotted module path (if any) and the exception message (if
# any). We assume that a colon is never part of a dotted name, or of an
# exception name.
# E.g., given
# "foo.bar.MyError: la di da"
# return "MyError"
# Or for "abc.def" or "abc.def:\n" return "def".
start, end = 0, len(msg)
# The exception name must appear on the first line.
i = msg.find("\n")
if i >= 0:
end = i
# retain up to the first colon (if any)
i = msg.find(':', 0, end)
if i >= 0:
end = i
# retain just the exception name
i = msg.rfind('.', 0, end)
if i >= 0:
start = i+1
return msg[start: end]
class _OutputRedirectingPdb(pdb.Pdb):
"""
A specialized version of the python debugger that redirects stdout
to a given stream when interacting with the user. Stdout is *not*
redirected when traced code is executed.
"""
def __init__(self, out):
self.__out = out
self.__debugger_used = False
# do not play signal games in the pdb
pdb.Pdb.__init__(self, stdout=out, nosigint=True)
# still use input() to get user input
self.use_rawinput = 1
def set_trace(self, frame=None):
self.__debugger_used = True
if frame is None:
frame = sys._getframe().f_back
pdb.Pdb.set_trace(self, frame)
def set_continue(self):
# Calling set_continue unconditionally would break unit test
# coverage reporting, as Bdb.set_continue calls sys.settrace(None).
if self.__debugger_used:
pdb.Pdb.set_continue(self)
def trace_dispatch(self, *args):
# Redirect stdout to the given stream.
save_stdout = sys.stdout
sys.stdout = self.__out
# Call Pdb's trace dispatch method.
try:
return pdb.Pdb.trace_dispatch(self, *args)
finally:
sys.stdout = save_stdout
# [XX] Normalize with respect to os.path.pardir?
def _module_relative_path(module, test_path):
if not inspect.ismodule(module):
raise TypeError('Expected a module: %r' % module)
if test_path.startswith('/'):
raise ValueError('Module-relative files may not have absolute paths')
# Normalize the path. On Windows, replace "/" with "\".
test_path = os.path.join(*(test_path.split('/')))
# Find the base directory for the path.
if hasattr(module, '__file__'):
# A normal module/package
basedir = os.path.split(module.__file__)[0]
elif module.__name__ == '__main__':
# An interactive session.
if len(sys.argv)>0 and sys.argv[0] != '':
basedir = os.path.split(sys.argv[0])[0]
else:
basedir = os.curdir
else:
if hasattr(module, '__path__'):
for directory in module.__path__:
fullpath = os.path.join(directory, test_path)
if os.path.exists(fullpath):
return fullpath
# A module w/o __file__ (this includes builtins)
raise ValueError("Can't resolve paths relative to the module "
"%r (it has no __file__)"
% module.__name__)
# Combine the base directory and the test path.
return os.path.join(basedir, test_path)
######################################################################
## 2. Example & DocTest
######################################################################
## - An "example" is a <source, want> pair, where "source" is a
## fragment of source code, and "want" is the expected output for
## "source." The Example class also includes information about
## where the example was extracted from.
##
## - A "doctest" is a collection of examples, typically extracted from
## a string (such as an object's docstring). The DocTest class also
## includes information about where the string was extracted from.
class Example:
"""
A single doctest example, consisting of source code and expected
output. `Example` defines the following attributes:
- source: A single Python statement, always ending with a newline.
The constructor adds a newline if needed.
- want: The expected output from running the source code (either
from stdout, or a traceback in case of exception). `want` ends
with a newline unless it's empty, in which case it's an empty
string. The constructor adds a newline if needed.
- exc_msg: The exception message generated by the example, if
the example is expected to generate an exception; or `None` if
it is not expected to generate an exception. This exception
message is compared against the return value of
`traceback.format_exception_only()`. `exc_msg` ends with a
newline unless it's `None`. The constructor adds a newline
if needed.
- lineno: The line number within the DocTest string containing
this Example where the Example begins. This line number is
zero-based, with respect to the beginning of the DocTest.
- indent: The example's indentation in the DocTest string.
I.e., the number of space characters that precede the
example's first prompt.
- options: A dictionary mapping from option flags to True or
False, which is used to override default options for this
example. Any option flags not contained in this dictionary
are left at their default value (as specified by the
DocTestRunner's optionflags). By default, no options are set.
"""
def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
options=None):
# Normalize inputs.
if not source.endswith('\n'):
source += '\n'
if want and not want.endswith('\n'):
want += '\n'
if exc_msg is not None and not exc_msg.endswith('\n'):
exc_msg += '\n'
# Store properties.
self.source = source
self.want = want
self.lineno = lineno
self.indent = indent
if options is None: options = {}
self.options = options
self.exc_msg = exc_msg
def __eq__(self, other):
if type(self) is not type(other):
return NotImplemented
return self.source == other.source and \
self.want == other.want and \
self.lineno == other.lineno and \
self.indent == other.indent and \
self.options == other.options and \
self.exc_msg == other.exc_msg
def __hash__(self):
return hash((self.source, self.want, self.lineno, self.indent,
self.exc_msg))
class DocTest:
"""
A collection of doctest examples that should be run in a single
namespace. Each `DocTest` defines the following attributes:
- examples: the list of examples.
- globs: The namespace (aka globals) that the examples should
be run in.
- name: A name identifying the DocTest (typically, the name of
the object whose docstring this DocTest was extracted from).
- filename: The name of the file that this DocTest was extracted
from, or `None` if the filename is unknown.
- lineno: The line number within filename where this DocTest
begins, or `None` if the line number is unavailable. This
line number is zero-based, with respect to the beginning of
the file.
- docstring: The string that the examples were extracted from,
or `None` if the string is unavailable.
"""
def __init__(self, examples, globs, name, filename, lineno, docstring):
"""
Create a new DocTest containing the given examples. The
DocTest's globals are initialized with a copy of `globs`.
"""
assert not isinstance(examples, str), \
"DocTest no longer accepts str; use DocTestParser instead"
self.examples = examples
self.docstring = docstring
self.globs = globs.copy()
self.name = name
self.filename = filename
self.lineno = lineno
def __repr__(self):
if len(self.examples) == 0:
examples = 'no examples'
elif len(self.examples) == 1:
examples = '1 example'
else:
examples = '%d examples' % len(self.examples)
return ('<%s %s from %s:%s (%s)>' %
(self.__class__.__name__,
self.name, self.filename, self.lineno, examples))
def __eq__(self, other):
if type(self) is not type(other):
return NotImplemented
return self.examples == other.examples and \
self.docstring == other.docstring and \
self.globs == other.globs and \
self.name == other.name and \
self.filename == other.filename and \
self.lineno == other.lineno
def __hash__(self):
return hash((self.docstring, self.name, self.filename, self.lineno))
# This lets us sort tests by name:
def __lt__(self, other):
if not isinstance(other, DocTest):
return NotImplemented
return ((self.name, self.filename, self.lineno, id(self))
<
(other.name, other.filename, other.lineno, id(other)))
######################################################################
## 3. DocTestParser
######################################################################
class DocTestParser:
"""
A class used to parse strings containing doctest examples.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
.+$\n? # But any other line
)*)
''', re.MULTILINE | re.VERBOSE)
# A regular expression for handling `want` strings that contain
# expected exceptions. It divides `want` into three pieces:
# - the traceback header line (`hdr`)
# - the traceback stack (`stack`)
# - the exception message (`msg`), as generated by
# traceback.format_exception_only()
# `msg` may have multiple lines. We assume/require that the
# exception message is the first non-indented line starting with a word
# character following the traceback header line.
_EXCEPTION_RE = re.compile(r"""
# Grab the traceback header. Different versions of Python have
# said different things on the first traceback line.
^(?P<hdr> Traceback\ \(
(?: most\ recent\ call\ last
| innermost\ last
) \) :
)
\s* $ # toss trailing whitespace on the header.
(?P<stack> .*?) # don't blink: absorb stuff until...
^ (?P<msg> \w+ .*) # a line *starts* with alphanum.
""", re.VERBOSE | re.MULTILINE | re.DOTALL)
# A callable returning a true value iff its argument is a blank line
# or contains a single comment.
_IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
def parse(self, string, name='<string>'):
"""
Divide the given string into examples and intervening text,
and return them as a list of alternating Examples and strings.
Line numbers for the Examples are 0-based. The optional
argument `name` is a name identifying this string, and is only
used for error messages.
"""
string = string.expandtabs()
# If all lines begin with the same indentation, then strip it.
min_indent = self._min_indent(string)
if min_indent > 0:
string = '\n'.join([l[min_indent:] for l in string.split('\n')])
output = []
charno, lineno = 0, 0
# Find all doctest examples in the string:
for m in self._EXAMPLE_RE.finditer(string):
# Add the pre-example text to `output`.
output.append(string[charno:m.start()])
# Update lineno (lines before this example)
lineno += string.count('\n', charno, m.start())
# Extract info from the regexp match.
(source, options, want, exc_msg) = \
self._parse_example(m, name, lineno)
# Create an Example, and add it to the list.
if not self._IS_BLANK_OR_COMMENT(source):
output.append( Example(source, want, exc_msg,
lineno=lineno,
indent=min_indent+len(m.group('indent')),
options=options) )
# Update lineno (lines inside this example)
lineno += string.count('\n', m.start(), m.end())
# Update charno.
charno = m.end()
# Add any remaining post-example text to `output`.
output.append(string[charno:])
return output
def get_doctest(self, string, globs, name, filename, lineno):
"""
Extract all doctest examples from the given string, and
collect them into a `DocTest` object.
`globs`, `name`, `filename`, and `lineno` are attributes for
the new `DocTest` object. See the documentation for `DocTest`
for more information.
"""
return DocTest(self.get_examples(string, name), globs,
name, filename, lineno, string)
def get_examples(self, string, name='<string>'):
"""
Extract all doctest examples from the given string, and return
them as a list of `Example` objects. Line numbers are
0-based, because it's most common in doctests that nothing
interesting appears on the same line as opening triple-quote,
and so the first interesting line is called \"line 1\" then.
The optional argument `name` is a name identifying this
string, and is only used for error messages.
"""
return [x for x in self.parse(string, name)
if isinstance(x, Example)]
def _parse_example(self, m, name, lineno):
"""
Given a regular expression match from `_EXAMPLE_RE` (`m`),
return a pair `(source, want)`, where `source` is the matched
example's source code (with prompts and indentation stripped);
and `want` is the example's expected output (with indentation
stripped).
`name` is the string's name, and `lineno` is the line number
where the example starts; both are used for error messages.
"""
# Get the example's indentation level.
indent = len(m.group('indent'))
# Divide source into lines; check that they're properly
# indented; and then strip their indentation & prompts.
source_lines = m.group('source').split('\n')
self._check_prompt_blank(source_lines, indent, name, lineno)
self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
source = '\n'.join([sl[indent+4:] for sl in source_lines])
# Divide want into lines; check that it's properly indented; and
# then strip the indentation. Spaces before the last newline should
# be preserved, so plain rstrip() isn't good enough.
want = m.group('want')
want_lines = want.split('\n')
if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
del want_lines[-1] # forget final newline & spaces after it
self._check_prefix(want_lines, ' '*indent, name,
lineno + len(source_lines))
want = '\n'.join([wl[indent:] for wl in want_lines])
# If `want` contains a traceback message, then extract it.
m = self._EXCEPTION_RE.match(want)
if m:
exc_msg = m.group('msg')
else:
exc_msg = None
# Extract options from the source.
options = self._find_options(source, name, lineno)
return source, options, want, exc_msg
# This regular expression looks for option directives in the
# source code of an example. Option directives are comments
# starting with "doctest:". Warning: this may give false
# positives for string-literals that contain the string
# "#doctest:". Eliminating these false positives would require
# actually parsing the string; but we limit them by ignoring any
# line containing "#doctest:" that is *followed* by a quote mark.
_OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
re.MULTILINE)
def _find_options(self, source, name, lineno):
"""
Return a dictionary containing option overrides extracted from
option directives in the given source string.
`name` is the string's name, and `lineno` is the line number
where the example starts; both are used for error messages.
"""
options = {}
# (note: with the current regexp, this will match at most once:)
for m in self._OPTION_DIRECTIVE_RE.finditer(source):
option_strings = m.group(1).replace(',', ' ').split()
for option in option_strings:
if (option[0] not in '+-' or
option[1:] not in OPTIONFLAGS_BY_NAME):
raise ValueError('line %r of the doctest for %s '
'has an invalid option: %r' %
(lineno+1, name, option))
flag = OPTIONFLAGS_BY_NAME[option[1:]]
options[flag] = (option[0] == '+')
if options and self._IS_BLANK_OR_COMMENT(source):
raise ValueError('line %r of the doctest for %s has an option '
'directive on a line with no example: %r' %
(lineno, name, source))
return options
# This regular expression finds the indentation of every non-blank
# line in a string.
_INDENT_RE = re.compile(r'^([ ]*)(?=\S)', re.MULTILINE)
def _min_indent(self, s):
"Return the minimum indentation of any non-blank line in `s`"
indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
if len(indents) > 0:
return min(indents)
else:
return 0
def _check_prompt_blank(self, lines, indent, name, lineno):
"""
Given the lines of a source string (including prompts and
leading indentation), check to make sure that every prompt is
followed by a space character. If any line is not followed by
a space character, then raise ValueError.
"""
for i, line in enumerate(lines):
if len(line) >= indent+4 and line[indent+3] != ' ':
raise ValueError('line %r of the docstring for %s '
'lacks blank after %s: %r' %
(lineno+i+1, name,
line[indent:indent+3], line))
def _check_prefix(self, lines, prefix, name, lineno):
"""
Check that every line in the given list starts with the given
prefix; if any line does not, then raise a ValueError.
"""
for i, line in enumerate(lines):
if line and not line.startswith(prefix):
raise ValueError('line %r of the docstring for %s has '
'inconsistent leading whitespace: %r' %
(lineno+i+1, name, line))
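# Illustrative usage sketch: the `_example_*` helper below is hypothetical
# and is never called here; it only shows how DocTestParser.parse() splits a
# docstring into prose strings and Example objects.
def _example_doctestparser_usage():
    parser = DocTestParser()
    pieces = parser.parse("Some prose.\n>>> 1 + 1\n2\n", name='<example>')
    # `pieces` alternates plain-text strings and Example instances; the one
    # Example here has source '1 + 1\n' and want '2\n'.
    return [p for p in pieces if isinstance(p, Example)]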
######################################################################
## 4. DocTest Finder
######################################################################
class DocTestFinder:
"""
A class used to extract the DocTests that are relevant to a given
object, from its docstring and the docstrings of its contained
objects. Doctests can currently be extracted from the following
object types: modules, functions, classes, methods, staticmethods,
classmethods, and properties.
"""
def __init__(self, verbose=False, parser=DocTestParser(),
recurse=True, exclude_empty=True):
"""
Create a new doctest finder.
The optional argument `parser` specifies a class or
function that should be used to create new DocTest objects (or
objects that implement the same interface as DocTest). The
signature for this factory function should match the signature
of the DocTest constructor.
If the optional argument `recurse` is false, then `find` will
only examine the given object, and not any contained objects.
If the optional argument `exclude_empty` is false, then `find`
will include tests for objects with empty docstrings.
"""
self._parser = parser
self._verbose = verbose
self._recurse = recurse
self._exclude_empty = exclude_empty
def find(self, obj, name=None, module=None, globs=None, extraglobs=None):
"""
Return a list of the DocTests that are defined by the given
object's docstring, or by any of its contained objects'
docstrings.
The optional parameter `module` is the module that contains
the given object. If the module is not specified or is None, then
the test finder will attempt to automatically determine the
correct module. The object's module is used:
- As a default namespace, if `globs` is not specified.
- To prevent the DocTestFinder from extracting DocTests
from objects that are imported from other modules.
- To find the name of the file containing the object.
- To help find the line number of the object within its
file.
Contained objects whose module does not match `module` are ignored.
If `module` is False, no attempt to find the module will be made.
This is obscure, of use mostly in tests: if `module` is False, or
is None but cannot be found automatically, then all objects are
considered to belong to the (non-existent) module, so all contained
objects will (recursively) be searched for doctests.
The globals for each DocTest is formed by combining `globs`
and `extraglobs` (bindings in `extraglobs` override bindings
in `globs`). A new copy of the globals dictionary is created
for each DocTest. If `globs` is not specified, then it
defaults to the module's `__dict__`, if specified, or {}
otherwise. If `extraglobs` is not specified, then it defaults
to {}.
"""
# If name was not specified, then extract it from the object.
if name is None:
name = getattr(obj, '__name__', None)
if name is None:
raise ValueError("DocTestFinder.find: name must be given "
"when obj.__name__ doesn't exist: %r" %
(type(obj),))
# Find the module that contains the given object (if obj is
# a module, then module=obj.). Note: this may fail, in which
# case module will be None.
if module is False:
module = None
elif module is None:
module = inspect.getmodule(obj)
# Read the module's source code. This is used by
# DocTestFinder._find_lineno to find the line number for a
# given object's docstring.
try:
file = inspect.getsourcefile(obj)
except TypeError:
source_lines = None
else:
if not file:
# Check to see if it's one of our special internal "files"
# (see __patched_linecache_getlines).
file = inspect.getfile(obj)
if not file[0]+file[-2:] == '<]>': file = None
if file is None:
source_lines = None
else:
if module is not None:
# Supply the module globals in case the module was
# originally loaded via a PEP 302 loader and
# file is not a valid filesystem path
source_lines = linecache.getlines(file, module.__dict__)
else:
# No access to a loader, so assume it's a normal
# filesystem path
source_lines = linecache.getlines(file)
if not source_lines:
source_lines = None
# Initialize globals, and merge in extraglobs.
if globs is None:
if module is None:
globs = {}
else:
globs = module.__dict__.copy()
else:
globs = globs.copy()
if extraglobs is not None:
globs.update(extraglobs)
if '__name__' not in globs:
globs['__name__'] = '__main__' # provide a default module name
# Recursively explore `obj`, extracting DocTests.
tests = []
self._find(tests, obj, name, module, source_lines, globs, {})
# Sort the tests by alpha order of names, for consistency in
# verbose-mode output. This was a feature of doctest in Pythons
# <= 2.3 that got lost by accident in 2.4. It was repaired in
# 2.4.4 and 2.5.
tests.sort()
return tests
def _from_module(self, module, object):
"""
Return true if the given object is defined in the given
module.
"""
if module is None:
return True
elif inspect.getmodule(object) is not None:
return module is inspect.getmodule(object)
elif inspect.isfunction(object):
return module.__dict__ is object.__globals__
elif inspect.ismethoddescriptor(object):
if hasattr(object, '__objclass__') and hasattr(object.__objclass__, '__module__'):
obj_mod = object.__objclass__.__module__
elif hasattr(object, '__module__'):
obj_mod = object.__module__
else:
return True # [XX] no easy way to tell otherwise
return module.__name__ == obj_mod
elif inspect.isclass(object):
try:
return module.__name__ == object.__module__
except:
return True
elif hasattr(object, '__module__'):
return module.__name__ == object.__module__
elif isinstance(object, property):
            return True  # [XX] no easy way to tell otherwise
else:
raise ValueError("object must be a class or function")
def _find(self, tests, obj, name, module, source_lines, globs, seen):
"""
Find tests for the given object and any contained objects, and
add them to `tests`.
"""
if self._verbose:
print('Finding tests in %s' % name)
# If we've already processed this object, then ignore it.
if id(obj) in seen:
return
seen[id(obj)] = 1
# Find a test for this object, and add it to the list of tests.
test = self._get_test(obj, name, module, globs, source_lines)
if test is not None:
tests.append(test)
# Look for tests in a module's contained objects.
if inspect.ismodule(obj) and self._recurse:
for valname, val in obj.__dict__.items():
valname = '%s.%s' % (name, valname)
# Recurse to functions & classes.
if ((inspect.isroutine(inspect.unwrap(val))
or inspect.isclass(val)) and
self._from_module(module, val)):
self._find(tests, val, valname, module, source_lines,
globs, seen)
# Look for tests in a module's __test__ dictionary.
if inspect.ismodule(obj) and self._recurse:
for valname, val in getattr(obj, '__test__', {}).items():
if not isinstance(valname, str):
raise ValueError("DocTestFinder.find: __test__ keys "
"must be strings: %r" %
(type(valname),))
if not (inspect.isroutine(val) or inspect.isclass(val) or
inspect.ismodule(val) or isinstance(val, str)):
raise ValueError("DocTestFinder.find: __test__ values "
"must be strings, functions, methods, "
"classes, or modules: %r" %
(type(val),))
valname = '%s.__test__.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
# Look for tests in a class's contained objects.
if inspect.isclass(obj) and self._recurse:
for valname, val in obj.__dict__.items():
# Special handling for staticmethod/classmethod.
if isinstance(val, staticmethod):
val = getattr(obj, valname)
if isinstance(val, classmethod):
val = getattr(obj, valname).__func__
# Recurse to methods, properties, and nested classes.
if ((inspect.isroutine(val) or inspect.isclass(val) or
isinstance(val, property)) and
self._from_module(module, val)):
valname = '%s.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
def _get_test(self, obj, name, module, globs, source_lines):
"""
Return a DocTest for the given object, if it defines a docstring;
otherwise, return None.
"""
# Extract the object's docstring. If it doesn't have one,
# then return None (no test for this object).
if isinstance(obj, str):
docstring = obj
else:
try:
if obj.__doc__ is None:
docstring = ''
else:
docstring = obj.__doc__
if not isinstance(docstring, str):
docstring = str(docstring)
except (TypeError, AttributeError):
docstring = ''
# Find the docstring's location in the file.
lineno = self._find_lineno(obj, source_lines)
# Don't bother if the docstring is empty.
if self._exclude_empty and not docstring:
return None
# Return a DocTest for this object.
if module is None:
filename = None
else:
# __file__ can be None for namespace packages.
filename = getattr(module, '__file__', None) or module.__name__
if filename[-4:] == ".pyc":
filename = filename[:-1]
return self._parser.get_doctest(docstring, globs, name,
filename, lineno)
def _find_lineno(self, obj, source_lines):
"""
Return a line number of the given object's docstring. Note:
this method assumes that the object has a docstring.
"""
lineno = None
# Find the line number for modules.
if inspect.ismodule(obj):
lineno = 0
# Find the line number for classes.
# Note: this could be fooled if a class is defined multiple
# times in a single file.
if inspect.isclass(obj):
if source_lines is None:
return None
pat = re.compile(r'^\s*class\s*%s\b' %
getattr(obj, '__name__', '-'))
for i, line in enumerate(source_lines):
if pat.match(line):
lineno = i
break
# Find the line number for functions & methods.
if inspect.ismethod(obj): obj = obj.__func__
if inspect.isfunction(obj): obj = obj.__code__
if inspect.istraceback(obj): obj = obj.tb_frame
if inspect.isframe(obj): obj = obj.f_code
if inspect.iscode(obj):
lineno = getattr(obj, 'co_firstlineno', None)-1
# Find the line number where the docstring starts. Assume
# that it's the first line that begins with a quote mark.
# Note: this could be fooled by a multiline function
# signature, where a continuation line begins with a quote
# mark.
if lineno is not None:
if source_lines is None:
return lineno+1
pat = re.compile(r'(^|.*:)\s*\w*("|\')')
for lineno in range(lineno, len(source_lines)):
if pat.match(source_lines[lineno]):
return lineno
# We couldn't find the line number.
return None
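# Illustrative usage sketch (hypothetical helper, never called here): a
# DocTestFinder pulls DocTest objects out of an object's docstring; `globs`
# supplies the namespace the examples will later be executed in.
def _example_doctestfinder_usage():
    def add(a, b):
        """Add two integers.
        >>> add(2, 3)
        5
        """
        return a + b
    finder = DocTestFinder(recurse=False)
    return finder.find(add, name='add', globs={'add': add})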
######################################################################
## 5. DocTest Runner
######################################################################
class DocTestRunner:
"""
A class used to run DocTest test cases, and accumulate statistics.
The `run` method is used to process a single DocTest case. It
returns a tuple `(f, t)`, where `t` is the number of test cases
tried, and `f` is the number of test cases that failed.
>>> tests = DocTestFinder().find(_TestClass)
>>> runner = DocTestRunner(verbose=False)
>>> tests.sort(key = lambda test: test.name)
>>> for test in tests:
... print(test.name, '->', runner.run(test))
_TestClass -> TestResults(failed=0, attempted=2)
_TestClass.__init__ -> TestResults(failed=0, attempted=2)
_TestClass.get -> TestResults(failed=0, attempted=2)
_TestClass.square -> TestResults(failed=0, attempted=1)
The `summarize` method prints a summary of all the test cases that
have been run by the runner, and returns an aggregated `(f, t)`
tuple:
>>> runner.summarize(verbose=1)
4 items passed all tests:
2 tests in _TestClass
2 tests in _TestClass.__init__
2 tests in _TestClass.get
1 tests in _TestClass.square
7 tests in 4 items.
7 passed and 0 failed.
Test passed.
TestResults(failed=0, attempted=7)
The aggregated number of tried examples and failed examples is
also available via the `tries` and `failures` attributes:
>>> runner.tries
7
>>> runner.failures
0
The comparison between expected outputs and actual outputs is done
by an `OutputChecker`. This comparison may be customized with a
number of option flags; see the documentation for `testmod` for
more information. If the option flags are insufficient, then the
comparison may also be customized by passing a subclass of
`OutputChecker` to the constructor.
The test runner's display output can be controlled in two ways.
    First, an output function (`out`) can be passed to
`TestRunner.run`; this function will be called with strings that
should be displayed. It defaults to `sys.stdout.write`. If
capturing the output is not sufficient, then the display output
    can also be customized by subclassing DocTestRunner, and
overriding the methods `report_start`, `report_success`,
`report_unexpected_exception`, and `report_failure`.
"""
# This divider string is used to separate failure messages, and to
# separate sections of the summary.
DIVIDER = "*" * 70
def __init__(self, checker=None, verbose=None, optionflags=0):
"""
Create a new test runner.
Optional keyword arg `checker` is the `OutputChecker` that
should be used to compare the expected outputs and actual
outputs of doctest examples.
Optional keyword arg 'verbose' prints lots of stuff if true,
only failures if false; by default, it's true iff '-v' is in
sys.argv.
Optional argument `optionflags` can be used to control how the
test runner compares expected output to actual output, and how
it displays failures. See the documentation for `testmod` for
more information.
"""
self._checker = checker or OutputChecker()
if verbose is None:
verbose = '-v' in sys.argv
self._verbose = verbose
self.optionflags = optionflags
self.original_optionflags = optionflags
# Keep track of the examples we've run.
self.tries = 0
self.failures = 0
self._name2ft = {}
# Create a fake output target for capturing doctest output.
self._fakeout = _SpoofOut()
#/////////////////////////////////////////////////////////////////
# Reporting methods
#/////////////////////////////////////////////////////////////////
def report_start(self, out, test, example):
"""
Report that the test runner is about to process the given
example. (Only displays a message if verbose=True)
"""
if self._verbose:
if example.want:
out('Trying:\n' + _indent(example.source) +
'Expecting:\n' + _indent(example.want))
else:
out('Trying:\n' + _indent(example.source) +
'Expecting nothing\n')
def report_success(self, out, test, example, got):
"""
Report that the given example ran successfully. (Only
displays a message if verbose=True)
"""
if self._verbose:
out("ok\n")
def report_failure(self, out, test, example, got):
"""
Report that the given example failed.
"""
out(self._failure_header(test, example) +
self._checker.output_difference(example, got, self.optionflags))
def report_unexpected_exception(self, out, test, example, exc_info):
"""
Report that the given example raised an unexpected exception.
"""
out(self._failure_header(test, example) +
'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
def _failure_header(self, test, example):
out = [self.DIVIDER]
if test.filename:
if test.lineno is not None and example.lineno is not None:
lineno = test.lineno + example.lineno + 1
else:
lineno = '?'
out.append('File "%s", line %s, in %s' %
(test.filename, lineno, test.name))
else:
out.append('Line %s, in %s' % (example.lineno+1, test.name))
out.append('Failed example:')
source = example.source
out.append(_indent(source))
return '\n'.join(out)
#/////////////////////////////////////////////////////////////////
# DocTest Running
#/////////////////////////////////////////////////////////////////
def __run(self, test, compileflags, out):
"""
Run the examples in `test`. Write the outcome of each example
with one of the `DocTestRunner.report_*` methods, using the
writer function `out`. `compileflags` is the set of compiler
flags that should be used to execute examples. Return a tuple
`(f, t)`, where `t` is the number of examples tried, and `f`
is the number of examples that failed. The examples are run
in the namespace `test.globs`.
"""
# Keep track of the number of failures and tries.
failures = tries = 0
# Save the option flags (since option directives can be used
# to modify them).
original_optionflags = self.optionflags
SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
check = self._checker.check_output
# Process each example.
for examplenum, example in enumerate(test.examples):
# If REPORT_ONLY_FIRST_FAILURE is set, then suppress
# reporting after the first failure.
quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
failures > 0)
# Merge in the example's options.
self.optionflags = original_optionflags
if example.options:
for (optionflag, val) in example.options.items():
if val:
self.optionflags |= optionflag
else:
self.optionflags &= ~optionflag
# If 'SKIP' is set, then skip this example.
if self.optionflags & SKIP:
continue
# Record that we started this example.
tries += 1
if not quiet:
self.report_start(out, test, example)
# Use a special filename for compile(), so we can retrieve
# the source code during interactive debugging (see
# __patched_linecache_getlines).
filename = '<doctest %s[%d]>' % (test.name, examplenum)
# Run the example in the given context (globs), and record
# any exception that gets raised. (But don't intercept
# keyboard interrupts.)
try:
# Don't blink! This is where the user's code gets run.
exec(compile(example.source, filename, "single",
compileflags, 1), test.globs)
self.debugger.set_continue() # ==== Example Finished ====
exception = None
except KeyboardInterrupt:
raise
except:
exception = sys.exc_info()
self.debugger.set_continue() # ==== Example Finished ====
got = self._fakeout.getvalue() # the actual output
self._fakeout.truncate(0)
outcome = FAILURE # guilty until proved innocent or insane
# If the example executed without raising any exceptions,
# verify its output.
if exception is None:
if check(example.want, got, self.optionflags):
outcome = SUCCESS
# The example raised an exception: check if it was expected.
else:
exc_msg = traceback.format_exception_only(*exception[:2])[-1]
if not quiet:
got += _exception_traceback(exception)
# If `example.exc_msg` is None, then we weren't expecting
# an exception.
if example.exc_msg is None:
outcome = BOOM
# We expected an exception: see whether it matches.
elif check(example.exc_msg, exc_msg, self.optionflags):
outcome = SUCCESS
# Another chance if they didn't care about the detail.
elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
if check(_strip_exception_details(example.exc_msg),
_strip_exception_details(exc_msg),
self.optionflags):
outcome = SUCCESS
# Report the outcome.
if outcome is SUCCESS:
if not quiet:
self.report_success(out, test, example, got)
elif outcome is FAILURE:
if not quiet:
self.report_failure(out, test, example, got)
failures += 1
elif outcome is BOOM:
if not quiet:
self.report_unexpected_exception(out, test, example,
exception)
failures += 1
else:
assert False, ("unknown outcome", outcome)
if failures and self.optionflags & FAIL_FAST:
break
# Restore the option flags (in case they were modified)
self.optionflags = original_optionflags
# Record and return the number of failures and tries.
self.__record_outcome(test, failures, tries)
return TestResults(failures, tries)
def __record_outcome(self, test, f, t):
"""
Record the fact that the given DocTest (`test`) generated `f`
failures out of `t` tried examples.
"""
f2, t2 = self._name2ft.get(test.name, (0,0))
self._name2ft[test.name] = (f+f2, t+t2)
self.failures += f
self.tries += t
__LINECACHE_FILENAME_RE = re.compile(r'<doctest '
r'(?P<name>.+)'
r'\[(?P<examplenum>\d+)\]>$')
def __patched_linecache_getlines(self, filename, module_globals=None):
m = self.__LINECACHE_FILENAME_RE.match(filename)
if m and m.group('name') == self.test.name:
example = self.test.examples[int(m.group('examplenum'))]
return example.source.splitlines(keepends=True)
else:
return self.save_linecache_getlines(filename, module_globals)
def run(self, test, compileflags=None, out=None, clear_globs=True):
"""
Run the examples in `test`, and display the results using the
writer function `out`.
The examples are run in the namespace `test.globs`. If
`clear_globs` is true (the default), then this namespace will
be cleared after the test runs, to help with garbage
collection. If you would like to examine the namespace after
the test completes, then use `clear_globs=False`.
`compileflags` gives the set of flags that should be used by
the Python compiler when running the examples. If not
specified, then it will default to the set of future-import
flags that apply to `globs`.
The output of each example is checked using
`DocTestRunner.check_output`, and the results are formatted by
the `DocTestRunner.report_*` methods.
"""
self.test = test
if compileflags is None:
compileflags = _extract_future_flags(test.globs)
save_stdout = sys.stdout
if out is None:
encoding = save_stdout.encoding
if encoding is None or encoding.lower() == 'utf-8':
out = save_stdout.write
else:
# Use backslashreplace error handling on write
def out(s):
s = str(s.encode(encoding, 'backslashreplace'), encoding)
save_stdout.write(s)
sys.stdout = self._fakeout
# Patch pdb.set_trace to restore sys.stdout during interactive
# debugging (so it's not still redirected to self._fakeout).
# Note that the interactive output will go to *our*
# save_stdout, even if that's not the real sys.stdout; this
# allows us to write test cases for the set_trace behavior.
save_trace = sys.gettrace()
save_set_trace = pdb.set_trace
self.debugger = _OutputRedirectingPdb(save_stdout)
self.debugger.reset()
pdb.set_trace = self.debugger.set_trace
# Patch linecache.getlines, so we can see the example's source
# when we're inside the debugger.
self.save_linecache_getlines = linecache.getlines
linecache.getlines = self.__patched_linecache_getlines
# Make sure sys.displayhook just prints the value to stdout
save_displayhook = sys.displayhook
sys.displayhook = sys.__displayhook__
try:
return self.__run(test, compileflags, out)
finally:
sys.stdout = save_stdout
pdb.set_trace = save_set_trace
sys.settrace(save_trace)
linecache.getlines = self.save_linecache_getlines
sys.displayhook = save_displayhook
if clear_globs:
test.globs.clear()
import builtins
builtins._ = None
#/////////////////////////////////////////////////////////////////
# Summarization
#/////////////////////////////////////////////////////////////////
def summarize(self, verbose=None):
"""
Print a summary of all the test cases that have been run by
this DocTestRunner, and return a tuple `(f, t)`, where `f` is
the total number of failed examples, and `t` is the total
number of tried examples.
The optional `verbose` argument controls how detailed the
summary is. If the verbosity is not specified, then the
DocTestRunner's verbosity is used.
"""
if verbose is None:
verbose = self._verbose
notests = []
passed = []
failed = []
totalt = totalf = 0
for x in self._name2ft.items():
name, (f, t) = x
assert f <= t
totalt += t
totalf += f
if t == 0:
notests.append(name)
elif f == 0:
passed.append( (name, t) )
else:
failed.append(x)
if verbose:
if notests:
print(len(notests), "items had no tests:")
notests.sort()
for thing in notests:
print(" ", thing)
if passed:
print(len(passed), "items passed all tests:")
passed.sort()
for thing, count in passed:
print(" %3d tests in %s" % (count, thing))
if failed:
print(self.DIVIDER)
print(len(failed), "items had failures:")
failed.sort()
for thing, (f, t) in failed:
print(" %3d of %3d in %s" % (f, t, thing))
if verbose:
print(totalt, "tests in", len(self._name2ft), "items.")
print(totalt - totalf, "passed and", totalf, "failed.")
if totalf:
print("***Test Failed***", totalf, "failures.")
elif verbose:
print("Test passed.")
return TestResults(totalf, totalt)
#/////////////////////////////////////////////////////////////////
# Backward compatibility cruft to maintain doctest.master.
#/////////////////////////////////////////////////////////////////
def merge(self, other):
d = self._name2ft
for name, (f, t) in other._name2ft.items():
if name in d:
# Don't print here by default, since doing
# so breaks some of the buildbots
#print("*** DocTestRunner.merge: '" + name + "' in both" \
# " testers; summing outcomes.")
f2, t2 = d[name]
f = f + f2
t = t + t2
d[name] = f, t
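# Illustrative usage sketch (hypothetical helper, never called here): a
# DocTest built by DocTestParser.get_doctest() is executed with
# DocTestRunner.run(); passing `out` collects report text in a list instead
# of writing it to sys.stdout.
def _example_doctestrunner_usage():
    test = DocTestParser().get_doctest('>>> 2 + 2\n4\n', {}, 'demo',
                                       '<demo>', 0)
    report = []
    runner = DocTestRunner(verbose=False)
    results = runner.run(test, out=report.append)
    # results == TestResults(failed=0, attempted=1); `report` stays empty
    # because nothing failed and verbose output is off.
    return results, ''.join(report)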
class OutputChecker:
"""
    A class used to check whether the actual output from a doctest
example matches the expected output. `OutputChecker` defines two
methods: `check_output`, which compares a given pair of outputs,
and returns true if they match; and `output_difference`, which
returns a string describing the differences between two outputs.
"""
def _toAscii(self, s):
"""
Convert string to hex-escaped ASCII string.
"""
return str(s.encode('ASCII', 'backslashreplace'), "ASCII")
def check_output(self, want, got, optionflags):
"""
Return True iff the actual output from an example (`got`)
matches the expected output (`want`). These strings are
always considered to match if they are identical; but
depending on what option flags the test runner is using,
several non-exact match types are also possible. See the
documentation for `TestRunner` for more information about
option flags.
"""
        # If `want` contains a hex-escaped character such as "\u1234",
        # then `want` is a string of six characters (e.g. [\,u,1,2,3,4]).
        # On the other hand, `got` could be another sequence of
        # characters such as [\u1234], so `want` and `got` should both
        # be folded to hex-escaped ASCII strings before comparing.
got = self._toAscii(got)
want = self._toAscii(want)
# Handle the common case first, for efficiency:
# if they're string-identical, always return true.
if got == want:
return True
# The values True and False replaced 1 and 0 as the return
# value for boolean comparisons in Python 2.3.
if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
if (got,want) == ("True\n", "1\n"):
return True
if (got,want) == ("False\n", "0\n"):
return True
# <BLANKLINE> can be used as a special sequence to signify a
# blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
if not (optionflags & DONT_ACCEPT_BLANKLINE):
# Replace <BLANKLINE> in want with a blank line.
want = re.sub(r'(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
'', want)
# If a line in got contains only spaces, then remove the
# spaces.
got = re.sub(r'(?m)^[^\S\n]+$', '', got)
if got == want:
return True
# This flag causes doctest to ignore any differences in the
# contents of whitespace strings. Note that this can be used
# in conjunction with the ELLIPSIS flag.
if optionflags & NORMALIZE_WHITESPACE:
got = ' '.join(got.split())
want = ' '.join(want.split())
if got == want:
return True
# The ELLIPSIS flag says to let the sequence "..." in `want`
# match any substring in `got`.
if optionflags & ELLIPSIS:
if _ellipsis_match(want, got):
return True
# We didn't find any match; return false.
return False
# Should we do a fancy diff?
def _do_a_fancy_diff(self, want, got, optionflags):
# Not unless they asked for a fancy diff.
if not optionflags & (REPORT_UDIFF |
REPORT_CDIFF |
REPORT_NDIFF):
return False
# If expected output uses ellipsis, a meaningful fancy diff is
# too hard ... or maybe not. In two real-life failures Tim saw,
# a diff was a major help anyway, so this is commented out.
# [todo] _ellipsis_match() knows which pieces do and don't match,
# and could be the basis for a kick-ass diff in this case.
##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
## return False
# ndiff does intraline difference marking, so can be useful even
# for 1-line differences.
if optionflags & REPORT_NDIFF:
return True
# The other diff types need at least a few lines to be helpful.
return want.count('\n') > 2 and got.count('\n') > 2
def output_difference(self, example, got, optionflags):
"""
Return a string describing the differences between the
expected output for a given example (`example`) and the actual
output (`got`). `optionflags` is the set of option flags used
to compare `want` and `got`.
"""
want = example.want
# If <BLANKLINE>s are being used, then replace blank lines
# with <BLANKLINE> in the actual output string.
if not (optionflags & DONT_ACCEPT_BLANKLINE):
got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
# Check if we should use diff.
if self._do_a_fancy_diff(want, got, optionflags):
# Split want & got into lines.
want_lines = want.splitlines(keepends=True)
got_lines = got.splitlines(keepends=True)
# Use difflib to find their differences.
if optionflags & REPORT_UDIFF:
diff = difflib.unified_diff(want_lines, got_lines, n=2)
diff = list(diff)[2:] # strip the diff header
kind = 'unified diff with -expected +actual'
elif optionflags & REPORT_CDIFF:
diff = difflib.context_diff(want_lines, got_lines, n=2)
diff = list(diff)[2:] # strip the diff header
kind = 'context diff with expected followed by actual'
elif optionflags & REPORT_NDIFF:
engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
diff = list(engine.compare(want_lines, got_lines))
kind = 'ndiff with -expected +actual'
else:
assert 0, 'Bad diff option'
return 'Differences (%s):\n' % kind + _indent(''.join(diff))
# If we're not using diff, then simply list the expected
# output followed by the actual output.
if want and got:
return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
elif want:
return 'Expected:\n%sGot nothing\n' % _indent(want)
elif got:
return 'Expected nothing\nGot:\n%s' % _indent(got)
else:
return 'Expected nothing\nGot nothing\n'
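# Illustrative usage sketch (hypothetical helper, never called here): the
# OutputChecker decides whether `got` matches `want`, subject to the option
# flags; with ELLIPSIS, "..." in the expected output matches any substring.
def _example_outputchecker_usage():
    checker = OutputChecker()
    want = '[0, 1, ..., 9]\n'
    got = '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n'
    return checker.check_output(want, got, ELLIPSIS)  # True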
class DocTestFailure(Exception):
"""A DocTest example has failed in debugging mode.
The exception instance has variables:
- test: the DocTest object being run
- example: the Example object that failed
- got: the actual output
"""
def __init__(self, test, example, got):
self.test = test
self.example = example
self.got = got
def __str__(self):
return str(self.test)
class UnexpectedException(Exception):
"""A DocTest example has encountered an unexpected exception
The exception instance has variables:
- test: the DocTest object being run
- example: the Example object that failed
- exc_info: the exception info
"""
def __init__(self, test, example, exc_info):
self.test = test
self.example = example
self.exc_info = exc_info
def __str__(self):
return str(self.test)
class DebugRunner(DocTestRunner):
r"""Run doc tests but raise an exception as soon as there is a failure.
If an unexpected exception occurs, an UnexpectedException is raised.
It contains the test, the example, and the original exception:
>>> runner = DebugRunner(verbose=False)
>>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
... {}, 'foo', 'foo.py', 0)
>>> try:
... runner.run(test)
... except UnexpectedException as f:
... failure = f
>>> failure.test is test
True
>>> failure.example.want
'42\n'
>>> exc_info = failure.exc_info
>>> raise exc_info[1] # Already has the traceback
Traceback (most recent call last):
...
KeyError
We wrap the original exception to give the calling application
access to the test and example information.
If the output doesn't match, then a DocTestFailure is raised:
>>> test = DocTestParser().get_doctest('''
... >>> x = 1
... >>> x
... 2
... ''', {}, 'foo', 'foo.py', 0)
>>> try:
... runner.run(test)
... except DocTestFailure as f:
... failure = f
DocTestFailure objects provide access to the test:
>>> failure.test is test
True
As well as to the example:
>>> failure.example.want
'2\n'
and the actual output:
>>> failure.got
'1\n'
If a failure or error occurs, the globals are left intact:
>>> del test.globs['__builtins__']
>>> test.globs
{'x': 1}
>>> test = DocTestParser().get_doctest('''
... >>> x = 2
... >>> raise KeyError
... ''', {}, 'foo', 'foo.py', 0)
>>> runner.run(test)
Traceback (most recent call last):
...
doctest.UnexpectedException: <DocTest foo from foo.py:0 (2 examples)>
>>> del test.globs['__builtins__']
>>> test.globs
{'x': 2}
But the globals are cleared if there is no error:
>>> test = DocTestParser().get_doctest('''
... >>> x = 2
... ''', {}, 'foo', 'foo.py', 0)
>>> runner.run(test)
TestResults(failed=0, attempted=1)
>>> test.globs
{}
"""
def run(self, test, compileflags=None, out=None, clear_globs=True):
r = DocTestRunner.run(self, test, compileflags, out, False)
if clear_globs:
test.globs.clear()
return r
def report_unexpected_exception(self, out, test, example, exc_info):
raise UnexpectedException(test, example, exc_info)
def report_failure(self, out, test, example, got):
raise DocTestFailure(test, example, got)
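# Illustrative usage sketch (hypothetical helper, never called here): unlike
# DocTestRunner, DebugRunner raises on the first problem, so the failing
# example can be inspected or post-mortem debugged directly.
def _example_debugrunner_usage():
    test = DocTestParser().get_doctest('>>> 1 + 1\n3\n', {}, 'demo',
                                       '<demo>', 0)
    try:
        DebugRunner(verbose=False).run(test)
    except DocTestFailure as failure:
        return failure.example.want, failure.got   # ('3\n', '2\n')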
######################################################################
## 6. Test Functions
######################################################################
# These should be backwards compatible.
# For backward compatibility, a global instance of a DocTestRunner
# class, updated by testmod.
master = None
def testmod(m=None, name=None, globs=None, verbose=None,
report=True, optionflags=0, extraglobs=None,
raise_on_error=False, exclude_empty=False):
"""m=None, name=None, globs=None, verbose=None, report=True,
optionflags=0, extraglobs=None, raise_on_error=False,
exclude_empty=False
Test examples in docstrings in functions and classes reachable
from module m (or the current module if m is not supplied), starting
with m.__doc__.
Also test examples reachable from dict m.__test__ if it exists and is
not None. m.__test__ maps names to functions, classes and strings;
function and class docstrings are tested even if the name is private;
strings are tested directly, as if they were docstrings.
Return (#failures, #tests).
See help(doctest) for an overview.
Optional keyword arg "name" gives the name of the module; by default
use m.__name__.
Optional keyword arg "globs" gives a dict to be used as the globals
when executing examples; by default, use m.__dict__. A copy of this
dict is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg "extraglobs" gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used. This is new in 2.4.
Optional keyword arg "verbose" prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg "report" prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg "optionflags" or's together module constants,
and defaults to 0. This is new in 2.3. Possible values (see the
docs for details):
DONT_ACCEPT_TRUE_FOR_1
DONT_ACCEPT_BLANKLINE
NORMALIZE_WHITESPACE
ELLIPSIS
SKIP
IGNORE_EXCEPTION_DETAIL
REPORT_UDIFF
REPORT_CDIFF
REPORT_NDIFF
REPORT_ONLY_FIRST_FAILURE
Optional keyword arg "raise_on_error" raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Advanced tomfoolery: testmod runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
global master
# If no module was given, then use __main__.
if m is None:
# DWA - m will still be None if this wasn't invoked from the command
# line, in which case the following TypeError is about as good an error
# as we should expect
m = sys.modules.get('__main__')
# Check that we were actually given a module.
if not inspect.ismodule(m):
raise TypeError("testmod: module required; %r" % (m,))
# If no name was given, then use the module's name.
if name is None:
name = m.__name__
# Find, parse, and run all tests in the given module.
finder = DocTestFinder(exclude_empty=exclude_empty)
if raise_on_error:
runner = DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
runner.run(test)
if report:
runner.summarize()
if master is None:
master = runner
else:
master.merge(runner)
return TestResults(runner.failures, runner.tries)
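# Illustrative usage sketch (hypothetical helper, never called here):
# testmod() is normally called on the current module from an
# `if __name__ == "__main__"` block; a throwaway module object keeps this
# sketch self-contained.
def _example_testmod_usage():
    import types
    mod = types.ModuleType('demo_mod')
    mod.__doc__ = '>>> 40 + 2\n42\n'
    return testmod(mod, verbose=False)  # TestResults(failed=0, attempted=1)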
def testfile(filename, module_relative=True, name=None, package=None,
globs=None, verbose=None, report=True, optionflags=0,
extraglobs=None, raise_on_error=False, parser=DocTestParser(),
encoding=None):
"""
Test examples in the given file. Return (#failures, #tests).
Optional keyword arg "module_relative" specifies how filenames
should be interpreted:
- If "module_relative" is True (the default), then "filename"
specifies a module-relative path. By default, this path is
relative to the calling module's directory; but if the
"package" argument is specified, then it is relative to that
package. To ensure os-independence, "filename" should use
"/" characters to separate path segments, and should not
be an absolute path (i.e., it may not begin with "/").
- If "module_relative" is False, then "filename" specifies an
os-specific path. The path may be absolute or relative (to
the current working directory).
Optional keyword arg "name" gives the name of the test; by default
use the file's basename.
Optional keyword argument "package" is a Python package or the
name of a Python package whose directory should be used as the
base directory for a module relative filename. If no package is
specified, then the calling module's directory is used as the base
directory for module relative filenames. It is an error to
specify "package" if "module_relative" is False.
Optional keyword arg "globs" gives a dict to be used as the globals
when executing examples; by default, use {}. A copy of this dict
is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg "extraglobs" gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used.
Optional keyword arg "verbose" prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg "report" prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg "optionflags" or's together module constants,
and defaults to 0. Possible values (see the docs for details):
DONT_ACCEPT_TRUE_FOR_1
DONT_ACCEPT_BLANKLINE
NORMALIZE_WHITESPACE
ELLIPSIS
SKIP
IGNORE_EXCEPTION_DETAIL
REPORT_UDIFF
REPORT_CDIFF
REPORT_NDIFF
REPORT_ONLY_FIRST_FAILURE
Optional keyword arg "raise_on_error" raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Optional keyword arg "parser" specifies a DocTestParser (or
subclass) that should be used to extract tests from the files.
Optional keyword arg "encoding" specifies an encoding that should
be used to convert the file to unicode.
Advanced tomfoolery: testmod runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
global master
if package and not module_relative:
raise ValueError("Package may only be specified for module-"
"relative paths.")
# Relativize the path
text, filename = _load_testfile(filename, package, module_relative,
encoding or "utf-8")
# If no name was given, then use the file's name.
if name is None:
name = os.path.basename(filename)
# Assemble the globals.
if globs is None:
globs = {}
else:
globs = globs.copy()
if extraglobs is not None:
globs.update(extraglobs)
if '__name__' not in globs:
globs['__name__'] = '__main__'
if raise_on_error:
runner = DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
# Read the file, convert it to a test, and run it.
test = parser.get_doctest(text, globs, name, filename, 0)
runner.run(test)
if report:
runner.summarize()
if master is None:
master = runner
else:
master.merge(runner)
return TestResults(runner.failures, runner.tries)
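# Illustrative usage sketch (hypothetical helper, never called here):
# testfile() runs the examples found in a plain text file; a temporary file
# keeps the sketch self-contained.
def _example_testfile_usage():
    import os
    import tempfile
    fd, path = tempfile.mkstemp(suffix='.txt', text=True)
    with os.fdopen(fd, 'w') as fh:
        fh.write('A tiny doctest file.\n>>> 3 * 7\n21\n')
    try:
        # module_relative=False means `path` is an ordinary filesystem path.
        return testfile(path, module_relative=False, verbose=False)
    finally:
        os.remove(path)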
def run_docstring_examples(f, globs, verbose=False, name="NoName",
compileflags=None, optionflags=0):
"""
Test examples in the given object's docstring (`f`), using `globs`
as globals. Optional argument `name` is used in failure messages.
If the optional argument `verbose` is true, then generate output
even if there are no failures.
`compileflags` gives the set of flags that should be used by the
Python compiler when running the examples. If not specified, then
it will default to the set of future-import flags that apply to
`globs`.
Optional keyword arg `optionflags` specifies options for the
testing and output. See the documentation for `testmod` for more
information.
"""
# Find, parse, and run all tests in the given module.
finder = DocTestFinder(verbose=verbose, recurse=False)
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
for test in finder.find(f, name, globs=globs):
runner.run(test, compileflags=compileflags)
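# Illustrative usage sketch (hypothetical helper, never called here):
# run_docstring_examples() checks a single object's docstring without
# recursing into anything else, and is silent when every example passes.
def _example_run_docstring_examples_usage():
    def square(x):
        """
        >>> square(4)
        16
        """
        return x * x
    run_docstring_examples(square, {'square': square}, name='square')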
######################################################################
## 7. Unittest Support
######################################################################
_unittest_reportflags = 0
def set_unittest_reportflags(flags):
"""Sets the unittest option flags.
The old flag is returned so that a runner could restore the old
value if it wished to:
>>> import doctest
>>> old = doctest._unittest_reportflags
>>> doctest.set_unittest_reportflags(REPORT_NDIFF |
... REPORT_ONLY_FIRST_FAILURE) == old
True
>>> doctest._unittest_reportflags == (REPORT_NDIFF |
... REPORT_ONLY_FIRST_FAILURE)
True
Only reporting flags can be set:
>>> doctest.set_unittest_reportflags(ELLIPSIS)
Traceback (most recent call last):
...
ValueError: ('Only reporting flags allowed', 8)
>>> doctest.set_unittest_reportflags(old) == (REPORT_NDIFF |
... REPORT_ONLY_FIRST_FAILURE)
True
"""
global _unittest_reportflags
if (flags & REPORTING_FLAGS) != flags:
raise ValueError("Only reporting flags allowed", flags)
old = _unittest_reportflags
_unittest_reportflags = flags
return old
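# Illustrative usage sketch (hypothetical helper, never called here): only
# reporting flags may be passed, and the previous value is returned so a
# caller can restore it.
def _example_set_unittest_reportflags_usage():
    old = set_unittest_reportflags(REPORT_NDIFF | REPORT_ONLY_FIRST_FAILURE)
    set_unittest_reportflags(old)
    return old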
class DocTestCase(unittest.TestCase):
def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
checker=None):
unittest.TestCase.__init__(self)
self._dt_optionflags = optionflags
self._dt_checker = checker
self._dt_test = test
self._dt_setUp = setUp
self._dt_tearDown = tearDown
def setUp(self):
test = self._dt_test
if self._dt_setUp is not None:
self._dt_setUp(test)
def tearDown(self):
test = self._dt_test
if self._dt_tearDown is not None:
self._dt_tearDown(test)
test.globs.clear()
def runTest(self):
test = self._dt_test
old = sys.stdout
new = StringIO()
optionflags = self._dt_optionflags
if not (optionflags & REPORTING_FLAGS):
# The option flags don't include any reporting flags,
# so add the default reporting flags
optionflags |= _unittest_reportflags
runner = DocTestRunner(optionflags=optionflags,
checker=self._dt_checker, verbose=False)
try:
runner.DIVIDER = "-"*70
failures, tries = runner.run(
test, out=new.write, clear_globs=False)
finally:
sys.stdout = old
if failures:
raise self.failureException(self.format_failure(new.getvalue()))
def format_failure(self, err):
test = self._dt_test
if test.lineno is None:
lineno = 'unknown line number'
else:
lineno = '%s' % test.lineno
lname = '.'.join(test.name.split('.')[-1:])
return ('Failed doctest test for %s\n'
' File "%s", line %s, in %s\n\n%s'
% (test.name, test.filename, lineno, lname, err)
)
def debug(self):
r"""Run the test case without results and without catching exceptions
The unit test framework includes a debug method on test cases
and test suites to support post-mortem debugging. The test code
is run in such a way that errors are not caught. This way a
caller can catch the errors and initiate post-mortem debugging.
The DocTestCase provides a debug method that raises
UnexpectedException errors if there is an unexpected
exception:
>>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
... {}, 'foo', 'foo.py', 0)
>>> case = DocTestCase(test)
>>> try:
... case.debug()
... except UnexpectedException as f:
... failure = f
The UnexpectedException contains the test, the example, and
the original exception:
>>> failure.test is test
True
>>> failure.example.want
'42\n'
>>> exc_info = failure.exc_info
>>> raise exc_info[1] # Already has the traceback
Traceback (most recent call last):
...
KeyError
If the output doesn't match, then a DocTestFailure is raised:
>>> test = DocTestParser().get_doctest('''
... >>> x = 1
... >>> x
... 2
... ''', {}, 'foo', 'foo.py', 0)
>>> case = DocTestCase(test)
>>> try:
... case.debug()
... except DocTestFailure as f:
... failure = f
DocTestFailure objects provide access to the test:
>>> failure.test is test
True
As well as to the example:
>>> failure.example.want
'2\n'
and the actual output:
>>> failure.got
'1\n'
"""
self.setUp()
runner = DebugRunner(optionflags=self._dt_optionflags,
checker=self._dt_checker, verbose=False)
runner.run(self._dt_test, clear_globs=False)
self.tearDown()
def id(self):
return self._dt_test.name
def __eq__(self, other):
if type(self) is not type(other):
return NotImplemented
return self._dt_test == other._dt_test and \
self._dt_optionflags == other._dt_optionflags and \
self._dt_setUp == other._dt_setUp and \
self._dt_tearDown == other._dt_tearDown and \
self._dt_checker == other._dt_checker
def __hash__(self):
return hash((self._dt_optionflags, self._dt_setUp, self._dt_tearDown,
self._dt_checker))
def __repr__(self):
name = self._dt_test.name.split('.')
return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
__str__ = __repr__
def shortDescription(self):
return "Doctest: " + self._dt_test.name
class SkipDocTestCase(DocTestCase):
def __init__(self, module):
self.module = module
DocTestCase.__init__(self, None)
def setUp(self):
self.skipTest("DocTestSuite will not work with -O2 and above")
def test_skip(self):
pass
def shortDescription(self):
return "Skipping tests from %s" % self.module.__name__
__str__ = shortDescription
class _DocTestSuite(unittest.TestSuite):
def _removeTestAtIndex(self, index):
pass
def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
**options):
"""
Convert doctest tests for a module to a unittest test suite.
This converts each documentation string in a module that
contains doctest tests to a unittest test case. If any of the
tests in a doc string fail, then the test case fails. An exception
is raised showing the name of the file containing the test and a
(sometimes approximate) line number.
The `module` argument provides the module to be tested. The argument
can be either a module or a module name.
If no argument is given, the calling module is used.
A number of options may be provided as keyword arguments:
setUp
A set-up function. This is called before running the
tests in each file. The setUp function will be passed a DocTest
object. The setUp function can access the test globals as the
globs attribute of the test passed.
tearDown
A tear-down function. This is called after running the
tests in each file. The tearDown function will be passed a DocTest
object. The tearDown function can access the test globals as the
globs attribute of the test passed.
globs
A dictionary containing initial global variables for the tests.
optionflags
A set of doctest option flags expressed as an integer.
"""
if test_finder is None:
test_finder = DocTestFinder()
module = _normalize_module(module)
tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)
if not tests and sys.flags.optimize >=2:
# Skip doctests when running with -O2
suite = _DocTestSuite()
suite.addTest(SkipDocTestCase(module))
return suite
tests.sort()
suite = _DocTestSuite()
for test in tests:
if len(test.examples) == 0:
continue
if not test.filename:
filename = module.__file__
if filename[-4:] == ".pyc":
filename = filename[:-1]
test.filename = filename
suite.addTest(DocTestCase(test, **options))
return suite
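# Illustrative usage sketch (hypothetical helper, never called here):
# DocTestSuite() wraps a module's doctests as unittest cases so they can run
# under any unittest runner; a throwaway module keeps it self-contained.
def _example_doctestsuite_usage():
    import io
    import types
    mod = types.ModuleType('demo_suite_mod')
    mod.__doc__ = '>>> 1 + 1\n2\n'
    suite = DocTestSuite(mod)
    result = unittest.TextTestRunner(stream=io.StringIO()).run(suite)
    return result.wasSuccessful()  # True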
class DocFileCase(DocTestCase):
def id(self):
return '_'.join(self._dt_test.name.split('.'))
def __repr__(self):
return self._dt_test.filename
__str__ = __repr__
def format_failure(self, err):
return ('Failed doctest test for %s\n File "%s", line 0\n\n%s'
% (self._dt_test.name, self._dt_test.filename, err)
)
def DocFileTest(path, module_relative=True, package=None,
globs=None, parser=DocTestParser(),
encoding=None, **options):
if globs is None:
globs = {}
else:
globs = globs.copy()
if package and not module_relative:
raise ValueError("Package may only be specified for module-"
"relative paths.")
# Relativize the path.
doc, path = _load_testfile(path, package, module_relative,
encoding or "utf-8")
if "__file__" not in globs:
globs["__file__"] = path
# Find the file and read it.
name = os.path.basename(path)
# Convert it to a test, and wrap it in a DocFileCase.
test = parser.get_doctest(doc, globs, name, path, 0)
return DocFileCase(test, **options)
def DocFileSuite(*paths, **kw):
"""A unittest suite for one or more doctest files.
The path to each doctest file is given as a string; the
interpretation of that string depends on the keyword argument
"module_relative".
A number of options may be provided as keyword arguments:
module_relative
If "module_relative" is True, then the given file paths are
interpreted as os-independent module-relative paths. By
default, these paths are relative to the calling module's
directory; but if the "package" argument is specified, then
they are relative to that package. To ensure os-independence,
"filename" should use "/" characters to separate path
segments, and may not be an absolute path (i.e., it may not
begin with "/").
If "module_relative" is False, then the given file paths are
interpreted as os-specific paths. These paths may be absolute
or relative (to the current working directory).
package
A Python package or the name of a Python package whose directory
should be used as the base directory for module relative paths.
If "package" is not specified, then the calling module's
directory is used as the base directory for module relative
filenames. It is an error to specify "package" if
"module_relative" is False.
setUp
A set-up function. This is called before running the
tests in each file. The setUp function will be passed a DocTest
object. The setUp function can access the test globals as the
globs attribute of the test passed.
tearDown
A tear-down function. This is called after running the
tests in each file. The tearDown function will be passed a DocTest
object. The tearDown function can access the test globals as the
globs attribute of the test passed.
globs
A dictionary containing initial global variables for the tests.
optionflags
A set of doctest option flags expressed as an integer.
parser
A DocTestParser (or subclass) that should be used to extract
tests from the files.
encoding
An encoding that will be used to convert the files to unicode.
"""
suite = _DocTestSuite()
# We do this here so that _normalize_module is called at the right
# level. If it were called in DocFileTest, then this function
# would be the caller and we might guess the package incorrectly.
if kw.get('module_relative', True):
kw['package'] = _normalize_module(kw.get('package'))
for path in paths:
suite.addTest(DocFileTest(path, **kw))
return suite
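# Illustrative usage sketch (hypothetical helper, never called here):
# DocFileSuite() builds a unittest suite from text files full of examples;
# a temporary file stands in for a real documentation file.
def _example_docfilesuite_usage():
    import io
    import os
    import tempfile
    fd, path = tempfile.mkstemp(suffix='.txt', text=True)
    with os.fdopen(fd, 'w') as fh:
        fh.write('>>> sorted({3, 1, 2})\n[1, 2, 3]\n')
    try:
        suite = DocFileSuite(path, module_relative=False)
        result = unittest.TextTestRunner(stream=io.StringIO()).run(suite)
        return result.wasSuccessful()  # True
    finally:
        os.remove(path)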
######################################################################
## 8. Debugging Support
######################################################################
def script_from_examples(s):
r"""Extract script from text with examples.
Converts text with examples to a Python script. Example input is
converted to regular code. Example output and all other words
are converted to comments:
>>> text = '''
... Here are examples of simple math.
...
... Python has super accurate integer addition
...
... >>> 2 + 2
... 5
...
... And very friendly error messages:
...
... >>> 1/0
... To Infinity
... And
... Beyond
...
... You can use logic if you want:
...
... >>> if 0:
... ... blah
... ... blah
... ...
...
... Ho hum
... '''
>>> print(script_from_examples(text))
# Here are examples of simple math.
#
# Python has super accurate integer addition
#
2 + 2
# Expected:
## 5
#
# And very friendly error messages:
#
1/0
# Expected:
## To Infinity
## And
## Beyond
#
# You can use logic if you want:
#
if 0:
blah
blah
#
# Ho hum
<BLANKLINE>
"""
output = []
for piece in DocTestParser().parse(s):
if isinstance(piece, Example):
# Add the example's source code (strip trailing NL)
output.append(piece.source[:-1])
# Add the expected output:
want = piece.want
if want:
output.append('# Expected:')
output += ['## '+l for l in want.split('\n')[:-1]]
else:
# Add non-example text.
output += [_comment_line(l)
for l in piece.split('\n')[:-1]]
# Trim junk on both ends.
while output and output[-1] == '#':
output.pop()
while output and output[0] == '#':
output.pop(0)
# Combine the output, and return it.
# Add a courtesy newline to prevent exec from choking (see bug #1172785)
return '\n'.join(output) + '\n'
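# Illustrative usage sketch (hypothetical helper, never called here):
# script_from_examples() turns prose into comments, keeps example source as
# plain code, and prefixes expected output with "## ".
def _example_script_from_examples_usage():
    text = 'Double a number:\n>>> 2 * 21\n42\n'
    # Produces "# Double a number:\n2 * 21\n# Expected:\n## 42\n".
    return script_from_examples(text)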
def testsource(module, name):
"""Extract the test sources from a doctest docstring as a script.
Provide the module (or dotted name of the module) containing the
test to be debugged and the name (within the module) of the object
with the doc string with tests to be debugged.
"""
module = _normalize_module(module)
tests = DocTestFinder().find(module)
test = [t for t in tests if t.name == name]
if not test:
raise ValueError(name, "not found in tests")
test = test[0]
testsrc = script_from_examples(test.docstring)
return testsrc
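# Usage sketch (module and test names below are placeholders):
#   print(testsource('mypkg.mymod', 'mypkg.mymod.myfunc'))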
def debug_src(src, pm=False, globs=None):
"""Debug a single doctest docstring, in argument `src`'"""
testsrc = script_from_examples(src)
debug_script(testsrc, pm, globs)
def debug_script(src, pm=False, globs=None):
"Debug a test script. `src` is the script, as a string."
import pdb
if globs:
globs = globs.copy()
else:
globs = {}
if pm:
try:
exec(src, globs, globs)
except:
print(sys.exc_info()[1])
p = pdb.Pdb(nosigint=True)
p.reset()
p.interaction(None, sys.exc_info()[2])
else:
pdb.Pdb(nosigint=True).run("exec(%r)" % src, globs, globs)
def debug(module, name, pm=False):
"""Debug a single doctest docstring.
Provide the module (or dotted name of the module) containing the
test to be debugged and the name (within the module) of the object
with the docstring with tests to be debugged.
"""
module = _normalize_module(module)
testsrc = testsource(module, name)
debug_script(testsrc, pm, module.__dict__)
######################################################################
## 9. Example Usage
######################################################################
class _TestClass:
"""
A pointless class, for sanity-checking of docstring testing.
Methods:
square()
get()
>>> _TestClass(13).get() + _TestClass(-12).get()
1
>>> hex(_TestClass(13).square().get())
'0xa9'
"""
def __init__(self, val):
"""val -> _TestClass object with associated value val.
>>> t = _TestClass(123)
>>> print(t.get())
123
"""
self.val = val
def square(self):
"""square() -> square TestClass's associated value
>>> _TestClass(13).square().get()
169
"""
self.val = self.val ** 2
return self
def get(self):
"""get() -> return TestClass's associated value.
>>> x = _TestClass(-42)
>>> print(x.get())
-42
"""
return self.val
__test__ = {"_TestClass": _TestClass,
"string": r"""
Example of a string object, searched as-is.
>>> x = 1; y = 2
>>> x + y, x * y
(3, 2)
""",
"bool-int equivalence": r"""
In 2.2, boolean expressions displayed
0 or 1. By default, we still accept
them. This can be disabled by passing
DONT_ACCEPT_TRUE_FOR_1 to the new
optionflags argument.
>>> 4 == 4
1
>>> 4 == 4
True
>>> 4 > 4
0
>>> 4 > 4
False
""",
"blank lines": r"""
Blank lines can be marked with <BLANKLINE>:
>>> print('foo\n\nbar\n')
foo
<BLANKLINE>
bar
<BLANKLINE>
""",
"ellipsis": r"""
If the ellipsis flag is used, then '...' can be used to
elide substrings in the desired output:
>>> print(list(range(1000))) #doctest: +ELLIPSIS
[0, 1, 2, ..., 999]
""",
"whitespace normalization": r"""
If the whitespace normalization flag is used, then
differences in whitespace are ignored.
>>> print(list(range(30))) #doctest: +NORMALIZE_WHITESPACE
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29]
""",
}
def _test():
import argparse
parser = argparse.ArgumentParser(description="doctest runner")
parser.add_argument('-v', '--verbose', action='store_true', default=False,
help='print very verbose output for all tests')
parser.add_argument('-o', '--option', action='append',
choices=OPTIONFLAGS_BY_NAME.keys(), default=[],
help=('specify a doctest option flag to apply'
' to the test run; may be specified more'
' than once to apply multiple options'))
parser.add_argument('-f', '--fail-fast', action='store_true',
help=('stop running tests after first failure (this'
' is a shorthand for -o FAIL_FAST, and is'
' in addition to any other -o options)'))
parser.add_argument('file', nargs='+',
help='file containing the tests to run')
args = parser.parse_args()
testfiles = args.file
# Verbose used to be handled by the "inspect argv" magic in DocTestRunner,
# but since we are using argparse we are passing it manually now.
verbose = args.verbose
options = 0
for option in args.option:
options |= OPTIONFLAGS_BY_NAME[option]
if args.fail_fast:
options |= FAIL_FAST
for filename in testfiles:
if filename.endswith(".py"):
# It is a module -- insert its dir into sys.path and try to
# import it. If it is part of a package, that possibly
# won't work because of package imports.
dirname, filename = os.path.split(filename)
sys.path.insert(0, dirname)
m = __import__(filename[:-3])
del sys.path[0]
failures, _ = testmod(m, verbose=verbose, optionflags=options)
else:
failures, _ = testfile(filename, module_relative=False,
verbose=verbose, optionflags=options)
if failures:
return 1
return 0
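# Typical command-line invocations (file names are placeholders):
#   python -m doctest -v example.txt
#   python -m doctest -o NORMALIZE_WHITESPACE -f example.py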
if __name__ == "__main__":
sys.exit(_test())
|
py | b401b2ac4db947963296c9ff067fd8ccca04a431 | #!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiple RPC users."""
from test_framework.test_framework import NEOMTestFramework
from test_framework.util import (
assert_equal,
get_datadir_path,
str_to_b64str,
)
import os
import http.client
import urllib.parse
import subprocess
from random import SystemRandom
import string
import configparser
import sys
class HTTPBasicsTest(NEOMTestFramework):
def set_test_params(self):
self.num_nodes = 2
def setup_chain(self):
super().setup_chain()
#Append rpcauth to neom.conf before initialization
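# rpcauth lines have the form "rpcauth=<user>:<salt>$<hmac-sha256 of the password>";
# the matching plaintext passwords for rt and rt2 are hard-coded in run_test below.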
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
rpcuser = "rpcuser=rpcuser💻"
rpcpassword = "rpcpassword=rpcpassword🔑"
self.user = ''.join(SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(10))
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
gen_rpcauth = config['environment']['RPCAUTH']
p = subprocess.Popen([sys.executable, gen_rpcauth, self.user], stdout=subprocess.PIPE, universal_newlines=True)
lines = p.stdout.read().splitlines()
rpcauth3 = lines[1]
self.password = lines[3]
with open(os.path.join(get_datadir_path(self.options.tmpdir, 0), "neom.conf"), 'a', encoding='utf8') as f:
f.write(rpcauth+"\n")
f.write(rpcauth2+"\n")
f.write(rpcauth3+"\n")
with open(os.path.join(get_datadir_path(self.options.tmpdir, 1), "neom.conf"), 'a', encoding='utf8') as f:
f.write(rpcuser+"\n")
f.write(rpcpassword+"\n")
def run_test(self):
##################################################
# Check correctness of the rpcauth config option #
##################################################
url = urllib.parse.urlparse(self.nodes[0].url)
#Old authpair
authpair = url.username + ':' + url.password
#New authpair generated via share/rpcauth tool
password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM="
#Second authpair with different username
password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI="
authpairnew = "rt:"+password
self.log.info('Correct...')
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Use new authpair to confirm both work
self.log.info('Correct...')
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Wrong login name with rt's password
self.log.info('Wrong...')
authpairnew = "rtwrong:"+password
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
#Wrong password for rt
self.log.info('Wrong...')
authpairnew = "rt:"+password+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
#Correct for rt2
self.log.info('Correct...')
authpairnew = "rt2:"+password2
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Wrong password for rt2
self.log.info('Wrong...')
authpairnew = "rt2:"+password2+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
#Correct for randomly generated user
self.log.info('Correct...')
authpairnew = self.user+":"+self.password
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Wrong password for randomly generated user
self.log.info('Wrong...')
authpairnew = self.user+":"+self.password+"Wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
###############################################################
# Check correctness of the rpcuser/rpcpassword config options #
###############################################################
url = urllib.parse.urlparse(self.nodes[1].url)
# rpcuser and rpcpassword authpair
self.log.info('Correct...')
rpcuserauthpair = "rpcuser💻:rpcpassword🔑"
headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Wrong login name with rpcuser's password
rpcuserauthpair = "rpcuserwrong:rpcpassword"
headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
#Wrong password for rpcuser
self.log.info('Wrong...')
rpcuserauthpair = "rpcuser:rpcpasswordwrong"
headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
if __name__ == '__main__':
HTTPBasicsTest ().main ()
|
py | b401b2bfa65e8c8f0fc90158ec8672af0be91ca7 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="mlpug",
version="0.0.52",
author="Freddy Snijder",
author_email="[email protected]",
description="A machine learning library agnostic framework for model training",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/nuhame/mlpug",
packages=setuptools.find_packages(),
install_requires=[
'visionscaper-pybase',
'tensorboardX'
],
dependency_links=['git+https://github.com/visionscaper/pybase.git'],
classifiers=[
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
python_requires='>=3.7',
) |
py | b401b36ac9eaea9baeb7c6fdd2a182b426b89d2a | import graphene
from django.conf import settings
from ...account import error_codes as account_error_codes
from ...app import error_codes as app_error_codes
from ...attribute import error_codes as attribute_error_codes
from ...channel import error_codes as channel_error_codes
from ...checkout import error_codes as checkout_error_codes
from ...core import JobStatus, TimePeriodType
from ...core import error_codes as core_error_codes
from ...core.permissions import get_permissions_enum_list
from ...core.units import (
AreaUnits,
DistanceUnits,
MeasurementUnits,
VolumeUnits,
WeightUnits,
)
from ...csv import error_codes as csv_error_codes
from ...discount import error_codes as discount_error_codes
from ...giftcard import error_codes as giftcard_error_codes
from ...invoice import error_codes as invoice_error_codes
from ...menu import error_codes as menu_error_codes
from ...order import error_codes as order_error_codes
from ...page import error_codes as page_error_codes
from ...payment import error_codes as payment_error_codes
from ...plugins import error_codes as plugin_error_codes
from ...product import error_codes as product_error_codes
from ...shipping import error_codes as shipping_error_codes
from ...site import error_codes as site_error_codes
from ...warehouse import error_codes as warehouse_error_codes
from ...webhook import error_codes as webhook_error_codes
from ..notifications import error_codes as external_notifications_error_codes
from .utils import str_to_enum
class OrderDirection(graphene.Enum):
ASC = ""
DESC = "-"
@property
def description(self):
# Disable all the no-member violations in this function
# pylint: disable=no-member
if self == OrderDirection.ASC:
return "Specifies an ascending sort order."
if self == OrderDirection.DESC:
return "Specifies a descending sort order."
raise ValueError("Unsupported enum value: %s" % self.value)
class ReportingPeriod(graphene.Enum):
TODAY = "TODAY"
THIS_MONTH = "THIS_MONTH"
def to_enum(enum_cls, *, type_name=None, **options) -> graphene.Enum:
"""Create a Graphene enum from a class containing a set of options.
:param enum_cls:
The class to build the enum from.
:param type_name:
The name of the type. Default is the class name + 'Enum'.
:param options:
- description:
Contains the type description (default is the class's docstring)
- deprecation_reason:
Contains the deprecation reason.
The default is enum_cls.__deprecation_reason__ or None.
:return:
"""
# note this won't work until
# https://github.com/graphql-python/graphene/issues/956 is fixed
deprecation_reason = getattr(enum_cls, "__deprecation_reason__", None)
if deprecation_reason:
options.setdefault("deprecation_reason", deprecation_reason)
type_name = type_name or (enum_cls.__name__ + "Enum")
enum_data = [(str_to_enum(code.upper()), code) for code, name in enum_cls.CHOICES]
return graphene.Enum(type_name, enum_data, **options)
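# For example, to_enum(JobStatus) below produces a graphene enum named
# "JobStatusEnum" whose members are built from JobStatus.CHOICES.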
LanguageCodeEnum = graphene.Enum(
"LanguageCodeEnum",
[(lang[0].replace("-", "_").upper(), lang[0]) for lang in settings.LANGUAGES],
)
JobStatusEnum = to_enum(JobStatus)
PermissionEnum = graphene.Enum("PermissionEnum", get_permissions_enum_list())
TimePeriodTypeEnum = to_enum(TimePeriodType)
# unit enums
MeasurementUnitsEnum = to_enum(MeasurementUnits)
DistanceUnitsEnum = to_enum(DistanceUnits)
AreaUnitsEnum = to_enum(AreaUnits)
VolumeUnitsEnum = to_enum(VolumeUnits)
WeightUnitsEnum = to_enum(WeightUnits)
unit_enums = [DistanceUnitsEnum, AreaUnitsEnum, VolumeUnitsEnum, WeightUnitsEnum]
AccountErrorCode = graphene.Enum.from_enum(account_error_codes.AccountErrorCode)
AppErrorCode = graphene.Enum.from_enum(app_error_codes.AppErrorCode)
AttributeErrorCode = graphene.Enum.from_enum(attribute_error_codes.AttributeErrorCode)
ChannelErrorCode = graphene.Enum.from_enum(channel_error_codes.ChannelErrorCode)
CheckoutErrorCode = graphene.Enum.from_enum(checkout_error_codes.CheckoutErrorCode)
ExternalNotificationTriggerErrorCode = graphene.Enum.from_enum(
external_notifications_error_codes.ExternalNotificationErrorCodes
)
ExportErrorCode = graphene.Enum.from_enum(csv_error_codes.ExportErrorCode)
DiscountErrorCode = graphene.Enum.from_enum(discount_error_codes.DiscountErrorCode)
PluginErrorCode = graphene.Enum.from_enum(plugin_error_codes.PluginErrorCode)
GiftCardErrorCode = graphene.Enum.from_enum(giftcard_error_codes.GiftCardErrorCode)
MenuErrorCode = graphene.Enum.from_enum(menu_error_codes.MenuErrorCode)
OrderSettingsErrorCode = graphene.Enum.from_enum(
site_error_codes.OrderSettingsErrorCode
)
GiftCardSettingsErrorCode = graphene.Enum.from_enum(
site_error_codes.GiftCardSettingsErrorCode
)
MetadataErrorCode = graphene.Enum.from_enum(core_error_codes.MetadataErrorCode)
OrderErrorCode = graphene.Enum.from_enum(order_error_codes.OrderErrorCode)
InvoiceErrorCode = graphene.Enum.from_enum(invoice_error_codes.InvoiceErrorCode)
PageErrorCode = graphene.Enum.from_enum(page_error_codes.PageErrorCode)
PaymentErrorCode = graphene.Enum.from_enum(payment_error_codes.PaymentErrorCode)
PermissionGroupErrorCode = graphene.Enum.from_enum(
account_error_codes.PermissionGroupErrorCode
)
ProductErrorCode = graphene.Enum.from_enum(product_error_codes.ProductErrorCode)
CollectionErrorCode = graphene.Enum.from_enum(product_error_codes.CollectionErrorCode)
ShopErrorCode = graphene.Enum.from_enum(core_error_codes.ShopErrorCode)
ShippingErrorCode = graphene.Enum.from_enum(shipping_error_codes.ShippingErrorCode)
StockErrorCode = graphene.Enum.from_enum(warehouse_error_codes.StockErrorCode)
UploadErrorCode = graphene.Enum.from_enum(core_error_codes.UploadErrorCode)
WarehouseErrorCode = graphene.Enum.from_enum(warehouse_error_codes.WarehouseErrorCode)
WebhookErrorCode = graphene.Enum.from_enum(webhook_error_codes.WebhookErrorCode)
TranslationErrorCode = graphene.Enum.from_enum(core_error_codes.TranslationErrorCode)
|
py | b401b48ef9cc96638b696f0b256c92b5bfd0e519 | # Author: Walber C J Rocha
# University: Universidade Federal do Recôncavo da Bahia
import os, pickle, socket, sys, threading
# Global variables
MB = 64
MAX_CACHE_SIZE = MB*(1024*1024)
BUFFER_SIZE = 1024
CACHE_SIZE = 0
CACHE = { }
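# CACHE maps a filename to {'size': <bytes>, 'data': <pickled file payload>};
# CACHE_SIZE tracks the total cached bytes and is bounded by MAX_CACHE_SIZE (64 MB).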
# -----------------
def remove_element_cache(size_file):
size_to_remove = 0
key_to_remove = ''
count = 0
for key in CACHE:
file = CACHE.get(key)
current_key = key
current_size = file['size']
if(file['size'] >= size_file):
size_to_remove = current_size
key_to_remove = current_key
break
else:
# No cached entry is big enough on its own; remember the largest
# entry seen so far as the eviction candidate.
if(current_size >= count):
count = current_size
size_to_remove = count
key_to_remove = current_key
CACHE.pop(key_to_remove)
return (CACHE_SIZE - size_to_remove)
def get_cache_files():
# Return the names of all cached files without shadowing the list() builtin.
files = []
for key in CACHE.keys():
files.append(key)
return files
def client_connect(directory, conn, addr, lock):
global CACHE
global CACHE_SIZE
os.chdir(directory)
request = conn.recv(BUFFER_SIZE).decode()
print(f'Client {addr} is requesting file {request}')
if(request == 'list'):
conn.send(pickle.dumps(get_cache_files()))
conn.close()
print('Cache request sent to the client')
else:
lock.acquire()
if(CACHE.get(str(request))):
print(f'Cache hit. File {request} sent to the client.')
payload_file = CACHE.get(str(request))
data = pickle.loads(payload_file['data'])
conn.send(data)
conn.close()
else:
if(os.path.isfile(request)):
with open(request, 'rb') as file:
file_size = os.path.getsize(request)
payload_file = file.read()
if(file_size <= MAX_CACHE_SIZE):
payload_to_cache = b''
while(payload_file):
conn.send(payload_file)
payload_to_cache += payload_file
payload_file = file.read(BUFFER_SIZE)
payload_serialize = pickle.dumps(payload_to_cache)
while(CACHE_SIZE+file_size > MAX_CACHE_SIZE):
CACHE_SIZE = remove_element_cache(file_size)
to_cache = {str(request): {'size': file_size, 'data': payload_serialize}}
CACHE_SIZE += file_size
CACHE.update(to_cache)
else:
while(payload_file):
conn.send(payload_file)
payload_file = file.read(BUFFER_SIZE)
file.close()
conn.close()
print(f'Cache miss. File {request} sent to the client')
else:
conn.send(b'File does not exist')
conn.close()
print(f'File {request} does not exist')
lock.release()
if __name__ == "__main__":
HOST = 'localhost'
PORT = sys.argv[1]
DIRECTORY = sys.argv[2]
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((HOST, int(PORT)))
# One lock shared by all client threads; creating a new semaphore per
# connection would not actually serialize access to the shared cache.
lock = threading.Semaphore()
while True:
s.listen()
conn, addr = s.accept()
new_client = threading.Thread(target=client_connect, args=(DIRECTORY, conn, addr, lock))
new_client.start()
s.close() |
py | b401b4b8cb392485cfa76c1e8b24b440173ff10e | """
Tests common utility functionality of binaries shipped with Tezos.
"""
import subprocess
from typing import List
from process import process_utils
from tools import paths
from . import protocol
PROTO_BINARIES = [
binary + "-" + protocol.DAEMON
for binary in ["tezos-baker", "tezos-endorser", "tezos-accuser"]
]
BINARIES = [
"tezos-codec",
"tezos-client",
"tezos-admin-client",
"tezos-protocol-compiler",
"tezos-node",
"tezos-snoop",
"tezos-validator",
] + PROTO_BINARIES
def run_cmd(cmd: List[str]) -> str:
"""Pretty print a command. Execute it, print and return its standard
output."""
print(process_utils.format_command(cmd))
process_ret = subprocess.run(
cmd, check=True, capture_output=True, text=True
)
print(process_ret.stdout)
return process_ret.stdout.strip()
class TestBinaries:
def test_version(self):
"""Tests that all binaries accept the --version flag and that the
report the same version"""
versions = set()
for binary in BINARIES:
version = run_cmd([paths.TEZOS_HOME + binary, "--version"])
versions.add(version)
assert len(versions) == 1, "All binaries should report the same version"
|
py | b401b4db05da4dc13a7bc991c0a40b8c404da78e | import radia as rad
import numpy as np
from rsradia.magnets import geometry
TRIANGLE_MIN_SIZE, TRIANGLE_MAX_SIZE = 0.5, 1.0
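# Default bounds handed to Radia's automatic triangular meshing options
# (TriAngMin / TriAreaMax) when create_pole is called with mode != 0.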
def _create_point_table(pole_width, pole_separation, pole_height, top_height, leg_width, gap_height,
bevel_base=0.0, chamfer_base=None, chamfer_angle=None, fillet_base=None, fillet_height=None, fillet_radius=None):
"""
Construct 2D slice of an H-dipole in the YZ plane.
All distances are half-widths/lengths of the full magnet. Points are specified in the first quadrant and mirrored
to other quadrants later. The point list is returned in counter-clockwise order starting from the midpoint above
the gap center on the pole face.
:param pole_width: (float)
:param pole_separation: (float)
:param pole_height: (float)
:param top_height: (float)
:param leg_width: (float)
:param gap_height: (float)
:fillet_base: (float)
:fillet_height: (float)
:fillet_radius: (float)
:return:
"""
assert np.any([fillet_base, fillet_height, fillet_radius]) == np.all([fillet_base, fillet_height, fillet_radius]), "fillet height, base, and radius must all be defined"
assert np.any([chamfer_base, chamfer_angle]) == np.all([chamfer_base, chamfer_angle]), "chamfer base and angle must both be defined"
if chamfer_angle:
assert chamfer_angle > 0. and chamfer_angle < 90., "Chamfer angle must between 0 and 90 degrees"
p1 = [0.0, top_height + pole_height + gap_height]
p2 = [pole_width + pole_separation, top_height + pole_height + gap_height]
p3 = [pole_width + pole_separation + leg_width, gap_height + pole_height + top_height]
p4 = [pole_width + pole_separation + leg_width, 0.0]
p5 = [pole_width + pole_separation, 0.0]
p6 = [pole_width + pole_separation, pole_height + gap_height]
p7 = [pole_width, pole_height + gap_height]
p8 = [pole_width - bevel_base, gap_height]
p_middle = [0.0, gap_height]
point_table = [p1, p2, p3, p4, p5, p6, p7, p8, p_middle]
# if a chamfer is applied on top of a bevel it is taken relative to the pole width after the bevel
if chamfer_base:
p8_start = p8.copy()
p8 = [p8[0] - chamfer_base, p8[1]]
# Intersection will need to be between the vertical bounds of p7 and p8 to be valid
ymin, ymax = p8[1], p7[1]
# Get a point on the chamfer cut line to find intersection with existing wall
prox_x, prox_y = p8[0] + np.cos(np.pi * chamfer_angle / 180.), p8[1] + np.sin(np.pi * chamfer_angle / 180.)
intersect_x, intersect_y = geometry.get_intersection(p8_start, p7, p8, (prox_x, prox_y))
assert intersect_y > ymin and intersect_y < ymax, "Chamfer cut does not intersect pole wall"
point_table[7] = p8
point_table.insert(7, [intersect_x, intersect_y])
if np.any([fillet_base, fillet_height, fillet_radius]):
a = [p8[0] - fillet_base, p8[1]]
point_table[7 + 1 * (chamfer_base is not None)] = a
b = [p8[0], p8[1] + fillet_height]
center = geometry.get_circle_center(a, b, fillet_radius)
# Define counter-clockwise list of points from a to b
arc_points = [[xp, yp] for xp, yp in zip(*geometry.get_arc_points(a[0], b[0], center, fillet_radius, N=5))]
# Insert clockwise list of points from a to b between p7 and p8
point_table = [list(pt) for pt in np.insert(np.array(point_table), 7, arc_points[:1:-1], axis=0)]
return point_table[::-1]
def _get_all_points_top(table):
"""Reflect point table from `create_point_table` to quadrant 2 to form top of H-dipole"""
coordinates = []
for point in table:
if point[0] != 0.:
reflection = [-point[0], point[1]]
coordinates.append(reflection)
coordinates = table + coordinates[::-1]
return coordinates
def _get_all_points_bottom(table):
"""Reflect point table from `get_all_points_bottom` to quadrant 3 & 4 to form bottom of H-dipole"""
coordinates = []
for point in table:
reflection = [point[0], -point[1]]
coordinates.append(reflection)
return coordinates[::-1]
def create_pole(coordinates, center, length, mode=0, triangle_min_size=TRIANGLE_MIN_SIZE, triangle_max_size=TRIANGLE_MAX_SIZE):
"""
Form geometry for the full pole piece of an H-dipole using radia.ObjMltExtTri.
:param coordinates: (list) List of points defining the boundaries of the pole piece in the YZ plane.
:param center: (float) Center point of dipole in x (longitudinal center for beam frame).
:param length: (float) Length of the dipole in x
:param mode: (int) If 0 (default), the pole piece is divided into polygons based on the point ordering of the
coordinate list. If != 0, a triangular mesh is generated automatically.
:param triangle_min_size: (float) Only used if mode != 0. Passed to Radia as TriAngMin (minimum triangle angle) for automatic division.
:param triangle_max_size: (float) Only used if mode != 0. Passed to Radia as TriAreaMax (maximum triangle area) for automatic division.
:return: Radia object containing top and bottom pole pieces.
"""
x = center
lx = length
pole_sub = [[1, 1] for _ in range(len(coordinates))]
# simple build
if not mode:
pole = rad.ObjMltExtTri(x, lx, coordinates, pole_sub)
else:
str_param = 'ki->Numb,TriAngMin->' + str(triangle_min_size) + ',TriAreaMax->' + str(triangle_max_size)
pole = rad.ObjMltExtTri(x, lx, coordinates, pole_sub, 'x', [0., 0., 0.], str_param)
return pole
def make_racetrack_coil(center, radii, sizes, segments=15, current=1):
"""
Create coil for H-dipole. Coil runs in the XY plane.
:param center: (list) Center of the coil in [x, y, z].
:param radii: (list) Inner and outer edges for the coil.
:param sizes: (list) Straight sections lengths in X and Y; coil height in Z.
:param segments: (int) Number of segments for coil corners (default: 15).
:param current: (float) Current carried by the coil (default: 1).
:return: Radia object representing the coil
"""
return rad.ObjRaceTrk(center, radii, sizes[:2], sizes[2], segments, current, 'man', 'z')
def make_dipole(pole_dimensions, center, length, current=-10000,
trimesh_mode=0, triangle_min_size=TRIANGLE_MIN_SIZE, triangle_max_size=TRIANGLE_MAX_SIZE, longitudinal_divisions=4):
"""
Construct a complete H-dipole made of iron.
:param pole_dimensions: (dict) Parameters describing geometry of pole piece. See `_create_point_table`.
:param center: (float) Center point of dipole in x (longitudinal center for beam frame).
:param length: (float) Length of the dipole in x
:param current: (float) Current carried by dipole coils (default: -10000)
:param trimesh_mode: (int) If 0 (default), the pole piece is divided into polygons based on the point ordering
of the coordinate list. If != 0, a triangular mesh is generated automatically.
:param longitudinal_divisions: (int) Number of slices to divide up the dipole into along the x-axis (default: 4)
:return:
"""
# coil_factor increases coil size slightly to accommodate sharp corners of pole piece
coil_length_factor = 1.005
coil_height_factor = 3. / 4.
coil_or_factor = 0.85
# Geometry for the poles
table_quadrant_one = _create_point_table(**pole_dimensions)
top_coordinates = _get_all_points_top(table_quadrant_one)
bottom_coordinates = _get_all_points_bottom(top_coordinates)
top_pole = create_pole(top_coordinates, center, length, mode=trimesh_mode, triangle_min_size=triangle_min_size, triangle_max_size=triangle_max_size)
bottom_pole = create_pole(bottom_coordinates, center, length, mode=trimesh_mode, triangle_min_size=triangle_min_size, triangle_max_size=triangle_max_size)
# Material for the poles (uses Iron)
ironmat = rad.MatSatIsoFrm([20000, 2], [0.1, 2], [0.1, 2])
rad.MatApl(top_pole, ironmat)
rad.MatApl(bottom_pole, ironmat)
# Coils
coil_outer_radius = pole_dimensions['pole_separation'] * coil_or_factor
top_coil = make_racetrack_coil(center=[0, 0.0, pole_dimensions['gap_height'] + pole_dimensions['pole_height'] / 2.],
radii=[0.1, coil_outer_radius],
sizes=[length * coil_length_factor,
pole_dimensions['pole_width'] * 2 * coil_length_factor,
pole_dimensions['pole_height'] * coil_height_factor],
current=current)
bottom_coil = make_racetrack_coil(center=[0, 0.0, -1. * (pole_dimensions['gap_height'] + pole_dimensions['pole_height'] / 2.)],
radii=[0.1, coil_outer_radius],
sizes=[length * coil_length_factor,
pole_dimensions['pole_width'] * 2 * coil_length_factor,
pole_dimensions['pole_height'] * coil_height_factor],
current=current)
# Visualization
rad.ObjDrwAtr(top_pole, [0, 0.4, 0.8])
rad.ObjDrwAtr(bottom_pole, [0, 0.4, 0.8])
rad.ObjDrwAtr(top_coil, [0.2, 0.9, 0.6])
rad.ObjDrwAtr(bottom_coil, [0.2, 0.9, 0.6])
# Element Division
rad.ObjDivMag(top_pole, [longitudinal_divisions, 1, 1])
rad.ObjDivMag(bottom_pole, [longitudinal_divisions, 1, 1])
return rad.ObjCnt([top_pole, bottom_pole, top_coil, bottom_coil]) |
py | b401b5296b111cdb7f003ad2c85ea3528327a3fa | # -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import webapp2
# Use routines from this base class
import base
# Testing Cree language and variantsn
# Should this be inherited from base.languageTemplate?
class langInfo():
def __init__(self):
self.LanguageCode = 'cr'
self.Language = 'Cree'
self.Language_native = 'Cree languages'
self.encoding_font_list = [
]
self.unicode_font_list = [
{ 'source': '/fonts/cree/NotoSansCanadianAboriginal-Regular.ttf',
'family': 'NotoSansCanadianAboriginal',
'longName': 'Noto Sans Canadian Aboriginal',
},
{ 'family': 'bjcrus',
'longName': 'BJ Cree',
'source': '/fonts/cree/bjcrus.ttf'
},
{'family': 'oskiblackfoot5',
'longName': 'Oski Blackfoot',
'source': '/fonts/cree/oskiblackfoot5.ttf'
},
{ 'family': 'kisiska',
'longName': 'Kisiska',
'source': '/fonts/cree/kisiska.otf'
},
{ 'family': 'aboriginalSans',
'longName': 'Aboriginal Sans',
'source': '/fonts/cree/AboriginalSansREGULAR.ttf'
},
{'family': 'aboriginalSerif',
'longName': 'Aboriginal Serif',
'source': '/fonts/cree/Aboriginal Serif REGULAR 939.ttf'
},
{'family': 'Euphemia',
'longName': 'Euphemia regular',
'source': '/fonts/cree/Euphemia UCAS Regular 2.6.6.ttf',
'origin': 'http://tiro.com/syllabics/resources/'
},
{'family': 'Uqammaq',
'longName': 'Uqammaq regular',
'source': '/fonts/cree/Uqammaq_Regular.ttf',
'origin': 'http://tiro.com/syllabics/resources/'
},
{'family': 'Pigiarniq',
'longName': 'Pigiarniq regular',
'source': '/fonts/cree/Pigiarniq_Regular.ttf',
'origin': 'http://tiro.com/syllabics/resources/'
},
{'family': 'Masinahikan_h',
'longName': 'Masinahikan',
'source': '/fonts/cree/Masinahikan_h.ttf',
'origin': 'languagegeek.com'
},
]
self.lang_list = [
{'shortName': 'crk_phonetic',
'longName': 'ᓀᐦᐃᔭᐍᐏᐣ (Plains Cree)',
'nativeName': 'ᓀᐦᐃᔭᐍᐏᐣ'
},
# {'shortName': 'crk',
# 'longName': 'Plains Cree'
# },
# {'shortName': 'cwd',
# 'longName': 'Woods Cree'
# },
# {'shortName': 'csw',
# 'longName': 'Swampy Cree'
# },
# {'shortName': 'crl',
# 'longName': 'Northern East Cree'
# },
# {'shortName': 'crj',
# 'longName': 'Southern East Cree'
# },
# {'shortName': 'nsk',
# 'longName': 'Naskapi'
# },
# {'shortName': 'moe',
# 'longName': 'Montagnais'
# },
# {'shortName': 'atj',
# 'longName': 'Atikamekw'
# },
]
self.links = [
{'linkText': 'Keyboard',
'ref': '/aho/'
},
{'linkText': 'Converter',
'ref': '/' + self.LanguageCode + '/convertUI/'
},
{'linkText': 'Keyboard transforms',
'ref': '/' + self.LanguageCode + '/kbtransforms/'
},
{'linkText': 'Plains Cree Keyboard',
'ref': 'https://www.altlab.dev/plains-cree-syllabics-key-sequences/'
},
{'linkText': 'Unicode block',
'ref': 'https://www.unicode.org/charts/PDF/U1400.pdf'
},
{'linkText': 'Resources',
'ref': '/' + self.LanguageCode + '/downloads/'
},
{'linkText': 'Language Geek fonts',
'ref': 'http://www.languagegeek.com/font/fontdownload.html'
},
{'linkText': 'Try Plains Cree on Google Input Tools',
'ref': 'https://www.google.com/intl/sa/inputtools/try/'
},
]
self.kb_list = [
{'shortName': 'crk_phonetic',
'longName': 'Plains Cree Phonetic',
'nativeName': 'ᓀᐦᐃᔭᐍᐏᐣ',
'fontFamily': 'NotoSansCanadianAboriginal',
},]
# Resource files
self.text_file_list = [
]
self.baseHexUTF16 = u'\u1400'
self.base_consonant = u'\u1400'
if sys.maxunicode >= 0x10000:
self.unicodeChars = [unichr(x) for x in range(0x1400, 0x167F)]
self.diacritic_list = []
else:
self.unicodeChars = [unichr(x) for x in range(0x1400, 0x167F)]
self.diacritic_list = []
self.converters = None
# Python-based transliteration tool.
self.transliterator = None
# Test data for showing in converter.
self.test_data = '' #ᐊ ᐃ ᐄ ᐅ ᐆ ᐊ ᐋ'
return
langInstance = langInfo()
app = webapp2.WSGIApplication(
[
('/cr/', base.LanguagesHomeHandler),
('/cr/keyboard/', base.LanguagesHomeHandler),
('/cr/convertUI/', base.ConvertUIHandler),
('/cr/downloads/', base.Downloads),
('/cr/converter/', base.ConvertUIHandler),
('/cr/encodingRules/', base.EncodingRules),
('/cr/diacritic/', base.DiacriticHandler),
('/cr/render/', base.EncodingRules),
('/' + langInstance.LanguageCode + '/kbtransforms/', base.KeyboardTransforms),
],
debug=True,
config={'langInfo': langInstance}
)
|
py | b401b7282156b3f04e57b88aa0ed6f4707d24217 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import setuptools
with open('README.md') as readme_file:
readme = readme_file.read()
setuptools.setup(
name='ska-oso-oet',
version="2.13.2",
description="This project contains the code for the Observation Execution Tool, the application which provides high-level scripting facilities and a high-level scripting UI for the SKA.",
long_description=readme + '\n\n',
author="Stewart Williams",
author_email='[email protected]',
url='https://github.com/ska-telescope/ska-oso-oet',
package_dir={"": "src"},
packages=setuptools.find_packages(where="src"),
entry_points={
'console_scripts': ['oet=oet.procedure.application.restclient:main']
},
include_package_data=True,
license="BSD license",
zip_safe=False,
keywords='ska_observation_execution_tool',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 3.7',
],
test_suite='tests/unit',
install_requires=[
'fire',
"Flask==1.1.2", # AT2-813 hot fix
"Jinja2==2.11.2", # AT2-813 hot fix
"itsdangerous==1.1.0", # AT2-813 hot fix
"Werkzeug==1.0.1", # AT2-813 hot fix
"MarkupSafe==1.1.1", # AT2-813 hot fix
"click==7.1.2", # AT2-813 hot fix
'jsonpickle',
'pypubsub',
'pytango',
'requests',
'ska-ser-logging',
'ska-ser-skuid',
'sseclient',
'tabulate',
'tblib',
],
setup_requires=[
# dependency for `python setup.py test`
'pytest-runner',
# dependencies for `python setup.py build_sphinx`
'sphinx',
'recommonmark'
],
tests_require=[
'pytest',
'pytest-cov',
'pytest-json-report',
'pycodestyle',
'requests-mock'
],
extras_require={
'dev': ['prospector[with_pyroma]', 'yapf', 'isort']
}
)
|
py | b401b72ac0ca1b9c53fccbfc4e8ff353cc0df070 | """
saphostctrl - Commands ``saphostctrl``
======================================
Parsers included in this module are:
SAPHostCtrlInstances - Command ``saphostctrl -function GetCIMObject -enuminstances SAPInstance``
------------------------------------------------------------------------------------------------
"""
from insights import parser, CommandParser
from insights.core.filters import add_filter
from insights.parsers import ParseException, SkipException
from insights.specs import Specs
SAP_INST_FILTERS = [
'******',
'CreationClassName',
'SID',
'SystemNumber',
'InstanceName',
'Hostname',
'SapVersionInfo',
'FullQualifiedHostname'
]
add_filter(Specs.saphostctl_getcimobject_sapinstance, SAP_INST_FILTERS)
@parser(Specs.saphostctl_getcimobject_sapinstance)
class SAPHostCtrlInstances(CommandParser):
"""
This class provides processing for the output of the
``/usr/sap/hostctrl/exe/saphostctrl -function GetCIMObject -enuminstances SAPInstance``
command on SAP systems.
Sample output of the command::
*********************************************************
CreationClassName , String , SAPInstance
SID , String , D89
SystemNumber , String , 88
InstanceName , String , HDB88
Hostname , String , hdb88
FullQualifiedHostname , String , hdb88.example.com
IPAddress , String , 10.0.0.88
SapVersionInfo , String , 749, patch 211, changelist 1754007
*********************************************************
CreationClassName , String , SAPInstance
SID , String , D90
SystemNumber , String , 90
InstanceName , String , HDB90
Hostname , String , hdb90
FullQualifiedHostname , String , hdb90.example.com
IPAddress , String , 10.0.0.90
SapVersionInfo , String , 749, patch 211, changelist 1754007
Examples:
>>> type(sap_inst)
<class 'insights.parsers.saphostctrl.SAPHostCtrlInstances'>
>>> sap_inst.data[-1]['CreationClassName']
'SAPInstance'
>>> sap_inst.data[-1]['SID']
'D90'
>>> sap_inst.data[-1]['SapVersionInfo'] # Note: captured as one string
'749, patch 211, changelist 1754007'
>>> sap_inst.data[0]['InstanceType'] # Inferred code from InstanceName
'HDB'
Attributes:
data (list): List of dicts where keys are the lead name of each line and
values are the string value.
instances (list): The list of instances found in the cluster output.
sids (list): The list of SID found in the cluster output.
types (list): The list of instance types found in the cluster output.
Raises:
SkipException: When input is empty.
ParseException: When input cannot be parsed.
"""
REQUIRED_DIRECTIVES = (
'CreationClassName',
'SID',
'SystemNumber',
'InstanceName',
'Hostname',
'SapVersionInfo',
'FullQualifiedHostname'
)
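# Every instance block must supply all of these directives; they mirror the
# SAP_INST_FILTERS terms added above, so the filtered spec still contains them.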
def parse_content(self, content):
if not content:
raise SkipException("Empty content")
if len(content) == 1:
raise ParseException("Incorrect content: '{0}'".format(content))
self.data = []
self.instances = []
_current_instance = {}
_sids = set()
_types = set()
def _update_instance(inst):
for _dir in self.REQUIRED_DIRECTIVES:
if _dir not in inst:
raise ParseException('Missing: "{0}"'.format(_dir))
if not inst['InstanceName'].endswith(inst['SystemNumber']):
raise ParseException(
'InstanceName: "{0}" missing match with SystemNumber: "{1}"'.format(inst['InstanceName'], inst['SystemNumber']))
# InstanceType = The chars in InstanceName before the SystemNumber
# subtract len(sysnumber) characters from instance name
inst['InstanceType'] = inst['InstanceName'][0:-len(inst['SystemNumber'])]
_current_instance = {}
for line in (l.strip() for l in content):
if line.startswith('******'):
# Skip separator lines but save and reset current instance
if _current_instance:
_update_instance(_current_instance)
self.instances.append(_current_instance['InstanceName'])
self.data.append(_current_instance)
_types.add(_current_instance['InstanceType'])
_sids.add(_current_instance['SID'])
_current_instance = {}
continue
fields = [i.strip() for i in line.split(',', 2)]
if len(fields) < 3:
raise ParseException("Incorrect line: '{0}'".format(line))
# TODO: if we see something other than 'String' in the second
# field, we could maybe change its type, say to an integer?
_current_instance[fields[0]] = fields[2]
# the last instance
if _current_instance:
_update_instance(_current_instance)
self.instances.append(_current_instance['InstanceName'])
self.data.append(_current_instance)
_types.add(_current_instance['InstanceType'])
_sids.add(_current_instance['SID'])
self.sids = list(_sids)
self.types = list(_types)
def __len__(self):
return len(self.data)
|
py | b401b74e6dfd5b3e1ac8e1cc778c01188c7a53f9 | from typing import Any
from checkov.common.models.consts import ANY_VALUE
from checkov.common.models.enums import CheckCategories
from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
class UsernameExists(BaseResourceValueCheck):
def __init__(self) -> None:
name = "Ensure username is set"
id = "CKV_LIN_4"
supported_resources = ["linode_user"]
categories = [CheckCategories.GENERAL_SECURITY]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def get_inspected_key(self) -> str:
return "username"
def get_expected_value(self) -> Any:
return ANY_VALUE
check = UsernameExists()
|
py | b401b7ab030a1a8bb97c3270a04546398afee065 | #!/usr/bin/python3
import pytest
PERIOD = 30
DENOMINATOR = 10 ** 18
SMOOTHING = 2
ALPHA = DENOMINATOR - SMOOTHING * DENOMINATOR / (PERIOD + 1)
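# ALPHA is a fixed-point EMA decay factor, alpha = 1 - SMOOTHING/(PERIOD + 1),
# scaled by DENOMINATOR (1e18). The tests below fold each sample in as
#   new = ALPHA*last + (DENOMINATOR - ALPHA)*current
# and the extra DENOMINATOR factor cancels in the volume/amount price ratio.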
def test_exchange_dai_to_usdc(_threepoolvolgauge, threepool, DAI, tracker, accounts):
for i in range(5):
print("Attemp #" + str(i + 1) + " .....")
last_reward_amount = tracker.rewardAmount()
tx = _threepoolvolgauge.exchange(0, 1, 50 * 10 ** 18, 0, {'from': accounts[0]})
vgas = tx.gas_used
print("VGaugeGas : " + str(vgas) + " Unit")
tx = threepool.exchange(0, 1, 50 * 10 ** 18, 0, {'from': accounts[0]})
print("OriginGas : " + str(tx.gas_used) + " Unit")
print("ConsumedGasByVolumeGauge : " + str(vgas - tx.gas_used) + " Unit")
current_reward_amount = tracker.rewardAmount()
lastvolumedata = tracker.lastVolumeData(DAI)
last_volume = lastvolumedata[0]
last_amount = lastvolumedata[1]
currentvolumedata = tracker.currentVolumeData(DAI)
current_volume = currentvolumedata[0]
current_amount = currentvolumedata[1]
newvolume = ALPHA * last_volume + (DENOMINATOR - ALPHA) * current_volume
newamount = ALPHA * last_amount + (DENOMINATOR - ALPHA) * current_amount
price_v_ema = newvolume / newamount
print("price_by_volume_EMA* : " + str(price_v_ema / DENOMINATOR) + " CRV")
print("reward_amount : " + str(current_reward_amount) + " (" + str(current_reward_amount / DENOMINATOR) + " CRV)")
print("increased_reward_amount_in_CRV : " + str(float(current_reward_amount - last_reward_amount) / DENOMINATOR) + " CRV")
def test_exchange_usdc_to_dai(_threepoolvolgauge, threepool, USDC, tracker, accounts):
for i in range(5):
print("Attemp #" + str(i + 1) + " .....")
last_reward_amount = tracker.rewardAmount()
tx = _threepoolvolgauge.exchange(1, 0, 50 * 10 ** 6, 0, {'from': accounts[0]})
vgas = tx.gas_used
print("VGaugeGas : " + str(vgas) + " Unit")
tx = threepool.exchange(1, 0, 50 * 10 ** 6, 0, {'from': accounts[0]})
print("OriginGas : " + str(tx.gas_used) + " Unit")
print("ConsumedGasByVolumeGauge : " + str(vgas - tx.gas_used) + " Unit")
current_reward_amount = tracker.rewardAmount()
lastvolumedata = tracker.lastVolumeData(USDC)
last_volume = lastvolumedata[0]
last_amount = lastvolumedata[1]
currentvolumedata = tracker.currentVolumeData(USDC)
current_volume = currentvolumedata[0]
current_amount = currentvolumedata[1]
newvolume = ALPHA * last_volume + (DENOMINATOR - ALPHA) * current_volume
newamount = ALPHA * last_amount + (DENOMINATOR - ALPHA) * current_amount
price_v_ema = newvolume / newamount
print("price_by_volume_EMA* : " + str(price_v_ema / DENOMINATOR) + " CRV")
print("reward_amount : " + str(current_reward_amount) + " (" + str(current_reward_amount / DENOMINATOR) + " CRV)")
print("increased_reward_amount_in_CRV : " + str(float(current_reward_amount - last_reward_amount) / DENOMINATOR) + " CRV")
def test_exchange_dai_to_usdt(_threepoolvolgauge, threepool, DAI, tracker, accounts):
for i in range(5):
print("Attemp #" + str(i + 1) + " .....")
last_reward_amount = tracker.rewardAmount()
tx = _threepoolvolgauge.exchange(0, 2, 50 * 10 ** 18, 0, {'from': accounts[0]})
vgas = tx.gas_used
print("VGaugeGas : " + str(vgas) + " Unit")
tx = threepool.exchange(0, 2, 50 * 10 ** 18, 0, {'from': accounts[0]})
print("OriginGas : " + str(tx.gas_used) + " Unit")
print("ConsumedGasByVolumeGauge : " + str(vgas - tx.gas_used) + " Unit")
current_reward_amount = tracker.rewardAmount()
lastvolumedata = tracker.lastVolumeData(DAI)
last_volume = lastvolumedata[0]
last_amount = lastvolumedata[1]
currentvolumedata = tracker.currentVolumeData(DAI)
current_volume = currentvolumedata[0]
current_amount = currentvolumedata[1]
newvolume = ALPHA * last_volume + (DENOMINATOR - ALPHA) * current_volume
newamount = ALPHA * last_amount + (DENOMINATOR - ALPHA) * current_amount
price_v_ema = newvolume / newamount
print("price_by_volume_EMA* : " + str(price_v_ema / DENOMINATOR) + " CRV")
print("reward_amount : " + str(current_reward_amount) + " (" + str(current_reward_amount / DENOMINATOR) + " CRV)")
print("increased_reward_amount_in_CRV : " + str(float(current_reward_amount - last_reward_amount) / DENOMINATOR) + " CRV")
def test_exchange_usdt_to_dai(_threepoolvolgauge, threepool, USDT, tracker, accounts):
for i in range(5):
print("Attemp #" + str(i + 1) + " .....")
last_reward_amount = tracker.rewardAmount()
tx = _threepoolvolgauge.exchange(2, 0, 50 * 10 ** 6, 0, {'from': accounts[0]})
vgas = tx.gas_used
print("VGaugeGas : " + str(vgas) + " Unit")
tx = threepool.exchange(2, 0, 50 * 10 ** 6, 0, {'from': accounts[0]})
print("OriginGas : " + str(tx.gas_used) + " Unit")
print("ConsumedGasByVolumeGauge : " + str(vgas - tx.gas_used) + " Unit")
current_reward_amount = tracker.rewardAmount()
lastvolumedata = tracker.lastVolumeData(USDT)
last_volume = lastvolumedata[0]
last_amount = lastvolumedata[1]
currentvolumedata = tracker.currentVolumeData(USDT)
current_volume = currentvolumedata[0]
current_amount = currentvolumedata[1]
newvolume = ALPHA * last_volume + (DENOMINATOR - ALPHA) * current_volume
newamount = ALPHA * last_amount + (DENOMINATOR - ALPHA) * current_amount
price_v_ema = newvolume / newamount
print("price_by_volume_EMA* : " + str(price_v_ema / DENOMINATOR) + " CRV")
print("reward_amount : " + str(current_reward_amount) + " (" + str(current_reward_amount / DENOMINATOR) + " CRV)")
print("increased_reward_amount_in_CRV : " + str(float(current_reward_amount - last_reward_amount) / DENOMINATOR) + " CRV")
def test_exchange_usdc_to_usdt(_threepoolvolgauge, threepool, USDC, tracker, accounts):
for i in range(5):
print("Attemp #" + str(i + 1) + " .....")
last_reward_amount = tracker.rewardAmount()
tx = _threepoolvolgauge.exchange(1, 2, 50 * 10 ** 6, 0, {'from': accounts[0]})
vgas = tx.gas_used
print("VGaugeGas : " + str(vgas) + " Unit")
tx = threepool.exchange(1, 2, 50 * 10 ** 6, 0, {'from': accounts[0]})
print("OriginGas : " + str(tx.gas_used) + " Unit")
print("ConsumedGasByVolumeGauge : " + str(vgas - tx.gas_used) + " Unit")
current_reward_amount = tracker.rewardAmount()
lastvolumedata = tracker.lastVolumeData(USDC)
last_volume = lastvolumedata[0]
last_amount = lastvolumedata[1]
currentvolumedata = tracker.currentVolumeData(USDC)
current_volume = currentvolumedata[0]
current_amount = currentvolumedata[1]
newvolume = ALPHA * last_volume + (DENOMINATOR - ALPHA) * current_volume
newamount = ALPHA * last_amount + (DENOMINATOR - ALPHA) * current_amount
price_v_ema = newvolume / newamount
print("price_by_volume_EMA* : " + str(price_v_ema / DENOMINATOR) + " CRV")
print("reward_amount : " + str(current_reward_amount) + " (" + str(current_reward_amount / DENOMINATOR) + " CRV)")
print("increased_reward_amount_in_CRV : " + str(float(current_reward_amount - last_reward_amount) / DENOMINATOR) + " CRV")
def test_exchange_usdt_to_usdc(_threepoolvolgauge, threepool, USDT, tracker, accounts):
for i in range(5):
print("Attemp #" + str(i + 1) + " .....")
last_reward_amount = tracker.rewardAmount()
tx = _threepoolvolgauge.exchange(2, 1, 50 * 10 ** 6, 0, {'from': accounts[0]})
vgas = tx.gas_used
print("VGaugeGas : " + str(vgas) + " Unit")
tx = threepool.exchange(2, 1, 50 * 10 ** 6, 0, {'from': accounts[0]})
print("OriginGas : " + str(tx.gas_used) + " Unit")
print("ConsumedGasByVolumeGauge : " + str(vgas - tx.gas_used) + " Unit")
current_reward_amount = tracker.rewardAmount()
lastvolumedata = tracker.lastVolumeData(USDT)
last_volume = lastvolumedata[0]
last_amount = lastvolumedata[1]
currentvolumedata = tracker.currentVolumeData(USDT)
current_volume = currentvolumedata[0]
current_amount = currentvolumedata[1]
newvolume = ALPHA * last_volume + (DENOMINATOR - ALPHA) * current_volume
newamount = ALPHA * last_amount + (DENOMINATOR - ALPHA) * current_amount
price_v_ema = newvolume / newamount
print("price_by_volume_EMA* : " + str(price_v_ema / DENOMINATOR) + " CRV")
print("reward_amount : " + str(current_reward_amount) + " (" + str(current_reward_amount / DENOMINATOR) + " CRV)")
print("increased_reward_amount_in_CRV : " + str(float(current_reward_amount - last_reward_amount) / DENOMINATOR) + " CRV")
|
py | b401b7eb06593682fa20997d227b78da972d8b19 | import os
import sys
sys.path.append(os.path.abspath(os.path.join(__file__, '..', '..')))
import argparse
import os
import cv2
import glob
import copy
import numpy as np
import torch
import imageio
from PIL import Image
import scipy.ndimage
from skimage.feature import canny
import torchvision.transforms.functional as F
from RAFT import utils
from RAFT import RAFT
import utils.region_fill as rf
from utils.Poisson_blend import Poisson_blend
from utils.Poisson_blend_img import Poisson_blend_img
from get_flowNN import get_flowNN
from get_flowNN_gradient import get_flowNN_gradient
from utils.common_utils import flow_edge
from spatial_inpaint import spatial_inpaint
from frame_inpaint import DeepFillv1
from edgeconnect.networks import EdgeGenerator_
def to_tensor(img):
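# Wrap the array as a PIL image and convert it to a float CxHxW torch tensor.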
img = Image.fromarray(img)
img_t = F.to_tensor(img).float()
return img_t
def infer(args, EdgeGenerator, device, flow_img_gray, edge, mask):
# Add a pytorch dataloader
flow_img_gray_tensor = to_tensor(flow_img_gray)[None, :, :].float().to(device)
edge_tensor = to_tensor(edge)[None, :, :].float().to(device)
mask_tensor = torch.from_numpy(mask.astype(np.float64))[None, None, :, :].float().to(device)
# Complete the edges
edges_masked = (edge_tensor * (1 - mask_tensor))
images_masked = (flow_img_gray_tensor * (1 - mask_tensor)) + mask_tensor
inputs = torch.cat((images_masked, edges_masked, mask_tensor), dim=1)
with torch.no_grad():
edges_completed = EdgeGenerator(inputs) # in: [grayscale(1) + edge(1) + mask(1)]
edges_completed = edges_completed * mask_tensor + edge_tensor * (1 - mask_tensor)
edge_completed = edges_completed[0, 0].data.cpu().numpy()
edge_completed[edge_completed < 0.5] = 0
edge_completed[edge_completed >= 0.5] = 1
return edge_completed
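# gradient_mask marks a pixel as unknown if it, the pixel below it, or the pixel
# to its right is masked, so forward-difference gradients touching a masked
# pixel are themselves treated as missing.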
def gradient_mask(mask):
gradient_mask = np.logical_or.reduce((mask,
np.concatenate((mask[1:, :], np.zeros((1, mask.shape[1]), dtype=np.bool)), axis=0),
np.concatenate((mask[:, 1:], np.zeros((mask.shape[0], 1), dtype=np.bool)), axis=1)))
return gradient_mask
def create_dir(dir):
"""Creates a directory if not exist.
"""
if not os.path.exists(dir):
os.makedirs(dir)
def initialize_RAFT(args):
"""Initializes the RAFT model.
"""
model = torch.nn.DataParallel(RAFT(args))
model.load_state_dict(torch.load(args.model))
model = model.module
model.to('cuda')
model.eval()
return model
def calculate_flow(args, model, video, mode):
"""Calculates optical flow.
"""
if mode not in ['forward', 'backward']:
raise NotImplementedError
nFrame, _, imgH, imgW = video.shape
Flow = np.empty(((imgH, imgW, 2, 0)), dtype=np.float32)
# if os.path.isdir(os.path.join(args.outroot, 'flow', mode + '_flo')):
# for flow_name in sorted(glob.glob(os.path.join(args.outroot, 'flow', mode + '_flo', '*.flo'))):
# print("Loading {0}".format(flow_name), '\r', end='')
# flow = utils.frame_utils.readFlow(flow_name)
# Flow = np.concatenate((Flow, flow[..., None]), axis=-1)
# return Flow
create_dir(os.path.join(args.outroot, 'flow', mode + '_flo'))
create_dir(os.path.join(args.outroot, 'flow', mode + '_png'))
with torch.no_grad():
for i in range(video.shape[0] - 1):
print("Calculating {0} flow {1:2d} <---> {2:2d}".format(mode, i, i + 1), '\r', end='')
if mode == 'forward':
# Flow i -> i + 1
image1 = video[i, None]
image2 = video[i + 1, None]
elif mode == 'backward':
# Flow i + 1 -> i
image1 = video[i + 1, None]
image2 = video[i, None]
else:
raise NotImplementedError
_, flow = model(image1, image2, iters=20, test_mode=True)
flow = flow[0].permute(1, 2, 0).cpu().numpy()
Flow = np.concatenate((Flow, flow[..., None]), axis=-1)
# Flow visualization.
flow_img = utils.flow_viz.flow_to_image(flow)
flow_img = Image.fromarray(flow_img)
# Saves the flow and flow_img.
flow_img.save(os.path.join(args.outroot, 'flow', mode + '_png', '%05d.png'%i))
utils.frame_utils.writeFlow(os.path.join(args.outroot, 'flow', mode + '_flo', '%05d.flo'%i), flow)
return Flow
def extrapolation(args, video_ori, corrFlowF_ori, corrFlowB_ori):
"""Prepares the data for video extrapolation.
"""
imgH, imgW, _, nFrame = video_ori.shape
# Defines new FOV.
imgH_extr = int(args.H_scale * imgH)
imgW_extr = int(args.W_scale * imgW)
H_start = int((imgH_extr - imgH) / 2)
W_start = int((imgW_extr - imgW) / 2)
# Generates the mask for missing region.
flow_mask = np.ones(((imgH_extr, imgW_extr)), dtype=np.bool)
flow_mask[H_start : H_start + imgH, W_start : W_start + imgW] = 0
mask_dilated = gradient_mask(flow_mask)
# Extrapolates the FOV for video.
video = np.zeros(((imgH_extr, imgW_extr, 3, nFrame)), dtype=np.float32)
video[H_start : H_start + imgH, W_start : W_start + imgW, :, :] = video_ori
for i in range(nFrame):
print("Preparing frame {0}".format(i), '\r', end='')
video[:, :, :, i] = cv2.inpaint((video[:, :, :, i] * 255).astype(np.uint8), flow_mask.astype(np.uint8), 3, cv2.INPAINT_TELEA).astype(np.float32) / 255.
# Extrapolates the FOV for flow.
corrFlowF = np.zeros(((imgH_extr, imgW_extr, 2, nFrame - 1)), dtype=np.float32)
corrFlowB = np.zeros(((imgH_extr, imgW_extr, 2, nFrame - 1)), dtype=np.float32)
corrFlowF[H_start : H_start + imgH, W_start : W_start + imgW, :] = corrFlowF_ori
corrFlowB[H_start : H_start + imgH, W_start : W_start + imgW, :] = corrFlowB_ori
return video, corrFlowF, corrFlowB, flow_mask, mask_dilated, (W_start, H_start), (W_start + imgW, H_start + imgH)
def complete_flow(args, corrFlow, flow_mask, mode, edge=None):
"""Completes flow.
"""
if mode not in ['forward', 'backward']:
raise NotImplementedError
imgH, imgW, _, nFrame = corrFlow.shape
# if os.path.isdir(os.path.join(args.outroot, 'flow_comp', mode + '_flo')):
# compFlow = np.empty(((imgH, imgW, 2, 0)), dtype=np.float32)
# for flow_name in sorted(glob.glob(os.path.join(args.outroot, 'flow_comp', mode + '_flo', '*.flo'))):
# print("Loading {0}".format(flow_name), '\r', end='')
# flow = utils.frame_utils.readFlow(flow_name)
# compFlow = np.concatenate((compFlow, flow[..., None]), axis=-1)
# return compFlow
create_dir(os.path.join(args.outroot, 'flow_comp', mode + '_flo'))
create_dir(os.path.join(args.outroot, 'flow_comp', mode + '_png'))
compFlow = np.zeros(((imgH, imgW, 2, nFrame)), dtype=np.float32)
for i in range(nFrame):
print("Completing {0} flow {1:2d} <---> {2:2d}".format(mode, i, i + 1), '\r', end='')
flow = corrFlow[:, :, :, i]
flow_mask_img = flow_mask[:, :, i] if mode == 'forward' else flow_mask[:, :, i + 1]
flow_mask_gradient_img = gradient_mask(flow_mask_img)
if edge is not None:
# imgH x (imgW - 1 + 1) x 2
gradient_x = np.concatenate((np.diff(flow, axis=1), np.zeros((imgH, 1, 2), dtype=np.float32)), axis=1)
# (imgH - 1 + 1) x imgW x 2
gradient_y = np.concatenate((np.diff(flow, axis=0), np.zeros((1, imgW, 2), dtype=np.float32)), axis=0)
# concatenate gradient_x and gradient_y
gradient = np.concatenate((gradient_x, gradient_y), axis=2)
# We can trust the gradient outside of flow_mask_gradient_img
# We assume the gradient within flow_mask_gradient_img is 0.
gradient[flow_mask_gradient_img, :] = 0
# Complete the flow
imgSrc_gy = gradient[:, :, 2 : 4]
imgSrc_gy = imgSrc_gy[0 : imgH - 1, :, :]
imgSrc_gx = gradient[:, :, 0 : 2]
imgSrc_gx = imgSrc_gx[:, 0 : imgW - 1, :]
compFlow[:, :, :, i] = Poisson_blend(flow, imgSrc_gx, imgSrc_gy, flow_mask_img, edge[:, :, i])
else:
flow[:, :, 0] = rf.regionfill(flow[:, :, 0], flow_mask_img)
flow[:, :, 1] = rf.regionfill(flow[:, :, 1], flow_mask_img)
compFlow[:, :, :, i] = flow
# Flow visualization.
flow_img = utils.flow_viz.flow_to_image(compFlow[:, :, :, i])
flow_img = Image.fromarray(flow_img)
# Saves the flow and flow_img.
flow_img.save(os.path.join(args.outroot, 'flow_comp', mode + '_png', '%05d.png'%i))
utils.frame_utils.writeFlow(os.path.join(args.outroot, 'flow_comp', mode + '_flo', '%05d.flo'%i), compFlow[:, :, :, i])
return compFlow
def edge_completion(args, EdgeGenerator, corrFlow, flow_mask, mode):
"""Calculate flow edge and complete it.
"""
if mode not in ['forward', 'backward']:
raise NotImplementedError
imgH, imgW, _, nFrame = corrFlow.shape
Edge = np.empty(((imgH, imgW, 0)), dtype=np.float32)
for i in range(nFrame):
print("Completing {0} flow edge {1:2d} <---> {2:2d}".format(mode, i, i + 1), '\r', end='')
flow_mask_img = flow_mask[:, :, i] if mode == 'forward' else flow_mask[:, :, i + 1]
flow_img_gray = (corrFlow[:, :, 0, i] ** 2 + corrFlow[:, :, 1, i] ** 2) ** 0.5
flow_img_gray = flow_img_gray / flow_img_gray.max()
edge_corr = canny(flow_img_gray, sigma=2, mask=(1 - flow_mask_img).astype(np.bool))
edge_completed = infer(args, EdgeGenerator, torch.device('cuda:0'), flow_img_gray, edge_corr, flow_mask_img)
Edge = np.concatenate((Edge, edge_completed[..., None]), axis=-1)
return Edge
def video_completion(args):
# Flow model.
RAFT_model = initialize_RAFT(args)
# Loads frames.
filename_list = glob.glob(os.path.join(args.path, '*.png')) + \
glob.glob(os.path.join(args.path, '*.jpg'))
# Obtains imgH, imgW and nFrame.
imgH, imgW = np.array(Image.open(filename_list[0])).shape[:2]
nFrame = len(filename_list)
# Loads video.
video = []
for filename in sorted(filename_list):
video.append(torch.from_numpy(np.array(Image.open(filename)).astype(np.uint8)).permute(2, 0, 1).float())
video = torch.stack(video, dim=0)
video = video.to('cuda')
    # Calculates the corrupted flow.
corrFlowF = calculate_flow(args, RAFT_model, video, 'forward')
corrFlowB = calculate_flow(args, RAFT_model, video, 'backward')
print('\nFinish flow prediction.')
# Makes sure video is in BGR (opencv) format.
video = video.permute(2, 3, 1, 0).cpu().numpy()[:, :, ::-1, :] / 255.
if args.mode == 'video_extrapolation':
        # Creates video and flow where the extrapolated regions are missing.
video, corrFlowF, corrFlowB, flow_mask, mask_dilated, start_point, end_point = extrapolation(args, video, corrFlowF, corrFlowB)
imgH, imgW = video.shape[:2]
# mask indicating the missing region in the video.
mask = np.tile(flow_mask[..., None], (1, 1, nFrame))
flow_mask = np.tile(flow_mask[..., None], (1, 1, nFrame))
else:
# Loads masks.
filename_list = glob.glob(os.path.join(args.path_mask, '*.png')) + \
glob.glob(os.path.join(args.path_mask, '*.jpg'))
mask = []
flow_mask = []
for filename in sorted(filename_list):
mask_img = np.array(Image.open(filename).convert('L'))
mask.append(mask_img)
            # Dilate by 15 pixels so that all known pixels are trustworthy
flow_mask_img = scipy.ndimage.binary_dilation(mask_img, iterations=15)
# Close the small holes inside the foreground objects
flow_mask_img = cv2.morphologyEx(flow_mask_img.astype(np.uint8), cv2.MORPH_CLOSE, np.ones((21, 21),np.uint8)).astype(np.bool)
flow_mask_img = scipy.ndimage.binary_fill_holes(flow_mask_img).astype(np.bool)
flow_mask.append(flow_mask_img)
# mask indicating the missing region in the video.
mask = np.stack(mask, -1).astype(np.bool)
flow_mask = np.stack(flow_mask, -1).astype(np.bool)
if args.edge_guide:
# Edge completion model.
EdgeGenerator = EdgeGenerator_()
EdgeComp_ckpt = torch.load(args.edge_completion_model)
EdgeGenerator.load_state_dict(EdgeComp_ckpt['generator'])
EdgeGenerator.to(torch.device('cuda:0'))
EdgeGenerator.eval()
# Edge completion.
FlowF_edge = edge_completion(args, EdgeGenerator, corrFlowF, flow_mask, 'forward')
FlowB_edge = edge_completion(args, EdgeGenerator, corrFlowB, flow_mask, 'backward')
print('\nFinish edge completion.')
else:
FlowF_edge, FlowB_edge = None, None
# Completes the flow.
videoFlowF = complete_flow(args, corrFlowF, flow_mask, 'forward', FlowF_edge)
videoFlowB = complete_flow(args, corrFlowB, flow_mask, 'backward', FlowB_edge)
print('\nFinish flow completion.')
iter = 0
mask_tofill = mask
video_comp = video
# Image inpainting model.
deepfill = DeepFillv1(pretrained_model=args.deepfill_model, image_shape=[imgH, imgW])
# We iteratively complete the video.
while(np.sum(mask_tofill) > 0):
create_dir(os.path.join(args.outroot, 'frame_comp_' + str(iter)))
# Color propagation.
video_comp, mask_tofill, _ = get_flowNN(args,
video_comp,
mask_tofill,
videoFlowF,
videoFlowB,
None,
None)
for i in range(nFrame):
mask_tofill[:, :, i] = scipy.ndimage.binary_dilation(mask_tofill[:, :, i], iterations=2)
img = video_comp[:, :, :, i] * 255
# Green indicates the regions that are not filled yet.
img[mask_tofill[:, :, i]] = [0, 255, 0]
cv2.imwrite(os.path.join(args.outroot, 'frame_comp_' + str(iter), '%05d.png'%i), img)
# video_comp_ = (video_comp * 255).astype(np.uint8).transpose(3, 0, 1, 2)[:, :, :, ::-1]
# imageio.mimwrite(os.path.join(args.outroot, 'frame_comp_' + str(iter), 'intermediate_{0}.mp4'.format(str(iter))), video_comp_, fps=12, quality=8, macro_block_size=1)
# imageio.mimsave(os.path.join(args.outroot, 'frame_comp_' + str(iter), 'intermediate_{0}.gif'.format(str(iter))), video_comp_, format='gif', fps=12)
mask_tofill, video_comp = spatial_inpaint(deepfill, mask_tofill, video_comp)
iter += 1
create_dir(os.path.join(args.outroot, 'frame_comp_' + 'final'))
video_comp_ = (video_comp * 255).astype(np.uint8).transpose(3, 0, 1, 2)[:, :, :, ::-1]
for i in range(nFrame):
img = video_comp[:, :, :, i] * 255
cv2.imwrite(os.path.join(args.outroot, 'frame_comp_' + 'final', '%05d.png'%i), img)
imageio.mimwrite(os.path.join(args.outroot, 'frame_comp_' + 'final', 'final.mp4'), video_comp_, fps=12, quality=8, macro_block_size=1)
# imageio.mimsave(os.path.join(args.outroot, 'frame_comp_' + 'final', 'final.gif'), video_comp_, format='gif', fps=12)
def video_completion_seamless(args):
# Flow model.
RAFT_model = initialize_RAFT(args)
# Loads frames.
filename_list = glob.glob(os.path.join(args.path, '*.png')) + \
glob.glob(os.path.join(args.path, '*.jpg'))
# Obtains imgH, imgW and nFrame.
imgH, imgW = np.array(Image.open(filename_list[0])).shape[:2]
nFrame = len(filename_list)
# Loads video.
video = []
for filename in sorted(filename_list):
video.append(torch.from_numpy(np.array(Image.open(filename)).astype(np.uint8)).permute(2, 0, 1).float())
video = torch.stack(video, dim=0)
video = video.to('cuda')
    # Calculates the corrupted flow.
corrFlowF = calculate_flow(args, RAFT_model, video, 'forward')
corrFlowB = calculate_flow(args, RAFT_model, video, 'backward')
print('\nFinish flow prediction.')
# Makes sure video is in BGR (opencv) format.
video = video.permute(2, 3, 1, 0).cpu().numpy()[:, :, ::-1, :] / 255.
if args.mode == 'video_extrapolation':
        # Creates video and flow where the extrapolated regions are missing.
video, corrFlowF, corrFlowB, flow_mask, mask_dilated, start_point, end_point = extrapolation(args, video, corrFlowF, corrFlowB)
imgH, imgW = video.shape[:2]
# mask indicating the missing region in the video.
mask = np.tile(flow_mask[..., None], (1, 1, nFrame))
flow_mask = np.tile(flow_mask[..., None], (1, 1, nFrame))
mask_dilated = np.tile(mask_dilated[..., None], (1, 1, nFrame))
else:
# Loads masks.
filename_list = glob.glob(os.path.join(args.path_mask, '*.png')) + \
glob.glob(os.path.join(args.path_mask, '*.jpg'))
mask = []
mask_dilated = []
flow_mask = []
for filename in sorted(filename_list):
mask_img = np.array(Image.open(filename).convert('L'))
            # Dilate by 15 pixels so that all known pixels are trustworthy
flow_mask_img = scipy.ndimage.binary_dilation(mask_img, iterations=15)
# Close the small holes inside the foreground objects
flow_mask_img = cv2.morphologyEx(flow_mask_img.astype(np.uint8), cv2.MORPH_CLOSE, np.ones((21, 21),np.uint8)).astype(np.bool)
flow_mask_img = scipy.ndimage.binary_fill_holes(flow_mask_img).astype(np.bool)
flow_mask.append(flow_mask_img)
mask_img = scipy.ndimage.binary_dilation(mask_img, iterations=5)
mask_img = scipy.ndimage.binary_fill_holes(mask_img).astype(np.bool)
mask.append(mask_img)
mask_dilated.append(gradient_mask(mask_img))
# mask indicating the missing region in the video.
mask = np.stack(mask, -1).astype(np.bool)
mask_dilated = np.stack(mask_dilated, -1).astype(np.bool)
flow_mask = np.stack(flow_mask, -1).astype(np.bool)
if args.edge_guide:
# Edge completion model.
EdgeGenerator = EdgeGenerator_()
EdgeComp_ckpt = torch.load(args.edge_completion_model)
EdgeGenerator.load_state_dict(EdgeComp_ckpt['generator'])
EdgeGenerator.to(torch.device('cuda:0'))
EdgeGenerator.eval()
# Edge completion.
FlowF_edge = edge_completion(args, EdgeGenerator, corrFlowF, flow_mask, 'forward')
FlowB_edge = edge_completion(args, EdgeGenerator, corrFlowB, flow_mask, 'backward')
print('\nFinish edge completion.')
else:
FlowF_edge, FlowB_edge = None, None
# Completes the flow.
videoFlowF = complete_flow(args, corrFlowF, flow_mask, 'forward', FlowF_edge)
videoFlowB = complete_flow(args, corrFlowB, flow_mask, 'backward', FlowB_edge)
print('\nFinish flow completion.')
# Prepare gradients
gradient_x = np.empty(((imgH, imgW, 3, 0)), dtype=np.float32)
gradient_y = np.empty(((imgH, imgW, 3, 0)), dtype=np.float32)
for indFrame in range(nFrame):
img = video[:, :, :, indFrame]
img[mask[:, :, indFrame], :] = 0
img = cv2.inpaint((img * 255).astype(np.uint8), mask[:, :, indFrame].astype(np.uint8), 3, cv2.INPAINT_TELEA).astype(np.float32) / 255.
gradient_x_ = np.concatenate((np.diff(img, axis=1), np.zeros((imgH, 1, 3), dtype=np.float32)), axis=1)
gradient_y_ = np.concatenate((np.diff(img, axis=0), np.zeros((1, imgW, 3), dtype=np.float32)), axis=0)
gradient_x = np.concatenate((gradient_x, gradient_x_.reshape(imgH, imgW, 3, 1)), axis=-1)
gradient_y = np.concatenate((gradient_y, gradient_y_.reshape(imgH, imgW, 3, 1)), axis=-1)
gradient_x[mask_dilated[:, :, indFrame], :, indFrame] = 0
gradient_y[mask_dilated[:, :, indFrame], :, indFrame] = 0
iter = 0
mask_tofill = mask
gradient_x_filled = gradient_x # corrupted gradient_x, mask_gradient indicates the missing gradient region
gradient_y_filled = gradient_y # corrupted gradient_y, mask_gradient indicates the missing gradient region
mask_gradient = mask_dilated
video_comp = video
# Image inpainting model.
deepfill = DeepFillv1(pretrained_model=args.deepfill_model, image_shape=[imgH, imgW])
# We iteratively complete the video.
while(np.sum(mask) > 0):
create_dir(os.path.join(args.outroot, 'frame_seamless_comp_' + str(iter)))
# Gradient propagation.
gradient_x_filled, gradient_y_filled, mask_gradient = \
get_flowNN_gradient(args,
gradient_x_filled,
gradient_y_filled,
mask,
mask_gradient,
videoFlowF,
videoFlowB,
None,
None)
        # If there are holes in the mask, Poisson blending will fail, so we fill them
        # here at the cost of some accuracy. Another solution would be to modify the
        # Poisson blending itself.
for indFrame in range(nFrame):
mask_gradient[:, :, indFrame] = scipy.ndimage.binary_fill_holes(mask_gradient[:, :, indFrame]).astype(np.bool)
# After one gradient propagation iteration
# gradient --> RGB
for indFrame in range(nFrame):
print("Poisson blending frame {0:3d}".format(indFrame))
if mask[:, :, indFrame].sum() > 0:
try:
frameBlend, UnfilledMask = Poisson_blend_img(video_comp[:, :, :, indFrame], gradient_x_filled[:, 0 : imgW - 1, :, indFrame], gradient_y_filled[0 : imgH - 1, :, :, indFrame], mask[:, :, indFrame], mask_gradient[:, :, indFrame])
# UnfilledMask = scipy.ndimage.binary_fill_holes(UnfilledMask).astype(np.bool)
                except Exception:
frameBlend, UnfilledMask = video_comp[:, :, :, indFrame], mask[:, :, indFrame]
frameBlend = np.clip(frameBlend, 0, 1.0)
tmp = cv2.inpaint((frameBlend * 255).astype(np.uint8), UnfilledMask.astype(np.uint8), 3, cv2.INPAINT_TELEA).astype(np.float32) / 255.
frameBlend[UnfilledMask, :] = tmp[UnfilledMask, :]
video_comp[:, :, :, indFrame] = frameBlend
mask[:, :, indFrame] = UnfilledMask
frameBlend_ = copy.deepcopy(frameBlend)
# Green indicates the regions that are not filled yet.
frameBlend_[mask[:, :, indFrame], :] = [0, 1., 0]
else:
frameBlend_ = video_comp[:, :, :, indFrame]
cv2.imwrite(os.path.join(args.outroot, 'frame_seamless_comp_' + str(iter), '%05d.png'%indFrame), frameBlend_ * 255.)
# video_comp_ = (video_comp * 255).astype(np.uint8).transpose(3, 0, 1, 2)[:, :, :, ::-1]
# imageio.mimwrite(os.path.join(args.outroot, 'frame_seamless_comp_' + str(iter), 'intermediate_{0}.mp4'.format(str(iter))), video_comp_, fps=12, quality=8, macro_block_size=1)
# imageio.mimsave(os.path.join(args.outroot, 'frame_seamless_comp_' + str(iter), 'intermediate_{0}.gif'.format(str(iter))), video_comp_, format='gif', fps=12)
mask, video_comp = spatial_inpaint(deepfill, mask, video_comp)
iter += 1
# Re-calculate gradient_x/y_filled and mask_gradient
for indFrame in range(nFrame):
mask_gradient[:, :, indFrame] = gradient_mask(mask[:, :, indFrame])
gradient_x_filled[:, :, :, indFrame] = np.concatenate((np.diff(video_comp[:, :, :, indFrame], axis=1), np.zeros((imgH, 1, 3), dtype=np.float32)), axis=1)
gradient_y_filled[:, :, :, indFrame] = np.concatenate((np.diff(video_comp[:, :, :, indFrame], axis=0), np.zeros((1, imgW, 3), dtype=np.float32)), axis=0)
gradient_x_filled[mask_gradient[:, :, indFrame], :, indFrame] = 0
gradient_y_filled[mask_gradient[:, :, indFrame], :, indFrame] = 0
create_dir(os.path.join(args.outroot, 'frame_seamless_comp_' + 'final'))
video_comp_ = (video_comp * 255).astype(np.uint8).transpose(3, 0, 1, 2)[:, :, :, ::-1]
for i in range(nFrame):
img = video_comp[:, :, :, i] * 255
cv2.imwrite(os.path.join(args.outroot, 'frame_seamless_comp_' + 'final', '%05d.png'%i), img)
imageio.mimwrite(os.path.join(args.outroot, 'frame_seamless_comp_' + 'final', 'final.mp4'), video_comp_, fps=12, quality=8, macro_block_size=1)
# imageio.mimsave(os.path.join(args.outroot, 'frame_seamless_comp_' + 'final', 'final.gif'), video_comp_, format='gif', fps=12)
def main(args):
assert args.mode in ('object_removal', 'video_extrapolation'), (
"Accepted modes: 'object_removal', 'video_extrapolation', but input is %s"
    ) % args.mode
if args.seamless:
video_completion_seamless(args)
else:
video_completion(args)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# video completion
parser.add_argument('--seamless', action='store_true', help='Whether operate in the gradient domain')
parser.add_argument('--edge_guide', action='store_true', help='Whether use edge as guidance to complete flow')
parser.add_argument('--mode', default='object_removal', help="modes: object_removal / video_extrapolation")
parser.add_argument('--path', default='../data/tennis', help="dataset for evaluation")
parser.add_argument('--path_mask', default='../data/tennis_mask', help="mask for object removal")
parser.add_argument('--outroot', default='../result/', help="output directory")
parser.add_argument('--consistencyThres', dest='consistencyThres', default=np.inf, type=float, help='flow consistency error threshold')
parser.add_argument('--alpha', dest='alpha', default=0.1, type=float)
    parser.add_argument('--Nonlocal', dest='Nonlocal', action='store_true')
# RAFT
parser.add_argument('--model', default='../weight/raft-things.pth', help="restore checkpoint")
parser.add_argument('--small', action='store_true', help='use small model')
parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision')
    parser.add_argument('--alternate_corr', action='store_true', help='use efficient correlation implementation')
# Deepfill
parser.add_argument('--deepfill_model', default='../weight/imagenet_deepfill.pth', help="restore checkpoint")
# Edge completion
parser.add_argument('--edge_completion_model', default='../weight/edge_completion.pth', help="restore checkpoint")
# extrapolation
parser.add_argument('--H_scale', dest='H_scale', default=2, type=float, help='H extrapolation scale')
parser.add_argument('--W_scale', dest='W_scale', default=2, type=float, help='W extrapolation scale')
args = parser.parse_args()
main(args)
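# --- Hedged usage notes (added; not part of the original script) ---
# Example invocations, assuming this file is saved as video_completion.py and that
# the checkpoint/data paths wired into the argparse defaults above exist on disk:
#
#   # Object removal with seamless (gradient-domain) blending and edge guidance
#   python video_completion.py --mode object_removal --seamless --edge_guide \
#       --path ../data/tennis --path_mask ../data/tennis_mask --outroot ../result/
#
#   # Field-of-view extrapolation (doubles H and W by default; no mask needed)
#   python video_completion.py --mode video_extrapolation --H_scale 2 --W_scale 2 \
#       --path ../data/tennis --outroot ../result/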
|
py | b401b7f3b284254396335e957c7e4ded0eb25215 | import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
class Discriminator(nn.Module):
def __init__(self, batch_size, img_size, text_embed_dim, text_reduced_dim):
super(Discriminator, self).__init__()
self.batch_size = batch_size
self.img_size = img_size
self.in_channels = img_size[2]
self.text_embed_dim = text_embed_dim
self.text_reduced_dim = text_reduced_dim
# Defining the discriminator network architecture
self.d_net = nn.Sequential(
nn.Conv2d(self.in_channels, 64, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(64, 128, 4, 2, 1, bias=False),
nn.BatchNorm2d(128),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(128, 256, 4, 2, 1, bias=False),
nn.BatchNorm2d(256),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(256, 512, 4, 2, 1, bias=False),
nn.BatchNorm2d(512),
nn.LeakyReLU(0.2, inplace=True)).cuda()
# output_dim = (batch_size, 4, 4, 512)
# text.size() = (batch_size, text_embed_dim)
# Defining a linear layer to reduce the dimensionality of caption embedding
# from text_embed_dim to text_reduced_dim
self.cat_net = nn.Sequential(
nn.Conv2d(512 + self.text_reduced_dim, 512, 4, 2, 1, bias=False),
nn.BatchNorm2d(512),
nn.LeakyReLU(0.2, inplace=True)).cuda()
        # Named reduce_text_dim so the nn.Linear does not overwrite the integer
        # self.text_reduced_dim used when building cat_net above.
        self.reduce_text_dim = nn.Linear(self.text_embed_dim, self.text_reduced_dim).cuda()
self.linear = nn.Linear(2 * 2 * 512, 1).cuda()
def forward(self, image, text):
""" Given the image and its caption embedding, predict whether the image
is real or fake.
Arguments
---------
image : torch.FloatTensor
image.size() = (batch_size, 64, 64, 3)
text : torch.FloatTensor
Output of the skipthought embedding model for the caption
text.size() = (batch_size, text_embed_dim)
--------
Returns
--------
output : Probability for the image being real/fake
logit : Final score of the discriminator
"""
image = image.permute(0, 3, 1, 2) # (batch_size, 3, 64, 64)
d_net_out = self.d_net(image) # (batch_size, 512, 4, 4)
d_net_out = d_net_out.permute(0, 2, 3, 1) # (batch_size, 4, 4, 512)
        text_reduced = self.reduce_text_dim(text)  # (batch_size, text_reduced_dim)
text_reduced = text_reduced.unsqueeze(1) # (batch_size, 1, text_reduced_dim)
text_reduced = text_reduced.unsqueeze(2) # (batch_size, 1, 1, text_reduced_dim)
text_reduced = text_reduced.expand(-1, 4, 4, -1)
        concat_out = torch.cat((d_net_out, text_reduced), 3)  # (batch_size, 4, 4, 512 + text_reduced_dim)
logit = self.cat_net(concat_out.permute(0, 3, 1, 2))
logit = logit.reshape(-1, logit.size()[1] * logit.size()[2] * logit.size()[3])
logit = self.linear(logit)
output = torch.sigmoid(logit)
return output, logit
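# --- Hedged usage sketch (added; not part of the original file) ---
# A minimal smoke test for the Discriminator above. It assumes a CUDA device is
# available (the layers are moved to .cuda() in __init__) and uses illustrative
# hyperparameters (batch of 4, 64x64 RGB images, 1024-d caption embeddings reduced
# to 256) that are not taken from the original training setup.
def _demo_discriminator():
    batch_size, text_embed_dim, text_reduced_dim = 4, 1024, 256
    netD = Discriminator(batch_size, (64, 64, 3), text_embed_dim, text_reduced_dim)
    images = torch.rand(batch_size, 64, 64, 3).cuda()         # NHWC, as forward() expects
    captions = torch.rand(batch_size, text_embed_dim).cuda()  # caption embedding stand-in
    output, logit = netD(images, captions)
    print(output.shape, logit.shape)                          # expected: (4, 1) for both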
|
py | b401b957cfb639b05723262f6f3cbd7d10028253 | import numpy
import cupy
from cupy_backends.cuda.api import runtime
from cupy_backends.cuda.libs import cublas
from cupy_backends.cuda.libs import cusolver
from cupy._core import internal
from cupy.cuda import device
from cupy.cusolver import check_availability
from cupy.cusolver import _gesvdj_batched, _gesvd_batched
from cupy.linalg import _util
def _lu_factor(a_t, dtype):
"""Compute pivoted LU decomposition.
Decompose a given batch of square matrices. Inputs and outputs are
transposed.
Args:
a_t (cupy.ndarray): The input matrix with dimension ``(..., N, N)``.
The dimension condition is not checked.
dtype (numpy.dtype): float32, float64, complex64, or complex128.
Returns:
lu_t (cupy.ndarray): ``L`` without its unit diagonal and ``U`` with
dimension ``(..., N, N)``.
piv (cupy.ndarray): 1-origin pivot indices with dimension
``(..., N)``.
dev_info (cupy.ndarray): ``getrf`` info with dimension ``(...)``.
.. seealso:: :func:`scipy.linalg.lu_factor`
"""
orig_shape = a_t.shape
n = orig_shape[-2]
    # copy is necessary to prevent `a` from being overwritten.
a_t = a_t.astype(dtype, order='C').reshape(-1, n, n)
batch_size = a_t.shape[0]
ipiv = cupy.empty((batch_size, n), dtype=numpy.int32)
dev_info = cupy.empty((batch_size,), dtype=numpy.int32)
# Heuristic condition from some performance test.
# TODO(kataoka): autotune
use_batched = batch_size * 65536 >= n * n
if use_batched:
handle = device.get_cublas_handle()
lda = n
step = n * lda * a_t.itemsize
start = a_t.data.ptr
stop = start + step * batch_size
a_array = cupy.arange(start, stop, step, dtype=cupy.uintp)
if dtype == numpy.float32:
getrfBatched = cupy.cuda.cublas.sgetrfBatched
elif dtype == numpy.float64:
getrfBatched = cupy.cuda.cublas.dgetrfBatched
elif dtype == numpy.complex64:
getrfBatched = cupy.cuda.cublas.cgetrfBatched
elif dtype == numpy.complex128:
getrfBatched = cupy.cuda.cublas.zgetrfBatched
else:
assert False
getrfBatched(
handle, n, a_array.data.ptr, lda, ipiv.data.ptr,
dev_info.data.ptr, batch_size)
else:
handle = device.get_cusolver_handle()
if dtype == numpy.float32:
getrf_bufferSize = cusolver.sgetrf_bufferSize
getrf = cusolver.sgetrf
elif dtype == numpy.float64:
getrf_bufferSize = cusolver.dgetrf_bufferSize
getrf = cusolver.dgetrf
elif dtype == numpy.complex64:
getrf_bufferSize = cusolver.cgetrf_bufferSize
getrf = cusolver.cgetrf
elif dtype == numpy.complex128:
getrf_bufferSize = cusolver.zgetrf_bufferSize
getrf = cusolver.zgetrf
else:
assert False
for i in range(batch_size):
a_ptr = a_t[i].data.ptr
buffersize = getrf_bufferSize(handle, n, n, a_ptr, n)
workspace = cupy.empty(buffersize, dtype=dtype)
getrf(
handle, n, n, a_ptr, n, workspace.data.ptr,
ipiv[i].data.ptr, dev_info[i].data.ptr)
return (
a_t.reshape(orig_shape),
ipiv.reshape(orig_shape[:-1]),
dev_info.reshape(orig_shape[:-2]),
)
def _potrf_batched(a):
"""Batched Cholesky decomposition.
Decompose a given array of two-dimensional square matrices into
``L * L.T``, where ``L`` is a lower-triangular matrix and ``.T``
is a conjugate transpose operator.
Args:
a (cupy.ndarray): The input array of matrices
with dimension ``(..., N, N)``
Returns:
cupy.ndarray: The lower-triangular matrix.
"""
if not check_availability('potrfBatched'):
raise RuntimeError('potrfBatched is not available')
dtype, out_dtype = _util.linalg_common_type(a)
if dtype == 'f':
potrfBatched = cusolver.spotrfBatched
elif dtype == 'd':
potrfBatched = cusolver.dpotrfBatched
elif dtype == 'F':
potrfBatched = cusolver.cpotrfBatched
else: # dtype == 'D':
potrfBatched = cusolver.zpotrfBatched
x = a.astype(dtype, order='C', copy=True)
xp = cupy._core._mat_ptrs(x)
n = x.shape[-1]
ldx = x.strides[-2] // x.dtype.itemsize
handle = device.get_cusolver_handle()
batch_size = internal.prod(x.shape[:-2])
dev_info = cupy.empty(batch_size, dtype=numpy.int32)
potrfBatched(
handle, cublas.CUBLAS_FILL_MODE_UPPER, n, xp.data.ptr, ldx,
dev_info.data.ptr, batch_size)
cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed(
potrfBatched, dev_info)
return cupy.tril(x).astype(out_dtype, copy=False)
def cholesky(a):
"""Cholesky decomposition.
Decompose a given two-dimensional square matrix into ``L * L.T``,
where ``L`` is a lower-triangular matrix and ``.T`` is a conjugate
transpose operator.
Args:
a (cupy.ndarray): The input matrix with dimension ``(N, N)``
Returns:
cupy.ndarray: The lower-triangular matrix.
.. warning::
This function calls one or more cuSOLVER routine(s) which may yield
invalid results if input conditions are not met.
To detect these invalid results, you can set the `linalg`
configuration to a value that is not `ignore` in
:func:`cupyx.errstate` or :func:`cupyx.seterr`.
.. seealso:: :func:`numpy.linalg.cholesky`
"""
_util._assert_cupy_array(a)
_util._assert_nd_squareness(a)
if a.ndim > 2:
return _potrf_batched(a)
dtype, out_dtype = _util.linalg_common_type(a)
x = a.astype(dtype, order='C', copy=True)
n = len(a)
handle = device.get_cusolver_handle()
dev_info = cupy.empty(1, dtype=numpy.int32)
if dtype == 'f':
potrf = cusolver.spotrf
potrf_bufferSize = cusolver.spotrf_bufferSize
elif dtype == 'd':
potrf = cusolver.dpotrf
potrf_bufferSize = cusolver.dpotrf_bufferSize
elif dtype == 'F':
potrf = cusolver.cpotrf
potrf_bufferSize = cusolver.cpotrf_bufferSize
else: # dtype == 'D':
potrf = cusolver.zpotrf
potrf_bufferSize = cusolver.zpotrf_bufferSize
buffersize = potrf_bufferSize(
handle, cublas.CUBLAS_FILL_MODE_UPPER, n, x.data.ptr, n)
workspace = cupy.empty(buffersize, dtype=dtype)
potrf(
handle, cublas.CUBLAS_FILL_MODE_UPPER, n, x.data.ptr, n,
workspace.data.ptr, buffersize, dev_info.data.ptr)
cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed(
potrf, dev_info)
_util._tril(x, k=0)
return x.astype(out_dtype, copy=False)
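# --- Hedged usage sketch (added; not part of the cupy source) ---
# Minimal check of cholesky() above: factor a symmetric positive definite matrix
# and verify the reconstruction. The matrix size and tolerance are illustrative.
def _demo_cholesky():
    a = cupy.random.rand(4, 4)
    spd = a @ a.T + 4 * cupy.eye(4)   # symmetric positive definite by construction
    low = cholesky(spd)               # lower-triangular factor L
    assert cupy.allclose(low @ low.T, spd, atol=1e-5)
    return low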
def qr(a, mode='reduced'):
"""QR decomposition.
Decompose a given two-dimensional matrix into ``Q * R``, where ``Q``
is an orthonormal and ``R`` is an upper-triangular matrix.
Args:
a (cupy.ndarray): The input matrix.
mode (str): The mode of decomposition. Currently 'reduced',
'complete', 'r', and 'raw' modes are supported. The default mode
is 'reduced', in which matrix ``A = (M, N)`` is decomposed into
``Q``, ``R`` with dimensions ``(M, K)``, ``(K, N)``, where
``K = min(M, N)``.
Returns:
cupy.ndarray, or tuple of ndarray:
Although the type of returned object depends on the mode,
it returns a tuple of ``(Q, R)`` by default.
For details, please see the document of :func:`numpy.linalg.qr`.
.. warning::
This function calls one or more cuSOLVER routine(s) which may yield
invalid results if input conditions are not met.
To detect these invalid results, you can set the `linalg`
configuration to a value that is not `ignore` in
:func:`cupyx.errstate` or :func:`cupyx.seterr`.
.. seealso:: :func:`numpy.linalg.qr`
"""
# TODO(Saito): Current implementation only accepts two-dimensional arrays
_util._assert_cupy_array(a)
_util._assert_rank2(a)
if mode not in ('reduced', 'complete', 'r', 'raw'):
if mode in ('f', 'full', 'e', 'economic'):
msg = 'The deprecated mode \'{}\' is not supported'.format(mode)
raise ValueError(msg)
else:
raise ValueError('Unrecognized mode \'{}\''.format(mode))
# support float32, float64, complex64, and complex128
dtype, out_dtype = _util.linalg_common_type(a)
if mode == 'raw':
# compatibility with numpy.linalg.qr
out_dtype = numpy.promote_types(out_dtype, 'd')
m, n = a.shape
mn = min(m, n)
if mn == 0:
if mode == 'reduced':
return cupy.empty((m, 0), out_dtype), cupy.empty((0, n), out_dtype)
elif mode == 'complete':
return cupy.identity(m, out_dtype), cupy.empty((m, n), out_dtype)
elif mode == 'r':
return cupy.empty((0, n), out_dtype)
else: # mode == 'raw'
return cupy.empty((n, m), out_dtype), cupy.empty((0,), out_dtype)
x = a.transpose().astype(dtype, order='C', copy=True)
handle = device.get_cusolver_handle()
dev_info = cupy.empty(1, dtype=numpy.int32)
if dtype == 'f':
geqrf_bufferSize = cusolver.sgeqrf_bufferSize
geqrf = cusolver.sgeqrf
elif dtype == 'd':
geqrf_bufferSize = cusolver.dgeqrf_bufferSize
geqrf = cusolver.dgeqrf
elif dtype == 'F':
geqrf_bufferSize = cusolver.cgeqrf_bufferSize
geqrf = cusolver.cgeqrf
elif dtype == 'D':
geqrf_bufferSize = cusolver.zgeqrf_bufferSize
geqrf = cusolver.zgeqrf
else:
msg = ('dtype must be float32, float64, complex64 or complex128'
' (actual: {})'.format(a.dtype))
raise ValueError(msg)
# compute working space of geqrf and solve R
buffersize = geqrf_bufferSize(handle, m, n, x.data.ptr, n)
workspace = cupy.empty(buffersize, dtype=dtype)
tau = cupy.empty(mn, dtype=dtype)
geqrf(handle, m, n, x.data.ptr, m,
tau.data.ptr, workspace.data.ptr, buffersize, dev_info.data.ptr)
cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed(
geqrf, dev_info)
if mode == 'r':
r = x[:, :mn].transpose()
return _util._triu(r).astype(out_dtype, copy=False)
if mode == 'raw':
return (
x.astype(out_dtype, copy=False),
tau.astype(out_dtype, copy=False))
if mode == 'complete' and m > n:
mc = m
q = cupy.empty((m, m), dtype)
else:
mc = mn
q = cupy.empty((n, m), dtype)
q[:n] = x
# compute working space of orgqr and solve Q
if dtype == 'f':
orgqr_bufferSize = cusolver.sorgqr_bufferSize
orgqr = cusolver.sorgqr
elif dtype == 'd':
orgqr_bufferSize = cusolver.dorgqr_bufferSize
orgqr = cusolver.dorgqr
elif dtype == 'F':
orgqr_bufferSize = cusolver.cungqr_bufferSize
orgqr = cusolver.cungqr
elif dtype == 'D':
orgqr_bufferSize = cusolver.zungqr_bufferSize
orgqr = cusolver.zungqr
buffersize = orgqr_bufferSize(
handle, m, mc, mn, q.data.ptr, m, tau.data.ptr)
workspace = cupy.empty(buffersize, dtype=dtype)
orgqr(
handle, m, mc, mn, q.data.ptr, m, tau.data.ptr, workspace.data.ptr,
buffersize, dev_info.data.ptr)
cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed(
orgqr, dev_info)
q = q[:mc].transpose()
r = x[:, :mc].transpose()
return (
q.astype(out_dtype, copy=False),
_util._triu(r).astype(out_dtype, copy=False))
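# --- Hedged usage sketch (added; not part of the cupy source) ---
# Minimal check of qr() above in the default 'reduced' mode; shapes follow the
# docstring (Q: (M, K), R: (K, N) with K = min(M, N)).
def _demo_qr():
    a = cupy.random.rand(6, 4)
    q, r = qr(a, mode='reduced')      # q: (6, 4), r: (4, 4)
    assert cupy.allclose(q @ r, a, atol=1e-5)
    assert cupy.allclose(q.T @ q, cupy.eye(4), atol=1e-5)
    return q, r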
def _svd_batched(a, full_matrices, compute_uv):
batch_shape = a.shape[:-2]
batch_size = internal.prod(batch_shape)
n, m = a.shape[-2:]
dtype, uv_dtype = _util.linalg_common_type(a)
s_dtype = uv_dtype.char.lower()
# first handle any 0-size inputs
if batch_size == 0:
k = min(m, n)
s = cupy.empty(batch_shape + (k,), s_dtype)
if compute_uv:
if full_matrices:
u = cupy.empty(batch_shape + (n, n), dtype=uv_dtype)
vt = cupy.empty(batch_shape + (m, m), dtype=uv_dtype)
else:
u = cupy.empty(batch_shape + (n, k), dtype=uv_dtype)
vt = cupy.empty(batch_shape + (k, m), dtype=uv_dtype)
return u, s, vt
else:
return s
elif m == 0 or n == 0:
s = cupy.empty(batch_shape + (0,), s_dtype)
if compute_uv:
if full_matrices:
u = cupy.empty(batch_shape + (n, n), dtype=uv_dtype)
u[...] = cupy.identity(n, dtype=uv_dtype)
vt = cupy.empty(batch_shape + (m, m), dtype=uv_dtype)
vt[...] = cupy.identity(m, dtype=uv_dtype)
else:
u = cupy.empty(batch_shape + (n, 0), dtype=uv_dtype)
vt = cupy.empty(batch_shape + (0, m), dtype=uv_dtype)
return u, s, vt
else:
return s
# ...then delegate real computation to cuSOLVER
a = a.reshape(-1, *(a.shape[-2:]))
if runtime.is_hip or (m <= 32 and n <= 32):
# copy is done in _gesvdj_batched, so let's try not to do it here
a = a.astype(dtype, order='C', copy=False)
out = _gesvdj_batched(a, full_matrices, compute_uv, False)
else:
# manually loop over cusolverDn<t>gesvd()
# copy (via possible type casting) is done in _gesvd_batched
# note: _gesvd_batched returns V, not V^H
out = _gesvd_batched(a, dtype.char, full_matrices, compute_uv, False)
if compute_uv:
u, s, v = out
u = u.astype(uv_dtype, copy=False)
u = u.reshape(*batch_shape, *(u.shape[-2:]))
s = s.astype(s_dtype, copy=False)
s = s.reshape(*batch_shape, *(s.shape[-1:]))
v = v.astype(uv_dtype, copy=False)
v = v.reshape(*batch_shape, *(v.shape[-2:]))
return u, s, v.swapaxes(-2, -1).conj()
else:
s = out
s = s.astype(s_dtype, copy=False)
s = s.reshape(*batch_shape, *(s.shape[-1:]))
return s
# TODO(leofang): support the hermitian keyword?
def svd(a, full_matrices=True, compute_uv=True):
"""Singular Value Decomposition.
Factorizes the matrix ``a`` as ``u * np.diag(s) * v``, where ``u`` and
    ``v`` are unitary and ``s`` is a one-dimensional array of ``a``'s
singular values.
Args:
a (cupy.ndarray): The input matrix with dimension ``(..., M, N)``.
full_matrices (bool): If True, it returns u and v with dimensions
``(..., M, M)`` and ``(..., N, N)``. Otherwise, the dimensions
of u and v are ``(..., M, K)`` and ``(..., K, N)``, respectively,
where ``K = min(M, N)``.
compute_uv (bool): If ``False``, it only returns singular values.
Returns:
tuple of :class:`cupy.ndarray`:
A tuple of ``(u, s, v)`` such that ``a = u * np.diag(s) * v``.
.. warning::
This function calls one or more cuSOLVER routine(s) which may yield
invalid results if input conditions are not met.
To detect these invalid results, you can set the `linalg`
configuration to a value that is not `ignore` in
:func:`cupyx.errstate` or :func:`cupyx.seterr`.
.. note::
On CUDA, when ``a.ndim > 2`` and the matrix dimensions <= 32, a fast
code path based on Jacobian method (``gesvdj``) is taken. Otherwise,
a QR method (``gesvd``) is used.
        On ROCm, there is no such fast code path that switches the underlying
algorithm.
.. seealso:: :func:`numpy.linalg.svd`
"""
_util._assert_cupy_array(a)
if a.ndim > 2:
return _svd_batched(a, full_matrices, compute_uv)
# Cast to float32 or float64
dtype, uv_dtype = _util.linalg_common_type(a)
real_dtype = dtype.char.lower()
s_dtype = uv_dtype.char.lower()
# Remark 1: gesvd only supports m >= n (WHAT?)
# Remark 2: gesvd returns matrix U and V^H
n, m = a.shape
if m == 0 or n == 0:
s = cupy.empty((0,), s_dtype)
if compute_uv:
if full_matrices:
u = cupy.eye(n, dtype=uv_dtype)
vt = cupy.eye(m, dtype=uv_dtype)
else:
u = cupy.empty((n, 0), dtype=uv_dtype)
vt = cupy.empty((0, m), dtype=uv_dtype)
return u, s, vt
else:
return s
# `a` must be copied because xgesvd destroys the matrix
if m >= n:
x = a.astype(dtype, order='C', copy=True)
trans_flag = False
else:
m, n = a.shape
x = a.transpose().astype(dtype, order='C', copy=True)
trans_flag = True
k = n # = min(m, n) where m >= n is ensured above
if compute_uv:
if full_matrices:
u = cupy.empty((m, m), dtype=dtype)
vt = x[:, :n]
job_u = ord('A')
job_vt = ord('O')
else:
u = x
vt = cupy.empty((k, n), dtype=dtype)
job_u = ord('O')
job_vt = ord('S')
u_ptr, vt_ptr = u.data.ptr, vt.data.ptr
else:
u_ptr, vt_ptr = 0, 0 # Use nullptr
job_u = ord('N')
job_vt = ord('N')
s = cupy.empty(k, dtype=real_dtype)
handle = device.get_cusolver_handle()
dev_info = cupy.empty(1, dtype=numpy.int32)
if dtype == 'f':
gesvd = cusolver.sgesvd
gesvd_bufferSize = cusolver.sgesvd_bufferSize
elif dtype == 'd':
gesvd = cusolver.dgesvd
gesvd_bufferSize = cusolver.dgesvd_bufferSize
elif dtype == 'F':
gesvd = cusolver.cgesvd
gesvd_bufferSize = cusolver.cgesvd_bufferSize
else: # dtype == 'D':
gesvd = cusolver.zgesvd
gesvd_bufferSize = cusolver.zgesvd_bufferSize
buffersize = gesvd_bufferSize(handle, m, n)
workspace = cupy.empty(buffersize, dtype=dtype)
if not runtime.is_hip:
        # rwork can be NULL if the information from the superdiagonal isn't needed
# https://docs.nvidia.com/cuda/cusolver/index.html#cuSolverDN-lt-t-gt-gesvd # noqa
rwork_ptr = 0
else:
rwork = cupy.empty(min(m, n)-1, dtype=s_dtype)
rwork_ptr = rwork.data.ptr
gesvd(
handle, job_u, job_vt, m, n, x.data.ptr, m, s.data.ptr, u_ptr, m,
vt_ptr, n, workspace.data.ptr, buffersize, rwork_ptr,
dev_info.data.ptr)
cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed(
gesvd, dev_info)
s = s.astype(s_dtype, copy=False)
# Note that the returned array may need to be transposed
# depending on the structure of an input
if compute_uv:
u = u.astype(uv_dtype, copy=False)
vt = vt.astype(uv_dtype, copy=False)
if trans_flag:
return u.transpose(), s, vt.transpose()
else:
return vt, s, u
else:
return s
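# --- Hedged usage sketch (added; not part of the cupy source) ---
# Minimal check of svd() above with full_matrices=False; reconstructs the input
# from u, s and the returned row-wise right singular vectors.
def _demo_svd():
    a = cupy.random.rand(5, 3)
    u, s, vh = svd(a, full_matrices=False)    # u: (5, 3), s: (3,), vh: (3, 3)
    assert cupy.allclose(u @ cupy.diag(s) @ vh, a, atol=1e-5)
    return u, s, vh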
|
py | b401ba24d56bc8eaafb9bfdbdc0351fa1854e6c5 | import os
class Phonebook:
def __init__(self, cachedir):
self.entries = {}
        self.filename = 'phonebook.txt'
        self.file_path = os.path.join(str(cachedir), self.filename)
        self.file_cache = open(self.file_path, 'w')
def add(self, name, number):
self.entries[name] = number
def lookup(self, name):
return self.entries[name]
def names(self):
return self.entries.keys()
def numbers(self):
return self.entries.values()
def clear(self):
self.entries = {}
self.file_cache.close()
        os.remove(self.file_path)
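# --- Hedged usage sketch (added; not part of the original file) ---
# Shows the intended Phonebook lifecycle, using a temporary directory as the
# cache location.
if __name__ == '__main__':
    import tempfile
    with tempfile.TemporaryDirectory() as cachedir:
        book = Phonebook(cachedir)
        book.add('Alice', '555-0100')
        print(book.lookup('Alice'))   # -> 555-0100
        book.clear()                  # closes and removes the cache file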
|
py | b401bbb586e9dcc68150f441af66f11073c82dfc | import logging
from typing import List, Dict, Set
from flask_jwt_extended import current_user
from rbac_builder import const as c
from ..base_manager import BaseManager
log = logging.getLogger(__name__)
class AbstractSecurityManager(BaseManager):
"""
Abstract SecurityManager class, declares all methods used by the
framework.
"""
def add_permissions_view(self, base_permissions, view_menu):
"""
        Adds permissions on a view or menu to the backend
:param base_permissions:
list of permissions from views (all exposed methods):
'add','edit' etc...
:param view_menu:
name of the views or menu to add
"""
raise NotImplementedError
def add_permissions_menu(self, view_menu_name):
"""
Adds menu_access to menu on permission_view_menu
:param view_menu_name:
The menu name
"""
raise NotImplementedError
def register_views(self):
"""
Generic function to create the security views
"""
raise NotImplementedError
def is_item_public(self, permission_name, view_name):
"""
        Check if the view has public permissions
:param permission_name:
the permission: show, edit...
:param view_name:
            the name of the view class (child of BaseView)
"""
raise NotImplementedError
def has_access(self, permission_name, view_name):
"""
Check if current user or public has access to views or menu
"""
raise NotImplementedError
def security_cleanup(self, base_views, menus, sides):
raise NotImplementedError
class BaseSecurityManager(AbstractSecurityManager):
def __init__(self, rbac_builder):
super(BaseSecurityManager, self).__init__(rbac_builder)
app = self.rbac_builder.get_app
# Base Security Config
app.config.setdefault("AUTH_ROLE_ADMIN", "Super Admin")
app.config.setdefault("AUTH_ROLE_PUBLIC", "Public")
# Setup Flask-Jwt-Extended
self.jwt_manager = self.rbac_builder.get_jwt_manager
@property
def auth_role_admin(self):
return self.rbac_builder.get_app.config["AUTH_ROLE_ADMIN"]
@property
def auth_role_public(self):
return self.rbac_builder.get_app.config["AUTH_ROLE_PUBLIC"]
def create_db(self):
"""
Setups the DB, creates admin and public roles if they don't exist.
"""
self.add_role(self.auth_role_admin)
self.add_role(self.auth_role_public)
def register_views(self):
pass
"""
----------------------------------------
PERMISSION ACCESS CHECK
----------------------------------------
"""
def is_item_public(self, permission_name, view_name):
"""
        Check if the view has public permissions
:param permission_name:
the permission: can_show, can_edit...
:param view_name:
            the name of the view class (child of BaseView)
"""
permissions = self.get_public_permissions()
if permissions:
for i in permissions:
if (view_name == i.view_menu.name) and (
permission_name == i.permission.name
):
return True
return False
else:
return False
def _has_view_access(
self, user, permission_name: str, view_name: str
) -> bool:
roles = user.roles
db_role_ids = list()
for role in roles:
db_role_ids.append(role.id)
# Check database-stored roles
return self.exist_permission_on_roles(
view_name,
permission_name,
db_role_ids,
)
def _get_user_permission_view_menus(
self,
user,
permission_name: str,
view_menus_name: List[str]
) -> Set[str]:
"""
        Return a set of view menu names with a certain permission name
        that a user has access to. Mainly used to fetch all menu permissions
        in a single db call; also checks public permissions and builtin roles.
"""
db_role_ids = list()
if user is None:
# include public role
roles = [self.get_public_role()]
else:
roles = user.roles
result = set()
for role in roles:
db_role_ids.append(role.id)
# Then check against database-stored roles
pvms_names = [
pvm.view_menu.name
for pvm in self.find_roles_permission_view_menus(permission_name, db_role_ids)
]
result.update(pvms_names)
return result
def _get_permission_view_menus_by_user(self, user, no_menu=True):
"""
        Return the permission/view-menu pairs that a user has access to, as a list
        of dicts. Mainly used to fetch all permissions in a single db call; also
        checks public permissions and builtin roles.
"""
db_role_ids = list()
if user is None:
# include public role
roles = [self.get_public_role()]
else:
roles = user.roles
for role in roles:
db_role_ids.append(role.id)
# Then check against database-stored roles
pvms = [
{
'id': pvm.id,
'action': pvm.permission.name,
'view': pvm.view_menu.name
}
for pvm in self.find_permission_view_by_roles(db_role_ids, no_menu)
]
return pvms
def has_access(self, permission_name, view_name):
"""
Check if current user or public has access to views or menu
"""
if current_user:
return self._has_view_access(current_user, permission_name, view_name)
else:
return self.is_item_public(permission_name, view_name)
def get_user_menu_access(self, menu_names: List[str] = None) -> Set[str]:
if current_user:
return self._get_user_permission_view_menus(
current_user, "menu_access", view_menus_name=menu_names)
else:
return self._get_user_permission_view_menus(
None, "menu_access", view_menus_name=menu_names)
def get_user_permission_view(self) -> List[dict]:
if current_user:
return self._get_permission_view_menus_by_user(current_user)
else:
return self._get_permission_view_menus_by_user(None)
def get_user_permission_view_menu(self) -> List[dict]:
if current_user:
return self._get_permission_view_menus_by_user(current_user, no_menu=False)
else:
return self._get_permission_view_menus_by_user(None, no_menu=False)
def add_permissions_view(self, base_permissions, view_menu):
"""
        Adds permissions on a view or menu to the backend
:param base_permissions:
list of permissions from views (all exposed methods):
'can_add','can_edit' etc...
:param view_menu:
name of the views or menu to add
"""
view_menu_db = self.add_view_menu(view_menu)
perm_views = self.find_permissions_view_menu(view_menu_db)
if not perm_views:
# No permissions yet on this views
for permission in base_permissions:
pv = self.add_permission_view_menu(permission, view_menu)
role_admin = self.find_role(self.auth_role_admin)
self.add_permission_role(role_admin, pv)
else:
# Permissions on this views exist but....
role_admin = self.find_role(self.auth_role_admin)
for permission in base_permissions:
# Check if base views permissions exist
if not self.exist_permission_on_views(perm_views, permission):
pv = self.add_permission_view_menu(permission, view_menu)
self.add_permission_role(role_admin, pv)
for perm_view in perm_views:
if perm_view.permission is None:
# Skip this perm_view, it has a null permission
continue
if perm_view.permission.name not in base_permissions:
# perm to delete
roles = self.get_all_roles()
perm = self.find_permission(perm_view.permission.name)
# del permission from all roles
for role in roles:
self.del_permission_role(role, perm_view)
self.del_permission_view_menu(perm_view.permission.name, view_menu)
elif perm_view not in role_admin.permissions:
# Role Admin must have all permissions
self.add_permission_role(role_admin, perm_view)
def add_permissions_menu(self, view_menu_name):
"""
Adds menu_access to menu on permission_view_menu
:param view_menu_name: The menu name
"""
self.add_view_menu(view_menu_name)
pv = self.find_permission_view_menu("menu_access", view_menu_name)
if not pv:
pv = self.add_permission_view_menu("menu_access", view_menu_name)
role_admin = self.find_role(self.auth_role_admin)
self.add_permission_role(role_admin, pv)
def security_cleanup(self, baseviews, menus, sides):
"""
Will cleanup all unused permissions from the database
:param baseviews: A list of BaseViews class
:param menus: Menu class
"""
viewsmenus = self.get_all_view_menu()
roles = self.get_all_roles()
for viewmenu in viewsmenus:
found = False
for baseview in baseviews:
if viewmenu.name == baseview.class_permission_name:
found = True
break
if menus.find(viewmenu.name):
found = True
if sides.find(viewmenu.name):
found = True
if not found:
permissions = self.find_permissions_view_menu(viewmenu)
for permission in permissions:
for role in roles:
self.del_permission_role(role, permission)
self.del_permission_view_menu(
permission.permission.name, viewmenu.name
)
self.del_view_menu(viewmenu.name)
self.security_converge(baseviews)
@staticmethod
def _get_new_old_permissions(baseview) -> Dict:
ret = dict()
for method_name, permission_name in baseview.method_permission_name.items():
old_permission_name = baseview.previous_method_permission_name.get(
method_name
)
# Actions do not get prefix when normally defined
if (hasattr(baseview, 'actions') and
baseview.actions.get(old_permission_name)):
permission_prefix = ''
else:
permission_prefix = c.PERMISSION_PREFIX
if old_permission_name:
if c.PERMISSION_PREFIX + permission_name not in ret:
ret[
c.PERMISSION_PREFIX + permission_name
] = {permission_prefix + old_permission_name, }
else:
ret[
c.PERMISSION_PREFIX + permission_name
].add(permission_prefix + old_permission_name)
return ret
@staticmethod
def _add_state_transition(
state_transition: Dict,
old_view_name: str,
old_perm_name: str,
view_name: str,
perm_name: str
) -> None:
old_pvm = state_transition['add'].get((old_view_name, old_perm_name))
if old_pvm:
state_transition['add'][(old_view_name, old_perm_name)].add(
(view_name, perm_name)
)
else:
state_transition['add'][(old_view_name, old_perm_name)] = {
(view_name, perm_name)
}
state_transition['del_role_pvm'].add((old_view_name, old_perm_name))
state_transition['del_views'].add(old_view_name)
state_transition['del_perms'].add(old_perm_name)
@staticmethod
def _update_del_transitions(state_transitions: Dict, baseviews: List) -> None:
"""
        Mutates state_transitions: loops over baseviews and prunes all
        views and permissions that should not be deleted because references
        to them still exist.
        :param baseviews:
:param state_transitions:
:return:
"""
for baseview in baseviews:
state_transitions['del_views'].discard(baseview.class_permission_name)
for permission in baseview.base_permissions:
state_transitions['del_role_pvm'].discard(
(
baseview.class_permission_name,
permission
)
)
state_transitions['del_perms'].discard(permission)
def create_state_transitions(self, baseviews: List) -> Dict:
"""
Creates a Dict with all the necessary vm/permission transitions
Dict: {
"add": {(<VM>, <PERM>): ((<VM>, PERM), ... )}
"del_role_pvm": ((<VM>, <PERM>), ...)
"del_views": (<VM>, ... )
"del_perms": (<PERM>, ... )
}
:param baseviews: List with all the registered BaseView, BaseApi
:return: Dict with state transitions
"""
state_transitions = {
'add': {},
'del_role_pvm': set(),
'del_views': set(),
'del_perms': set()
}
for baseview in baseviews:
add_all_flag = False
new_view_name = baseview.class_permission_name
permission_mapping = self._get_new_old_permissions(baseview)
if baseview.previous_class_permission_name:
old_view_name = baseview.previous_class_permission_name
add_all_flag = True
else:
new_view_name = baseview.class_permission_name
old_view_name = new_view_name
for new_perm_name in baseview.base_permissions:
if add_all_flag:
old_perm_names = permission_mapping.get(new_perm_name)
old_perm_names = old_perm_names or (new_perm_name,)
for old_perm_name in old_perm_names:
self._add_state_transition(
state_transitions,
old_view_name,
old_perm_name,
new_view_name,
new_perm_name
)
else:
old_perm_names = permission_mapping.get(new_perm_name) or set()
for old_perm_name in old_perm_names:
self._add_state_transition(
state_transitions,
old_view_name,
old_perm_name,
new_view_name,
new_perm_name
)
self._update_del_transitions(state_transitions, baseviews)
return state_transitions
def security_converge(self, baseviews: List, dry=False) -> Dict:
"""
Converges overridden permissions on all registered views/api
will compute all necessary operations from `class_permissions_name`,
`previous_class_permission_name`, method_permission_name`,
`previous_method_permission_name` class attributes.
:param baseviews: List of registered views/apis
:param dry: If True will not change DB
:return: Dict with the necessary operations (state_transitions)
"""
state_transitions = self.create_state_transitions(baseviews)
if dry:
return state_transitions
if not state_transitions:
log.info("No state transitions found")
return dict()
log.debug(f"State transitions: {state_transitions}")
roles = self.get_all_roles()
for role in roles:
permissions = list(role.permissions)
for pvm in permissions:
new_pvm_states = state_transitions['add'].get(
(pvm.view_menu.name, pvm.permission.name)
)
if not new_pvm_states:
continue
for new_pvm_state in new_pvm_states:
new_pvm = self.add_permission_view_menu(
new_pvm_state[1], new_pvm_state[0]
)
self.add_permission_role(role, new_pvm)
if (pvm.view_menu.name, pvm.permission.name) in state_transitions[
'del_role_pvm'
]:
self.del_permission_role(role, pvm)
for pvm in state_transitions['del_role_pvm']:
self.del_permission_view_menu(pvm[1], pvm[0], cascade=False)
for view_name in state_transitions['del_views']:
self.del_view_menu(view_name)
for permission_name in state_transitions['del_perms']:
self.del_permission(permission_name)
return state_transitions
"""
---------------------------
INTERFACE ABSTRACT METHODS
---------------------------
"""
"""
----------------------
PRIMITIVES FOR ROLES
----------------------
"""
def find_role(self, name):
raise NotImplementedError
def find_role_by_id(self, pk):
raise NotImplementedError
def add_role(self, name):
raise NotImplementedError
def update_role(self, pk, name):
raise NotImplementedError
def get_all_roles(self):
raise NotImplementedError
def del_role(self, pk):
raise NotImplementedError
"""
----------------------------
PRIMITIVES FOR PERMISSIONS
----------------------------
"""
def get_public_role(self):
"""
        returns the public role
"""
raise NotImplementedError
def get_public_permissions(self):
"""
returns all permissions from public role
"""
raise NotImplementedError
def find_permission(self, name):
"""
Finds and returns a Permission by name
"""
raise NotImplementedError
def find_roles_permission_view_menus(
self,
permission_name: str,
role_ids: List[int],
):
raise NotImplementedError
def find_permission_view_by_roles(
self,
role_ids: List[int],
):
raise NotImplementedError
def exist_permission_on_roles(
self,
view_name: str,
permission_name: str,
role_ids: List[int],
) -> bool:
"""
        Checks if the given permission on a view exists for a group of roles
"""
raise NotImplementedError
def add_permission(self, name):
"""
Adds a permission to the backend, models permission
:param name:
name of the permission: 'can_add','can_edit' etc...
"""
raise NotImplementedError
def del_permission(self, name):
"""
Deletes a permission from the backend, models permission
:param name:
name of the permission: 'can_add','can_edit' etc...
"""
raise NotImplementedError
"""
----------------------
PRIMITIVES VIEW MENU
----------------------
"""
def find_view_menu(self, name):
"""
Finds and returns a ViewMenu by name
"""
raise NotImplementedError
def get_all_view_menu(self):
raise NotImplementedError
def add_view_menu(self, name):
"""
        Adds a view or menu to the backend, models view_menu
        :param name:
            name of the view menu to add
"""
raise NotImplementedError
def del_view_menu(self, name):
"""
Deletes a ViewMenu from the backend
:param name:
name of the ViewMenu
"""
raise NotImplementedError
"""
----------------------
PERMISSION VIEW MENU
----------------------
"""
def find_permission_view_menu(self, permission_name, view_menu_name):
"""
Finds and returns a PermissionView by names
"""
raise NotImplementedError
def find_permission_view_menu_by_id(self, pk):
"""
Finds and returns a PermissionView by names
"""
raise NotImplementedError
def find_permissions_view_menu(self, view_menu):
"""
Finds all permissions from ViewMenu, returns list of PermissionView
:param view_menu: ViewMenu object
:return: list of PermissionView objects
"""
raise NotImplementedError
def add_permission_view_menu(self, permission_name, view_menu_name):
"""
        Adds a permission on a view or menu to the backend
:param permission_name:
name of the permission to add: 'can_add','can_edit' etc...
:param view_menu_name:
name of the views menu to add
"""
raise NotImplementedError
def del_permission_view_menu(self, permission_name, view_menu_name, cascade=True):
raise NotImplementedError
def exist_permission_on_views(self, lst, item):
raise NotImplementedError
def exist_permission_on_view(self, lst, permission, view_menu):
raise NotImplementedError
def add_permission_role(self, role, perm_view):
"""
Add permission-ViewMenu object to Role
:param role:
The role object
:param perm_view:
The PermissionViewMenu object
"""
raise NotImplementedError
def del_permission_role(self, role, perm_view):
"""
        Remove permission-ViewMenu object from Role
:param role:
The role object
:param perm_view:
The PermissionViewMenu object
"""
raise NotImplementedError
def update_permission_role(self, role, perm_views):
"""
        Update the permission-ViewMenu objects of a Role
        :param role:
            The role object
        :param perm_views:
            The PermissionViewMenu objects
"""
raise NotImplementedError
|
py | b401bc909369fe62879e3283f096ebbf06d9bf87 | if __name__ == '__main__':
    def modpow(a, n, mod):
        # Binary exponentiation: computes (a ** n) % mod in O(log n) multiplications.
res = 1
while n > 0:
if n & 1:
res = res * a % mod
a = a * a % mod
n >>= 1
return res
    def modinv(a: int, m: int) -> int:
        # Modular inverse via Fermat's little theorem; valid only when m is prime.
        return modpow(a, m - 2, m)
a1, b1, x1 = [0, 5, 1]
counter = 0
MOD = 10 ** 9 + 7
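    # Hedged sanity check (added, not in the original): modinv relies on Fermat's
    # little theorem, so MOD must be prime; 10**9 + 7 is.
    assert (3 * modinv(3, MOD)) % MOD == 1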
b1_inv = modinv(x1, MOD)
for i in range(a1, b1 + 1):
i_i = i % MOD
hogehoge = (i_i * b1_inv)
hogehoge_mod = hogehoge % MOD
wari = hogehoge_mod
# wari = ((i_i * b1_inv) % MOD) % x1
if wari == 0:
counter += 1
print(counter)
|
py | b401bcc715f5b7226bb4e74363859e385241ea27 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class ClusterParameterGroup(pulumi.CustomResource):
arn: pulumi.Output[str]
"""
The ARN of the neptune cluster parameter group.
"""
description: pulumi.Output[str]
"""
The description of the neptune cluster parameter group. Defaults to "Managed by Pulumi".
"""
family: pulumi.Output[str]
"""
The family of the neptune cluster parameter group.
"""
name: pulumi.Output[str]
"""
The name of the neptune parameter.
"""
name_prefix: pulumi.Output[str]
"""
Creates a unique name beginning with the specified prefix. Conflicts with `name`.
"""
parameters: pulumi.Output[list]
"""
A list of neptune parameters to apply.
* `applyMethod` (`str`) - Valid values are `immediate` and `pending-reboot`. Defaults to `pending-reboot`.
* `name` (`str`) - The name of the neptune parameter.
* `value` (`str`) - The value of the neptune parameter.
"""
tags: pulumi.Output[dict]
"""
A map of tags to assign to the resource.
"""
def __init__(__self__, resource_name, opts=None, description=None, family=None, name=None, name_prefix=None, parameters=None, tags=None, __props__=None, __name__=None, __opts__=None):
"""
Manages a Neptune Cluster Parameter Group
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.neptune.ClusterParameterGroup("example",
description="neptune cluster parameter group",
family="neptune1",
parameters=[{
"name": "neptune_enable_audit_log",
"value": 1,
}])
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: The description of the neptune cluster parameter group. Defaults to "Managed by Pulumi".
:param pulumi.Input[str] family: The family of the neptune cluster parameter group.
:param pulumi.Input[str] name: The name of the neptune parameter.
:param pulumi.Input[str] name_prefix: Creates a unique name beginning with the specified prefix. Conflicts with `name`.
:param pulumi.Input[list] parameters: A list of neptune parameters to apply.
:param pulumi.Input[dict] tags: A map of tags to assign to the resource.
The **parameters** object supports the following:
* `applyMethod` (`pulumi.Input[str]`) - Valid values are `immediate` and `pending-reboot`. Defaults to `pending-reboot`.
* `name` (`pulumi.Input[str]`) - The name of the neptune parameter.
* `value` (`pulumi.Input[str]`) - The value of the neptune parameter.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['description'] = description
if family is None:
raise TypeError("Missing required property 'family'")
__props__['family'] = family
__props__['name'] = name
__props__['name_prefix'] = name_prefix
__props__['parameters'] = parameters
__props__['tags'] = tags
__props__['arn'] = None
super(ClusterParameterGroup, __self__).__init__(
'aws:neptune/clusterParameterGroup:ClusterParameterGroup',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, arn=None, description=None, family=None, name=None, name_prefix=None, parameters=None, tags=None):
"""
Get an existing ClusterParameterGroup resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: The ARN of the neptune cluster parameter group.
:param pulumi.Input[str] description: The description of the neptune cluster parameter group. Defaults to "Managed by Pulumi".
:param pulumi.Input[str] family: The family of the neptune cluster parameter group.
:param pulumi.Input[str] name: The name of the neptune parameter.
:param pulumi.Input[str] name_prefix: Creates a unique name beginning with the specified prefix. Conflicts with `name`.
:param pulumi.Input[list] parameters: A list of neptune parameters to apply.
:param pulumi.Input[dict] tags: A map of tags to assign to the resource.
The **parameters** object supports the following:
* `applyMethod` (`pulumi.Input[str]`) - Valid values are `immediate` and `pending-reboot`. Defaults to `pending-reboot`.
* `name` (`pulumi.Input[str]`) - The name of the neptune parameter.
* `value` (`pulumi.Input[str]`) - The value of the neptune parameter.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["arn"] = arn
__props__["description"] = description
__props__["family"] = family
__props__["name"] = name
__props__["name_prefix"] = name_prefix
__props__["parameters"] = parameters
__props__["tags"] = tags
return ClusterParameterGroup(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
py | b401bd294898c3c6f2cdc4bdfec8be8e2a7ce7f2 | import streamlit as st
from pathlib import Path
import base64
from modules.toc import *
# Initial page config
page_title = 'Postgres Cheatsheet for Python'
# st.set_page_config(
# page_title='Postgres Cheatsheet for Python',
# layout="wide",
# # initial_sidebar_state="expanded",
# )
def img_to_bytes(img_path):
img_bytes = Path(img_path).read_bytes()
encoded = base64.b64encode(img_bytes).decode()
return encoded
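# For example (the file name is a placeholder), the encoded string can be inlined into markdown:
#   st.markdown(f"<img src='data:image/png;base64,{img_to_bytes('logo.png')}'>", unsafe_allow_html=True)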
##########################
# Main body of cheat sheet
##########################
def cs_body():
    page_title = 'Postgres for Python Cheatsheet'
col1, col2= st.columns(2)
col1.subheader('Getting Started')
    col1.markdown('''macOS: if you installed Postgres with Homebrew
**Start, Stop, Restart, Login**
```Bash
# START, STOP, RESTART postgres
brew services start postgres
pg_ctl -D /opt/homebrew/var/postgres start
brew services stop postgres
brew services restart postgres
# when starting for a new database
psql postgres
psql postgres -U myuser
# Login to Postgres database
# enters into postgres command line
psql <database>
# POSTGRES login and DB permissions
CREATE ROLE myuser WITH LOGIN;
ALTER ROLE myuser CREATEDB;
# in .env file for NodeJS
PG_CONNECTION_STRING=postgres://myuser@localhost/mydatabase
```
Commands work after logging into postgres
Prompt should be postgres=#
''')
# Display data
col1.subheader('Creating a Table')
col1.markdown('''
``` Bash
mydb=# CREATE TABLE users (
id BIGSERIAL PRIMARY KEY,
firstName VARCHAR(200) NOT NULL,
middleName VARCHAR(200) DEFAULT NULL,
lastName VARCHAR(200) DEFAULT NULL
);
# Another convention
CREATE TABLE Student (
roll INT,
student_name VARCHAR,
course VARCHAR,
PRIMARY KEY(roll)
);
```
``` python
# Get DB hostname
SELECT boot_val, reset_val
FROM pg_settings
WHERE name = 'listen_addresses';
# Get Ports
SELECT *
FROM pg_settings
WHERE name = 'port';
# FROM BASH GET POSTGRES PORT
sudo netstat -plunt | grep postgres
# changing password for user
# log into postgres then
cd /data
psql postgres postgres
\password <user>
```
''')
# Control flow
col2.subheader('Control flow')
col2.code('''
st.stop()
''')
# Lay out your app
col2.subheader('Lay out your app')
col2.code('''
st.form('my_form_identifier')
st.form_submit_button('Submit to me')
st.container()
st.columns(spec)
>>> col1, col2 = st.columns(2)
>>> col1.subheader('Columnisation')
st.expander('Expander')
>>> with st.expander('Expand'):
>>> st.write('Juicy deets')
''')
col2.write('Batch widgets together in a form:')
col2.code('''
>>> with st.form(key='my_form'):
>>> text_input = st.text_input(label='Enter some text')
>>> submit_button = st.form_submit_button(label='Submit')
''')
# Display code
col2.subheader('Display code')
col2.code('''
st.echo()
>>> with st.echo():
>>> st.write('Code will be executed and printed')
''')
# Display progress and status
col2.subheader('Display progress and status')
col2.code('''
st.progress(progress_variable_1_to_100)
st.spinner()
>>> with st.spinner(text='In progress'):
>>> time.sleep(5)
>>> st.success('Done')
st.balloons()
st.error('Error message')
st.warning('Warning message')
st.info('Info message')
st.success('Success message')
st.exception(e)
''')
# Placeholders, help, and options
col2.subheader('Placeholders, help, and options')
col2.code('''
st.empty()
>>> my_placeholder = st.empty()
>>> my_placeholder.text('Replaced!')
st.help(pandas.DataFrame)
st.get_option(key)
st.set_option(key, value)
st.set_page_config(layout='wide')
''')
# Mutate data
col2.subheader('Mutate data')
col2.code('''
DeltaGenerator.add_rows(data)
>>> my_table = st.table(df1)
>>> my_table.add_rows(df2)
>>> my_chart = st.line_chart(df1)
>>> my_chart.add_rows(df2)
''')
# Optimize performance
col2.subheader('Optimize performance')
col2.code('''
@st.cache
>>> @st.cache
... def fetch_and_clean_data(url):
... # Mutate data at url
... return data
>>> # Executes d1 as first time
>>> d1 = fetch_and_clean_data(ref1)
>>> # Does not execute d1; returns cached value, d1==d2
>>> d2 = fetch_and_clean_data(ref1)
>>> # Different arg, so function d1 executes
>>> d3 = fetch_and_clean_data(ref2)
''')
col2.subheader('Other key parts of the API')
col2.markdown('''
<small>[State API](https://docs.streamlit.io/en/stable/session_state_api.html)</small><br>
<small>[Theme option reference](https://docs.streamlit.io/en/stable/theme_options.html)</small><br>
<small>[Components API reference](https://docs.streamlit.io/en/stable/develop_streamlit_components.html)</small><br>
<small>[API cheat sheet](https://share.streamlit.io/daniellewisdl/streamlit-cheat-sheet/app.py)</small><br>
''', unsafe_allow_html=True)
st.subheader("PSQL CLI Commands")
st.markdown('''
Commands work after logging into postgres
Prompt should be postgres=#
| **Command** | **Description** | **Additional Information** |
| ------------------------------------------------ | ------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- |
| psql -d database -U user -W | Connects to a database under a specific user | \-d: used to state the database name <br>-U:used to state the database user |
| psql -h host -d database -U user -W | Connect to a database that resides on another host | \-h: used to state the host <br>-d: used to state the database name <br>-U:used to state the database user |
| psql -U user -h host "dbname=db sslmode=require" | Use SSL mode for the connection | \-h: used to state the host <br>-U:used to state the database user |
| \c <dbname> | Switch connection to a new database | |
| CREATE DATABASE <name> | Create a database | |
| \l | List available databases | |
| \d or \d+ | List all tables in database | |
| \dt or \dt+ | List available tables | |
| \d table_name | Describe a table such as a column, type, modifiers of columns, etc. | |
| \dn | List all schemes of the currently connected database | |
| \df | List available functions in the current database | |
| \dv | List available views in the current database | |
| \du | List all users and their assign roles | |
| SELECT version(); | Retrieve the current version of PostgreSQL server | |
| \g | Execute the last command again | |
| \s | Display command history | |
| \s filename | Save the command history to a file | |
| \i filename | Execute psql commands from a file | |
| ? | Know all available psql commands | |
| \h | Get help | Eg:to get detailed information on ALTER TABLE statement use the \h ALTER TABLE |
| \e | Edit command in your own editor | |
| \ a | Switch from aligned to non-aligned column output | |
| \H | Switch the output to HTML format | |
| \q | Exit psql shell | |
| select pg_gethostname(); | PG Hostname | *BROKEN* |
| \ x | Show query output in pretty format | NOTE: Escape sequence for streamlit |
''')
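    # A minimal psycopg2 connection sketch to complement the CLI commands above;
    # the database name, user, host, and port are placeholders.
    st.subheader("Connecting from Python (psycopg2)")
    st.code('''
import psycopg2

conn = psycopg2.connect(dbname="mydatabase", user="myuser", host="localhost", port=5432)
with conn, conn.cursor() as cur:
    cur.execute("SELECT version();")
    print(cur.fetchone())
conn.close()
''', language='python')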
# def main():
def app():
# cs_sidebar()
cs_body()
return None
|
py | b401be1c0bc6f0f468e74489767c0b04eb7ab16a | # Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
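# Illustrative sketch (the project package name `jobparser` is assumed from the spider name):
# this pipeline only runs if it is enabled in settings.py, e.g.
#   ITEM_PIPELINES = {"jobparser.pipelines.JobparserPipeline": 300}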
from itemadapter import ItemAdapter
import re
from pymongo import MongoClient
from pymongo.collection import Collection
digits = re.compile("\\D")
class JobparserPipeline:
def __init__(self):
client = MongoClient('localhost', 27017)
self.mongo_base = client.lesson6
def process_item(self, item, spider):
salary = item['salary']
item["salary"] = None
if len(salary)>2 and salary[2] == '—':
item['min_salary'] = int(digits.sub("", salary[0]))
item['max_salary'] = int(digits.sub("", salary[4]))
item['curency'] = salary[-3].strip()
else:
if salary[0].strip() == "от" and salary[1] != "\xa0":
item['min_salary'] = int(digits.sub("",salary[1]))
item['curency'] = salary[-2].strip()
item['salary'] = salary[-1].strip()
salary = salary[2:]
elif salary[0].strip() == "от":
salary_split = salary[2].split("\xa0")
item["curency"] = salary_split[-1]
salary_split = salary_split[:-1]
item["min_salary"] = int(''.join(salary_split))
if salary[0].strip() == "до" and salary[1] != "\xa0":
item['max_salary'] = int(digits.sub("",salary[1]))
item['curency'] = salary[-2].strip()
item['salary'] = salary[-1].strip()
elif salary[0].strip() == "до":
salary_split = salary[2].split("\xa0")
item["curency"] = salary_split[-1]
salary_split = salary_split[:-1]
item["max_salary"] = int(''.join(salary_split))
collection = self.mongo_base[spider.name] # type: Collection
collection.update_one({"url": item["url"]}, [{"$replaceWith": item}], upsert=True)
return item
|
py | b401bf000d671df3bb1171cbb000012bf77cecd4 | import discord
from discord.ext import tasks
import random
from redbot.core import commands, Config, checks
from github import Github, GithubException
from redbot.core.utils.predicates import MessagePredicate
import asyncio
from datetime import datetime, timedelta
class Suggestions(commands.Cog):
"""
Rotom Suggestion Bot
"""
def __init__(self, bot):
self.bot = bot
self.config = Config.get_conf(self, identifier=192153481165930496, force_registration=True)
default_guild = {"tag": "", "channel": 0, "posts": []}
default_global = {"repo": "", "issue": 0}
self.config.register_guild(**default_guild)
self.config.register_global(**default_global)
self.labels = {
"lab": 429381405840244767,
"lew/fm/hv": 331635573271822338,
"harvest": 535612750924218368,
}
self.post_suggest.start()
def cog_unload(self):
self.post_suggest.cancel()
@checks.is_owner()
@commands.group()
async def suggestset(self, ctx):
"""Configure Suggestion settings"""
pass
@suggestset.command()
async def repo(self, ctx, value: str = None):
"""Set/Show the repo to fetch the suggestions from (global setting)"""
if value is None:
rep = await self.config.repo()
await ctx.send(f"Current repo: {rep}")
else:
await self.config.repo.set(value)
await ctx.message.add_reaction("\N{WHITE HEAVY CHECK MARK}")
@suggestset.command()
async def label(self, ctx, value: str = None):
"""Set/Show the issue label for this guild"""
if value is None:
tag = await self.config.guild(ctx.guild).tag()
await ctx.send(f"Current repo: {tag}")
else:
await self.config.guild(ctx.guild).tag.set(value)
await ctx.message.add_reaction("\N{WHITE HEAVY CHECK MARK}")
@suggestset.command()
async def channel(self, ctx, value: str = None):
"""Set/Show the channel for this guild"""
if value is None:
chan = await self.config.guild(ctx.guild).channel()
chans = ctx.guild.get_channel(chan)
await ctx.send(f"Current channel: {chans.name} ({chan})")
else:
await self.config.guild(ctx.guild).channel.set(value)
await ctx.message.add_reaction("\N{WHITE HEAVY CHECK MARK}")
@checks.admin()
@commands.command()
async def suggest(self, ctx, num: int):
"""
Get specific Suggestion
"""
git = await self.bot.get_shared_api_tokens("github")
gitrepo = await self.config.repo()
g = Github(git.get("token"))
repo = g.get_repo(gitrepo)
issue = repo.get_issue(num)
guilds = await self.config.all_guilds()
for label in issue.labels:
for id, data in guilds.items():
if id == ctx.guild.id and label.name == data["tag"] and data["channel"] != 0:
embed = discord.Embed(
title=issue.title, colour=discord.Colour(0xA80387), description=issue.body
)
embed.add_field(
name="__________\nHow to Vote",
value="Simply React to this message to cast your vote\n 👍 for Yes | 👎 for No",
)
chan = self.bot.get_guild(id).get_channel(data["channel"])
msg = await chan.send(embed=embed)
await msg.add_reaction("👍")
await msg.add_reaction("👎")
else:
await ctx.send(
f"That suggestion is not for this guild | {label} | {id} | {data}"
)
@tasks.loop(hours=48.0)
async def post_suggest(self):
num = await self.config.issue()
git = await self.bot.get_shared_api_tokens("github")
gitrepo = await self.config.repo()
g = Github(git.get("token"))
repo = g.get_repo(gitrepo)
try:
issue = repo.get_issue(num + 1)
except GithubException:
pass
else:
guilds = await self.config.all_guilds()
for label in issue.labels:
for id, data in guilds.items():
if label.name == data["tag"] and data["channel"] != 0:
embed = discord.Embed(
title=issue.title,
colour=discord.Colour(0xA80387),
description=issue.body,
)
embed.add_field(
name="__________\nHow to Vote",
value="Simply React to this message to cast your vote\n 👍 for Yes | 👎 for No",
)
chan = self.bot.get_guild(int(id)).get_channel(int(data["channel"]))
msg = await chan.send(embed=embed)
await msg.add_reaction("👍")
await msg.add_reaction("👎")
async with self.config.guild(id).posts() as posts:
posts.append([issue.number, msg.id, datetime.utcnow()])
await self.config.issue.set(num + 1)
@post_suggest.before_loop
async def before_post_suggest(self):
await self.bot.wait_until_ready()
@tasks.loop(hours=24.0)
async def end_suggest(self):
git = await self.bot.get_shared_api_tokens("github")
gitrepo = await self.config.repo()
g = Github(git.get("token"))
repo = g.get_repo(gitrepo)
guilds = await self.config.all_guilds()
for id, data in guilds.items():
for msg, post in data["posts"]:
pass
@end_suggest.before_loop
async def before_end_suggest(self):
await self.bot.wait_until_ready()
|
py | b401c011f8d374b84830bb16fa802a5ddcb0784f | from six import python_2_unicode_compatible
from .base import QuickbooksBaseObject, QuickbooksTransactionEntity, QuickbooksUpdateOnlyObject # CustomField, Ref
class EmailMessageType(QuickbooksBaseObject):
def __init__(self):
super().__init__()
self.Message = ""
self.Subject = ""
class EmailMessagesPrefs(QuickbooksBaseObject):
class_dict = {
"InvoiceMessage": EmailMessageType,
"EstimateMessage": EmailMessageType,
"SalesReceiptMessage": EmailMessageType,
"StatementMessage": EmailMessageType,
}
def __init__(self):
super().__init__()
self.InvoiceMessage = None
self.EstimateMessage = None
self.SalesReceiptMessage = None
self.StatementMessage = None
class ProductAndServicesPrefs(QuickbooksBaseObject):
def __init__(self):
super().__init__()
self.QuantityWithPriceAndRate = True
self.ForPurchase = True
self.QuantityOnHand = True
self.ForSales = True
class ReportPrefs(QuickbooksBaseObject):
def __init__(self):
super().__init__()
self.ReportBasis = "Accrual" # or "Cash"
self.CalcAgingReportFromTxnDate = False # read only
class AccountingInfoPrefs(QuickbooksBaseObject):
def __init__(self):
super().__init__()
self.FirstMonthOfFiscalYear = "January" # read only
self.UseAccountNumbers = True # read only
self.TaxYearMonth = "January" # read only
self.ClassTrackingPerTxn = False
self.TrackDepartments = False
self.TaxForm = "6"
# Possible values include: Clients, Customers, Donors, Guests, Members, Patients, Tenants.
self.CustomerTerminology = "" # Customers
self.BookCloseDate = "" # e.g. "2018-12-31"
# Possible values include: Business, Department, Division, Location, Property, Store, Territory
self.DepartmentTerminology = "" # Location
self.ClassTrackingPerTxnLine = True
class ClassTrackingPerTxnLine(QuickbooksBaseObject):
def __init__(self):
super().__init__()
self.ReportBasis = "Accrual" # or "Cash"
self.CalcAgingReportFromTxnDate = False # read only
class SalesFormsPrefs(QuickbooksBaseObject):
class_dict = {
# 'DefaultTerms': Ref, # FIXME: serialize field properly, not as JSON
}
list_dict = {
# 'CustomField': CustomField, # FIXME: serialize field properly, not as JSON
}
detail_dict = {
# 'CustomField': CustomField, # FIXME: serialize field properly, not as JSON
}
def __init__(self):
super().__init__()
self.ETransactionPaymentEnabled = False
self.CustomTxnNumbers = False
self.AllowShipping = False
self.AllowServiceDate = False
self.ETransactionEnabledStatus = "" # e.g. "NotApplicable"
self.DefaultCustomerMessage = "" # e.g. "Thank you for your business and have a great day!"
self.EmailCopyToCompany = False
self.AllowEstimates = True
self.DefaultTerms = None
self.AllowDiscount = True
self.DefaultDiscountAccount = ""
self.AllowDeposit = True
self.AutoApplyPayments = True
self.IPNSupportEnabled = False
self.AutoApplyCredit = True
self.CustomField = None
self.UsingPriceLevels = False
self.ETransactionAttachPDF = False
class VendorAndPurchasesPrefs(QuickbooksBaseObject):
class_dict = {}
list_dict = {
# 'POCustomField': CustomField, # FIXME: serialize field properly, not as JSON
}
detail_dict = {
# 'POCustomField': CustomField, # FIXME: serialize field properly, not as JSON
}
def __init__(self):
super().__init__()
self.BillableExpenseTracking = True
self.TrackingByCustomer = True
self.POCustomField = None
class TaxPrefs(QuickbooksBaseObject):
class_dict = {
# 'TaxGroupCodeRef': Ref, # FIXME: serialize field properly, not as JSON
}
def __init__(self):
super().__init__()
self.TaxGroupCodeRef = None
self.UsingSalesTax = True
class OtherPrefs(QuickbooksBaseObject):
def __init__(self):
super().__init__()
class TimeTrackingPrefs(QuickbooksBaseObject):
def __init__(self):
super().__init__()
self.WorkWeekStartDate = "" # e.g. "Monday"
self.MarkTimeEntriesBillable = True
self.ShowBillRateToAll = False
self.UseServices = True
self.BillCustomers = True
class CurrencyPrefs(QuickbooksBaseObject):
class_dict = {
# 'HomeCurrency': Ref, # FIXME: serialize field properly, not as JSON
}
def __init__(self):
super().__init__()
self.HomeCurrency = None
@python_2_unicode_compatible
class Preferences(QuickbooksUpdateOnlyObject, QuickbooksTransactionEntity):
"""
QBO definition: The Preferences resource represents a set of company preferences that
control application behavior in QuickBooks Online.
They are mostly exposed as read-only through the Preferences endpoint with only a very small subset of them
available as writable. Preferences are not necessarily honored when making requests via the QuickBooks API
because a lot of them control UI behavior in the application and may not be applicable for apps.
"""
class_dict = {
'EmailMessagesPrefs': EmailMessagesPrefs,
'ProductAndServicesPrefs': ProductAndServicesPrefs,
'ReportPrefs': ReportPrefs,
'AccountingInfoPrefs': AccountingInfoPrefs,
'SalesFormsPrefs': SalesFormsPrefs,
'VendorAndPurchasesPrefs': VendorAndPurchasesPrefs,
'TaxPrefs': TaxPrefs,
'OtherPrefs': OtherPrefs,
'TimeTrackingPrefs': TimeTrackingPrefs,
'CurrencyPrefs': CurrencyPrefs,
}
qbo_object_name = "Preferences"
def __init__(self):
super().__init__()
self.EmailMessagesPrefs = None
self.ProductAndServicesPrefs = None
self.ReportPrefs = None
self.AccountingInfoPrefs = None
self.SalesFormsPrefs = None
self.VendorAndPurchasesPrefs = None
self.TaxPrefs = None
self.OtherPrefs = None
self.TimeTrackingPrefs = None
self.CurrencyPrefs = None
def __str__(self):
return 'Preferences {0}'.format(self.Id)
|
py | b401c0ce5809309604c28796df641dca80f74964 | import os,copy,shutil,subprocess,yaml
from collections import OrderedDict
import pypospack.crystal as crystal
import pypospack.io.vasp as vasp
import pypospack.potential as potential
from pypospack.task.gulp import GulpSimulationError, GulpSimulation
class GulpPhononCalculation(GulpSimulation):
def __init__(
self,
task_name,
task_directory,
task_type,
structure_filename='POSCAR',
restart=False
):
self.shrink=[8,8,8]
self.kpoints=[10,10,10]
# initialize the parent class
GulpSimulation.__init__(self,
task_name=task_name,
task_directory=task_directory,
task_type=task_type,
structure_filename=structure_filename,
)
def write_gulp_input_file(self,filename=None,structure_filename=None):
"""
Args:
filename (str): location to write the gulp input file.
poscar (str): location of poscar file for structure to read.
"""
if filename is not None:
self.gulp_input_filename = filename
if structure_filename is not None:
self.structure_filename=structure_filename
str_out = "opti conp prop phon eigen\n"
str_out += self.get_gulpinputfile_structuresection()
str_out += self.get_gulpinputfile_potentialsection()
str_out += self.get_gulpinputfile_phononsection()
gulp_input_filename = os.path.join(
self.task_directory,
self.gulp_input_filename)
with open(gulp_input_filename,'w') as f:
f.write(str_out)
def get_conditions_post(self):
GulpSimulation.get_conditions_post(self)
self.conditions_POST['is_freq_file_exists'] \
= os.path.isfile(os.path.join(
self.task_directory,
'freq.gulp'))
self.conditions_POST['is_dens_file_exists'] \
= os.path.isfile(os.path.join(
self.task_directory,
'phonon.gulp.dens'))
def get_gulpinputfile_phononsection(self):
str_phonon = (
"shrink\n"
"{shrink1} {shrink2} {shrink3}\n"
"kpoints\n"
"{k1} {k2} {k3}\n"
"output freq text freq.gulp 12 \n"
"output phonon text phonon.gulp\n"
"output osc test phonon.osc\n"
).format(
shrink1=self.shrink[0],
shrink2=self.shrink[1],
shrink3=self.shrink[2],
k1=self.kpoints[0],
k2=self.kpoints[1],
k3=self.kpoints[2])
return str_phonon
class GulpGammaPointPhonons(GulpSimulation):
def __init__(
self,
task_name,
task_directory,
structure_filename='POSCAR',
restart=False,
debug=False
):
"""
Args:
debug(bool): by default set to false, if set to True outputs debug
information to standard out
"""
_task_type = 'gulp_gamma_phonons'
# initialize the base class
GulpSimulation.__init__(
self,
task_name=task_name,
task_directory=task_directory,
task_type=_task_type,
structure_filename=structure_filename,
restart=restart)
# set additional attributes
self.is_debug = debug
def write_gulp_input_file(self,filename=None,structure_filename=None):
"""
Args:
filename (str): location to write the gulp input file.
poscar (str): location of poscar file for structure to read.
"""
if filename is not None:
self.gulp_input_filename = filename
if structure_filename is not None:
self.structure_filename=structure_filename
str_out = "opti conp prop phon eigen\n"
str_out += self.get_gulpinputfile_structuresection()
str_out += self.get_gulpinputfile_potentialsection()
str_out += self.get_gulpinputfile_phononsection()
gulp_input_filename = os.path.join(
self.task_directory,
self.gulp_input_filename)
with open(gulp_input_filename,'w') as f:
f.write(str_out)
def get_gulpinputfile_phononsection(self):
str_phonon = (
"output freq text freq.gulp\n"
"output phonon text phonon.gulp\n"
"output osc test phonon.osc\n"
)
return str_phonon
def on_post(self,output_fn=None):
self.results = OrderedDict()
# set filename
if output_fn is None:
# default behavior
_fn = os.path.join(self.task_directory,'freq.gulp')
else:
# override
_fn = output_fn
#read the filename
with open(_fn,'r') as _f:
_lines = _f.readlines()
        freqs = [float(line) for line in _lines]
for idx,freq in enumerate(freqs):
key = "{}.freq.{}".format(self.task_name,idx+1)
self.results[key] = freq
_results_filename = self.results_filename
with open(_results_filename,'w') as f:
yaml.dump(self.results,f,default_flow_style=True)
self.update_status()
if self.is_fullauto:
self.on_update_status()
def on_finished(self):
self.cleanup()
def cleanup(self):
print(self.results_filename)
if os.path.exists(self.task_directory):
shutil.rmtree(self.task_directory)
if os.path.exists(self.results_filename):
os.remove(self.results_filename)
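# Hedged usage sketch (the task name, directory, and structure file below are placeholders):
#   task = GulpGammaPointPhonons(task_name="MgO.gamma_phonons",
#                                task_directory="MgO_gamma_phonons",
#                                structure_filename="POSCAR")
#   task.write_gulp_input_file()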
|
py | b401c19028e330d212b7f933e49ac9729419f684 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas
from pandas.io.common import _infer_compression
import s3fs as S3FS
import inspect
import os
import py
import ray
import re
import numpy as np
import math
from modin.error_message import ErrorMessage
from modin.engines.base.io import BaseIO
PQ_INDEX_REGEX = re.compile("__index_level_\d+__") # noqa W605
S3_ADDRESS_REGEX = re.compile("s3://(.*?)/(.*)")
s3fs = S3FS.S3FileSystem(anon=False)
def file_exists(file_path):
if isinstance(file_path, str):
match = S3_ADDRESS_REGEX.search(file_path)
if match:
            return s3fs.exists(file_path)
return os.path.exists(file_path)
def open_file(file_path, mode="rb"):
if isinstance(file_path, str):
match = S3_ADDRESS_REGEX.search(file_path)
if match:
return s3fs.open(file_path, mode=mode)
return open(file_path, mode=mode)
@ray.remote
def get_index(index_name, *partition_indices): # pragma: no cover
"""Get the index from the indices returned by the workers.
Note: Ray functions are not detected by codecov (thus pragma: no cover)"""
index = partition_indices[0].append(partition_indices[1:])
index.names = index_name
return index
class RayIO(BaseIO):
frame_mgr_cls = None
frame_partition_cls = None
query_compiler_cls = None
# IMPORTANT NOTE
#
# Specify these in the child classes to extend the functionality from this class.
# The tasks must return a very specific set of objects in the correct order to be
# correct. The following must be returned from these remote tasks:
# 1.) A number of partitions equal to the `num_partitions` value. If there is not
# enough data to fill the number of partitions, returning empty partitions is
# okay as well.
# 2.) The index object if the index is anything but the default type (`RangeIndex`),
# otherwise return the length of the object in the remote task and the logic
# will build the `RangeIndex` correctly. May of these methods have a `index_col`
# parameter that will tell you whether or not to use the default index.
read_parquet_remote_task = None
# For reading parquet files in parallel, this task should read based on the `cols`
# value in the task signature. Each task will read a subset of the columns.
#
# Signature: (path, cols, num_splits, kwargs)
read_csv_remote_task = None
# For reading CSV files and other text files in parallel, this task should read
# based on the offsets in the signature (`start` and `stop` are byte offsets).
# `prefix_id` is the `b""` prefix for reading with a `BytesIO` object and it will
# also contain encoding information in the string.
#
# Signature: (filepath, num_splits, start, stop, kwargs, prefix_id)
read_hdf_remote_task = None
# For reading HDF5 files in parallel, this task should read based on the `columns`
# parameter in the task signature. Each task will read a subset of the columns.
#
# Signature: (path_or_buf, columns, num_splits, kwargs)
read_feather_remote_task = None
# For reading Feather file format in parallel, this task should read based on the
# `columns` parameter in the task signature. Each task will read a subset of the
# columns.
#
# Signature: (path, columns, num_splits)
read_sql_remote_task = None
# For reading SQL tables in parallel, this task should read a number of rows based
# on the `sql` string passed to the task. Each task will be given a different LIMIT
# and OFFSET as a part of the `sql` query string, so the tasks should perform only
# the logic required to read the SQL query and determine the Index (information
# above).
#
# Signature: (num_splits, sql, con, index_col, kwargs)
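    # (An illustrative sketch of one such remote task, not part of Modin itself, is
    # included at the bottom of this module.)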
@classmethod
def read_parquet(cls, path, engine, columns, **kwargs):
"""Load a parquet object from the file path, returning a DataFrame.
Ray DataFrame only supports pyarrow engine for now.
Args:
path: The filepath of the parquet file.
We only support local files for now.
engine: Ray only support pyarrow reader.
This argument doesn't do anything for now.
kwargs: Pass into parquet's read_pandas function.
Notes:
ParquetFile API is used. Please refer to the documentation here
https://arrow.apache.org/docs/python/parquet.html
"""
from pyarrow.parquet import ParquetFile
if cls.read_parquet_remote_task is None:
return super(RayIO, cls).read_parquet(path, engine, columns, **kwargs)
if not columns:
pf = ParquetFile(path)
columns = [
name
for name in pf.metadata.schema.names
if not PQ_INDEX_REGEX.match(name)
]
num_partitions = cls.frame_mgr_cls._compute_num_partitions()
num_splits = min(len(columns), num_partitions)
# Each item in this list will be a list of column names of the original df
column_splits = (
len(columns) // num_partitions
if len(columns) % num_partitions == 0
else len(columns) // num_partitions + 1
)
col_partitions = [
columns[i : i + column_splits]
for i in range(0, len(columns), column_splits)
]
# Each item in this list will be a list of columns of original df
# partitioned to smaller pieces along rows.
# We need to transpose the oids array to fit our schema.
blk_partitions = np.array(
[
cls.read_parquet_remote_task._remote(
args=(path, cols, num_splits, kwargs),
num_return_vals=num_splits + 1,
)
for cols in col_partitions
]
).T
remote_partitions = np.array(
[
[cls.frame_partition_cls(obj) for obj in row]
for row in blk_partitions[:-1]
]
)
index_len = ray.get(blk_partitions[-1][0])
index = pandas.RangeIndex(index_len)
new_query_compiler = cls.query_compiler_cls(
cls.frame_mgr_cls(remote_partitions), index, columns
)
return new_query_compiler
# CSV
@classmethod
def _skip_header(cls, f, kwargs={}):
lines_read = 0
comment = kwargs.get("comment", None)
skiprows = kwargs.get("skiprows", None)
encoding = kwargs.get("encoding", None)
header = kwargs.get("header", "infer")
names = kwargs.get("names", None)
if header is None:
return lines_read
elif header == "infer":
if names is not None:
return lines_read
else:
header = 0
# Skip lines before the header
if isinstance(skiprows, int):
lines_read += skiprows
for _ in range(skiprows):
f.readline()
skiprows = None
header_lines = header + 1 if isinstance(header, int) else max(header) + 1
header_lines_skipped = 0
# Python 2 files use a read-ahead buffer which breaks our use of tell()
for line in iter(f.readline, ""):
lines_read += 1
skip = False
if not skip and comment is not None:
if encoding is not None:
skip |= line.decode(encoding)[0] == comment
else:
skip |= line.decode()[0] == comment
if not skip and callable(skiprows):
skip |= skiprows(lines_read)
elif not skip and hasattr(skiprows, "__contains__"):
skip |= lines_read in skiprows
if not skip:
header_lines_skipped += 1
if header_lines_skipped == header_lines:
return lines_read
return lines_read
@classmethod
def _read_csv_from_file_pandas_on_ray(cls, filepath, kwargs={}):
"""Constructs a DataFrame from a CSV file.
Args:
filepath (str): path to the CSV file.
npartitions (int): number of partitions for the DataFrame.
kwargs (dict): args excluding filepath provided to read_csv.
Returns:
DataFrame or Series constructed from CSV file.
"""
names = kwargs.get("names", None)
index_col = kwargs.get("index_col", None)
if names is None:
# For the sake of the empty df, we assume no `index_col` to get the correct
# column names before we build the index. Because we pass `names` in, this
# step has to happen without removing the `index_col` otherwise it will not
# be assigned correctly
kwargs["index_col"] = None
names = pandas.read_csv(
open_file(filepath, "rb"), **dict(kwargs, nrows=0, skipfooter=0)
).columns
kwargs["index_col"] = index_col
empty_pd_df = pandas.read_csv(
open_file(filepath, "rb"), **dict(kwargs, nrows=0, skipfooter=0)
)
column_names = empty_pd_df.columns
skipfooter = kwargs.get("skipfooter", None)
skiprows = kwargs.pop("skiprows", None)
parse_dates = kwargs.pop("parse_dates", False)
partition_kwargs = dict(
kwargs,
header=None,
names=names,
skipfooter=0,
skiprows=None,
parse_dates=parse_dates,
)
with open_file(filepath, "rb") as f:
# Get the BOM if necessary
prefix = b""
if kwargs.get("encoding", None) is not None:
prefix = f.readline()
partition_kwargs["skiprows"] = 1
f.seek(0, os.SEEK_SET) # Return to beginning of file
prefix_id = ray.put(prefix)
partition_kwargs_id = ray.put(partition_kwargs)
# Skip the header since we already have the header information and skip the
# rows we are told to skip.
kwargs["skiprows"] = skiprows
cls._skip_header(f, kwargs)
# Launch tasks to read partitions
partition_ids = []
index_ids = []
total_bytes = os.path.getsize(filepath)
# Max number of partitions available
num_parts = cls.frame_mgr_cls._compute_num_partitions()
# This is the number of splits for the columns
num_splits = min(len(column_names), num_parts)
# This is the chunksize each partition will read
chunk_size = max(1, (total_bytes - f.tell()) // num_parts)
while f.tell() < total_bytes:
start = f.tell()
f.seek(chunk_size, os.SEEK_CUR)
f.readline() # Read a whole number of lines
partition_id = cls.read_csv_remote_task._remote(
args=(
filepath,
num_splits,
start,
f.tell(),
partition_kwargs_id,
prefix_id,
),
num_return_vals=num_splits + 1,
)
partition_ids.append(
[cls.frame_partition_cls(obj) for obj in partition_id[:-1]]
)
index_ids.append(partition_id[-1])
if index_col is None:
new_index = pandas.RangeIndex(sum(ray.get(index_ids)))
else:
new_index_ids = get_index.remote([empty_pd_df.index.name], *index_ids)
new_index = ray.get(new_index_ids)
# If parse_dates is present, the column names that we have might not be
# the same length as the returned column names. If we do need to modify
# the column names, we remove the old names from the column names and
# insert the new one at the front of the Index.
if parse_dates is not None:
# Check if is list of lists
if isinstance(parse_dates, list) and isinstance(parse_dates[0], list):
for group in parse_dates:
new_col_name = "_".join(group)
column_names = column_names.drop(group).insert(0, new_col_name)
# Check if it is a dictionary
elif isinstance(parse_dates, dict):
for new_col_name, group in parse_dates.items():
column_names = column_names.drop(group).insert(0, new_col_name)
new_query_compiler = cls.query_compiler_cls(
cls.frame_mgr_cls(np.array(partition_ids)), new_index, column_names
)
if skipfooter:
new_query_compiler = new_query_compiler.drop(
new_query_compiler.index[-skipfooter:]
)
if kwargs.get("squeeze", False) and len(new_query_compiler.columns) == 1:
return new_query_compiler[new_query_compiler.columns[0]]
return new_query_compiler
@classmethod
def _read_csv_from_pandas(cls, filepath_or_buffer, kwargs):
# TODO: Should we try to be smart about how we load files here, or naively default to pandas?
if isinstance(filepath_or_buffer, str):
pd_obj = pandas.read_csv(open_file(filepath_or_buffer, "rb"), **kwargs)
else:
pd_obj = pandas.read_csv(filepath_or_buffer, **kwargs)
if isinstance(pd_obj, pandas.DataFrame):
return cls.from_pandas(pd_obj)
elif isinstance(pd_obj, pandas.io.parsers.TextFileReader):
# Overwriting the read method should return a ray DataFrame for calls
# to __next__ and get_chunk
pd_read = pd_obj.read
pd_obj.read = lambda *args, **kwargs: cls.from_pandas(
pd_read(*args, **kwargs)
)
return pd_obj
@classmethod
def read_csv(
cls,
filepath_or_buffer,
sep=",",
delimiter=None,
header="infer",
names=None,
index_col=None,
usecols=None,
squeeze=False,
prefix=None,
mangle_dupe_cols=True,
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skipinitialspace=False,
skiprows=None,
nrows=None,
na_values=None,
keep_default_na=True,
na_filter=True,
verbose=False,
skip_blank_lines=True,
parse_dates=False,
infer_datetime_format=False,
keep_date_col=False,
date_parser=None,
dayfirst=False,
iterator=False,
chunksize=None,
compression="infer",
thousands=None,
decimal=b".",
lineterminator=None,
quotechar='"',
quoting=0,
escapechar=None,
comment=None,
encoding=None,
dialect=None,
tupleize_cols=None,
error_bad_lines=True,
warn_bad_lines=True,
skipfooter=0,
doublequote=True,
delim_whitespace=False,
low_memory=True,
memory_map=False,
float_precision=None,
):
kwargs = {
"filepath_or_buffer": filepath_or_buffer,
"sep": sep,
"delimiter": delimiter,
"header": header,
"names": names,
"index_col": index_col,
"usecols": usecols,
"squeeze": squeeze,
"prefix": prefix,
"mangle_dupe_cols": mangle_dupe_cols,
"dtype": dtype,
"engine": engine,
"converters": converters,
"true_values": true_values,
"false_values": false_values,
"skipinitialspace": skipinitialspace,
"skiprows": skiprows,
"nrows": nrows,
"na_values": na_values,
"keep_default_na": keep_default_na,
"na_filter": na_filter,
"verbose": verbose,
"skip_blank_lines": skip_blank_lines,
"parse_dates": parse_dates,
"infer_datetime_format": infer_datetime_format,
"keep_date_col": keep_date_col,
"date_parser": date_parser,
"dayfirst": dayfirst,
"iterator": iterator,
"chunksize": chunksize,
"compression": compression,
"thousands": thousands,
"decimal": decimal,
"lineterminator": lineterminator,
"quotechar": quotechar,
"quoting": quoting,
"escapechar": escapechar,
"comment": comment,
"encoding": encoding,
"dialect": dialect,
"tupleize_cols": tupleize_cols,
"error_bad_lines": error_bad_lines,
"warn_bad_lines": warn_bad_lines,
"skipfooter": skipfooter,
"doublequote": doublequote,
"delim_whitespace": delim_whitespace,
"low_memory": low_memory,
"memory_map": memory_map,
"float_precision": float_precision,
}
if cls.read_csv_remote_task is None:
return super(RayIO, cls).read_csv(**kwargs)
return cls._read(**kwargs)
@classmethod
def _read(cls, filepath_or_buffer, **kwargs):
"""Read csv file from local disk.
Args:
filepath_or_buffer:
The filepath of the csv file.
We only support local files for now.
kwargs: Keyword arguments in pandas.read_csv
"""
# The intention of the inspection code is to reduce the amount of
# communication we have to do between processes and nodes. We take a quick
# pass over the arguments and remove those that are default values so we
# don't have to serialize and send them to the workers. Because the
# arguments list is so long, this does end up saving time based on the
# number of nodes in the cluster.
try:
args, _, _, defaults, _, _, _ = inspect.getfullargspec(cls.read_csv)
defaults = dict(zip(args[2:], defaults))
filtered_kwargs = {
kw: kwargs[kw]
for kw in kwargs
if kw in defaults
and not isinstance(kwargs[kw], type(defaults[kw]))
or kwargs[kw] != defaults[kw]
}
# This happens on Python2, we will just default to serializing the entire dictionary
except AttributeError:
filtered_kwargs = kwargs
if isinstance(filepath_or_buffer, str):
if not file_exists(filepath_or_buffer):
ErrorMessage.default_to_pandas("File path could not be resolved")
return cls._read_csv_from_pandas(filepath_or_buffer, filtered_kwargs)
elif not isinstance(filepath_or_buffer, py.path.local):
read_from_pandas = True
# Pandas read_csv supports pathlib.Path
try:
import pathlib
if isinstance(filepath_or_buffer, pathlib.Path):
read_from_pandas = False
except ImportError: # pragma: no cover
pass
if read_from_pandas:
ErrorMessage.default_to_pandas("Reading from buffer.")
return cls._read_csv_from_pandas(filepath_or_buffer, kwargs)
if (
_infer_compression(filepath_or_buffer, kwargs.get("compression"))
is not None
):
ErrorMessage.default_to_pandas("Compression detected.")
return cls._read_csv_from_pandas(filepath_or_buffer, filtered_kwargs)
chunksize = kwargs.get("chunksize")
if chunksize is not None:
ErrorMessage.default_to_pandas("Reading chunks from a file.")
return cls._read_csv_from_pandas(filepath_or_buffer, filtered_kwargs)
skiprows = kwargs.get("skiprows")
if skiprows is not None and not isinstance(skiprows, int):
ErrorMessage.default_to_pandas("skiprows parameter not optimized yet.")
return cls._read_csv_from_pandas(filepath_or_buffer, kwargs)
# TODO: replace this by reading lines from file.
if kwargs.get("nrows") is not None:
ErrorMessage.default_to_pandas("`read_csv` with `nrows`")
return cls._read_csv_from_pandas(filepath_or_buffer, filtered_kwargs)
else:
return cls._read_csv_from_file_pandas_on_ray(
filepath_or_buffer, filtered_kwargs
)
@classmethod
def _validate_hdf_format(cls, path_or_buf):
s = pandas.HDFStore(path_or_buf)
groups = s.groups()
if len(groups) == 0:
raise ValueError("No dataset in HDF5 file.")
candidate_only_group = groups[0]
format = getattr(candidate_only_group._v_attrs, "table_type", None)
s.close()
return format
@classmethod
def read_hdf(cls, path_or_buf, **kwargs):
"""Load a h5 file from the file path or buffer, returning a DataFrame.
Args:
path_or_buf: string, buffer or path object
Path to the file to open, or an open :class:`pandas.HDFStore` object.
kwargs: Pass into pandas.read_hdf function.
Returns:
DataFrame constructed from the h5 file.
"""
if cls.read_hdf_remote_task is None:
return super(RayIO, cls).read_hdf(path_or_buf, **kwargs)
format = cls._validate_hdf_format(path_or_buf=path_or_buf)
if format is None:
ErrorMessage.default_to_pandas(
"File format seems to be `fixed`. For better distribution consider saving the file in `table` format. "
"df.to_hdf(format=`table`)."
)
return cls.from_pandas(pandas.read_hdf(path_or_buf=path_or_buf, **kwargs))
columns = kwargs.get("columns", None)
if not columns:
empty_pd_df = pandas.read_hdf(path_or_buf, start=0, stop=0)
columns = empty_pd_df.columns
num_partitions = cls.frame_mgr_cls._compute_num_partitions()
num_splits = min(len(columns), num_partitions)
# Each item in this list will be a list of column names of the original df
column_splits = (
len(columns) // num_partitions
if len(columns) % num_partitions == 0
else len(columns) // num_partitions + 1
)
col_partitions = [
columns[i : i + column_splits]
for i in range(0, len(columns), column_splits)
]
blk_partitions = np.array(
[
cls.read_hdf_remote_task._remote(
args=(path_or_buf, cols, num_splits, kwargs),
num_return_vals=num_splits + 1,
)
for cols in col_partitions
]
).T
remote_partitions = np.array(
[
[cls.frame_partition_cls(obj) for obj in row]
for row in blk_partitions[:-1]
]
)
index_len = ray.get(blk_partitions[-1][0])
index = pandas.RangeIndex(index_len)
new_query_compiler = cls.query_compiler_cls(
cls.frame_mgr_cls(remote_partitions), index, columns
)
return new_query_compiler
@classmethod
def read_feather(cls, path, columns=None, use_threads=True):
"""Read a pandas.DataFrame from Feather format.
Ray DataFrame only supports pyarrow engine for now.
Args:
path: The filepath of the feather file.
We only support local files for now.
multi threading is set to True by default
columns: not supported by pandas api, but can be passed here to read only
specific columns
use_threads: Whether or not to use threads when reading
Notes:
pyarrow feather is used. Please refer to the documentation here
https://arrow.apache.org/docs/python/api.html#feather-format
"""
if cls.read_feather_remote_task is None:
return super(RayIO, cls).read_feather(
path, columns=columns, use_threads=use_threads
)
if columns is None:
from pyarrow.feather import FeatherReader
fr = FeatherReader(path)
columns = [fr.get_column_name(i) for i in range(fr.num_columns)]
num_partitions = cls.frame_mgr_cls._compute_num_partitions()
num_splits = min(len(columns), num_partitions)
# Each item in this list will be a list of column names of the original df
column_splits = (
len(columns) // num_partitions
if len(columns) % num_partitions == 0
else len(columns) // num_partitions + 1
)
col_partitions = [
columns[i : i + column_splits]
for i in range(0, len(columns), column_splits)
]
blk_partitions = np.array(
[
cls.read_feather_remote_task._remote(
args=(path, cols, num_splits), num_return_vals=num_splits + 1
)
for cols in col_partitions
]
).T
remote_partitions = np.array(
[
[cls.frame_partition_cls(obj) for obj in row]
for row in blk_partitions[:-1]
]
)
index_len = ray.get(blk_partitions[-1][0])
index = pandas.RangeIndex(index_len)
new_query_compiler = cls.query_compiler_cls(
cls.frame_mgr_cls(remote_partitions), index, columns
)
return new_query_compiler
@classmethod
def to_sql(cls, qc, **kwargs):
"""Write records stored in a DataFrame to a SQL database.
Args:
qc: the query compiler of the DF that we want to run to_sql on
kwargs: parameters for pandas.to_sql(**kwargs)
"""
# we first insert an empty DF in order to create the full table in the database
# This also helps to validate the input against pandas
# we would like to_sql() to complete only when all rows have been inserted into the database
# since the mapping operation is non-blocking, each partition will return an empty DF
# so at the end, the blocking operation will be this empty DF to_pandas
empty_df = qc.head(1).to_pandas().head(0)
empty_df.to_sql(**kwargs)
# so each partition will append its respective DF
kwargs["if_exists"] = "append"
columns = qc.columns
def func(df, **kwargs):
df.columns = columns
df.to_sql(**kwargs)
return pandas.DataFrame()
map_func = qc._prepare_method(func, **kwargs)
result = qc._map_across_full_axis(1, map_func)
# blocking operation
result.to_pandas()
@classmethod
def read_sql(cls, sql, con, index_col=None, **kwargs):
"""Reads a SQL query or database table into a DataFrame.
Args:
sql: string or SQLAlchemy Selectable (select or text object) SQL query to be
executed or a table name.
con: SQLAlchemy connectable (engine/connection) or database string URI or
DBAPI2 connection (fallback mode)
index_col: Column(s) to set as index(MultiIndex).
kwargs: Pass into pandas.read_sql function.
"""
if cls.read_sql_remote_task is None:
return super(RayIO, cls).read_sql(sql, con, index_col=index_col, **kwargs)
row_cnt_query = "SELECT COUNT(*) FROM ({})".format(sql)
row_cnt = pandas.read_sql(row_cnt_query, con).squeeze()
cols_names_df = pandas.read_sql(
"SELECT * FROM ({}) LIMIT 0".format(sql), con, index_col=index_col
)
cols_names = cols_names_df.columns
num_parts = cls.frame_mgr_cls._compute_num_partitions()
partition_ids = []
index_ids = []
limit = math.ceil(row_cnt / num_parts)
for part in range(num_parts):
offset = part * limit
query = "SELECT * FROM ({}) LIMIT {} OFFSET {}".format(sql, limit, offset)
partition_id = cls.read_sql_remote_task._remote(
args=(num_parts, query, con, index_col, kwargs),
num_return_vals=num_parts + 1,
)
partition_ids.append(
[cls.frame_partition_cls(obj) for obj in partition_id[:-1]]
)
index_ids.append(partition_id[-1])
if index_col is None: # sum all lens returned from partitions
index_lens = ray.get(index_ids)
new_index = pandas.RangeIndex(sum(index_lens))
else: # concat index returned from partitions
index_lst = [x for part_index in ray.get(index_ids) for x in part_index]
new_index = pandas.Index(index_lst).set_names(index_col)
new_query_compiler = cls.query_compiler_cls(
cls.frame_mgr_cls(np.array(partition_ids)), new_index, cols_names
)
return new_query_compiler
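# Illustrative sketch only, not part of Modin: one way a child class could satisfy the
# `read_csv_remote_task` contract described in the comments inside `RayIO` above. The
# helper name and the column-wise splitting strategy are assumptions for illustration.
@ray.remote
def _example_read_csv_at_offsets(filepath, num_splits, start, stop, kwargs, prefix):  # pragma: no cover
    from io import BytesIO
    # Read only the byte range [start, stop) that the driver assigned to this task.
    with open_file(filepath, "rb") as f:
        f.seek(start)
        chunk = f.read(stop - start)
    df = pandas.read_csv(BytesIO(prefix + chunk), **kwargs)
    # Return `num_splits` column-wise pieces plus the row count so the caller can
    # rebuild a RangeIndex (point 2 of the note above).
    width = max(1, math.ceil(len(df.columns) / num_splits))
    splits = [df.iloc[:, i * width:(i + 1) * width] for i in range(num_splits)]
    return splits + [len(df)]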
|
py | b401c209340b902a44a1297c84d4cc548f069cfb | # Generated by Django 2.2.6 on 2019-10-29 19:57
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Settings',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('application_submission_deadline', models.DateTimeField(auto_now_add=True)),
('application_confirmation_deadline', models.DateTimeField(auto_now_add=True)),
],
),
]
|
py | b401c334ab6f3b45d880e6f5908d42b7cdd820fc | '''
Data translators
Some :class:`~owmeta_core.datasource.DataSource` and `~owmeta_core.datasource.DataTranslator`
types. Some deal with generic file types (e.g., comma-separated values) while others are
specific to the format of a kind of file housed in owmeta.
'''
|
py | b401c435933e5b50f7e3b746e1b291d37b7454d7 | #!/usr/bin/env python2
# Copyright (c) 2014 The Testcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test -alertnotify
#
from test_framework import TestcoinTestFramework
from Testcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
import os
import shutil
class ForkNotifyTest(TestcoinTestFramework):
alert_filename = None # Set by setup_network
def setup_network(self):
self.nodes = []
self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt")
with open(self.alert_filename, 'w') as f:
pass # Just open then close to create zero-length file
self.nodes.append(start_node(0, self.options.tmpdir,
["-blockversion=2", "-alertnotify=echo %s >> \"" + self.alert_filename + "\""]))
# Node1 mines block.version=211 blocks
self.nodes.append(start_node(1, self.options.tmpdir,
["-blockversion=211"]))
connect_nodes(self.nodes[1], 0)
self.is_network_split = False
self.sync_all()
def run_test(self):
# Mine 51 up-version blocks
self.nodes[1].setgenerate(True, 51)
self.sync_all()
# -alertnotify should trigger on the 51'st,
# but mine and sync another to give
# -alertnotify time to write
self.nodes[1].setgenerate(True, 1)
self.sync_all()
with open(self.alert_filename, 'r') as f:
alert_text = f.read()
if len(alert_text) == 0:
raise AssertionError("-alertnotify did not warn of up-version blocks")
# Mine more up-version blocks, should not get more alerts:
self.nodes[1].setgenerate(True, 1)
self.sync_all()
self.nodes[1].setgenerate(True, 1)
self.sync_all()
with open(self.alert_filename, 'r') as f:
alert_text2 = f.read()
if alert_text != alert_text2:
raise AssertionError("-alertnotify excessive warning of up-version blocks")
if __name__ == '__main__':
ForkNotifyTest().main()
|
py | b401c66c7673c5892b0e5a8b1dfbc672f2352b38 | import sqlite3
import numpy as np
from astropy import units
from astropy.coordinates import SkyCoord
#copied directly from EBLSST.py
###########################################################################################################
###########################################################################################################
###########################################################################################################
###########################################################################################################
class OpSim(object):
def __init__(self, *args,**kwargs):
self.dbFile = '../input/db/minion_1016_sqlite.db' #for the OpSim database
self.verbose = False
self.fieldCursor = None
self.summaryCursor = None
self.fieldID = [None]
self.RA = [None]
self.Dec = [None]
self.Nobs = [None]
self.m_5 = [None]
self.obsDates = [None]
self.NobsDates = [None]
self.totalNobs = [None]
#database manipulation
def getCursors(self):
#gets SQlite cursor to pull information from OpSim
#https://www.lsst.org/scientists/simulations/opsim/summary-table-column-descriptions-v335
#http://ops2.lsst.org/docs/current/architecture.html
db = sqlite3.connect(self.dbFile)
cursor = db.cursor()
cursor.execute("SELECT fieldid, expDate, filter, fiveSigmaDepth FROM summary")
self.summaryCursor = np.array(cursor.fetchall()) #NOTE: this takes a LONG time
print("have summary cursor.")
cursor.execute("SELECT fieldid, fieldra, fielddec FROM field")
self.fieldCursor = np.array(cursor.fetchall()) #NOTE: this takes a LONG time
print("have field cursor.")
#For OpSim database
def setFieldID(self, myRA, myDEC, deglim = 3.5/2.):
#uses RA/Dec (from galactic coordinates) to return locatiom's fieldID according to OpSim
#field-of-view == 3.5-degree diameter (also returned with fieldFov key)
RA = self.fieldCursor[:,1].astype(float)
Dec = self.fieldCursor[:,2].astype(float)
dbCoord = SkyCoord(ra = RA*units.degree, dec = Dec*units.degree, frame='icrs')
inCoord = SkyCoord(ra = myRA*units.degree, dec = myDEC*units.degree, frame='icrs')
imin, sep2d, dist3d = inCoord.match_to_catalog_sky(dbCoord)
dbID = (self.fieldCursor[imin,0]).astype('int')
mask = np.where(sep2d.to(units.degree).value > deglim)
#this check apparently isn't necessary because it looks like the entire sky is covered with fieldIDs, but I suppose some of these fieldIDs don't have any observation dates (in the northern hemisphere)
if (len(mask[0]) > 0):
print(mask[0])
print("WARNING: coordinate outside LSST FOV", myRA[mask], myDec[mask])
dbID[mask] = -999
if (self.verbose):
print("have Field ID", dbID)
self.fieldID = [dbID]
def getDates(self, ID, filtin):
#matches FieldID to existing OpSim database ID and matches observation filters to get dates (in seconds since the start of the
# survey)
FieldID = self.summaryCursor[:,0].astype('int')
date = self.summaryCursor[:,1].astype('float')
filt = self.summaryCursor[:,2]
fiveSigmaDepth = self.summaryCursor[:,3].astype('float')
posIDFilt = np.where(np.logical_and(FieldID == ID, filt == filtin[:-1]))
if (self.verbose):
print("posIDFilt = ", posIDFilt, filtin)
OpSimdates = posIDFilt[0]
if (len(OpSimdates) < 1):
return [None], [None]
else:
if (self.verbose):
print('OpSimdates =', OpSimdates)
            dates = np.array([float(d) for d in date[OpSimdates] ])/86400. # converting seconds to days
m_5 = np.array([float(x) for x in fiveSigmaDepth[OpSimdates] ])
return dates, m_5
def setDates(self, i, filters):
self.obsDates[i] = dict()
self.NobsDates[i] = dict()
self.m_5[i] = dict()
self.totalNobs[i] = 0
for filt in filters:
self.obsDates[i][filt], self.m_5[i][filt] = self.getDates(self.fieldID[i], filt)
self.NobsDates[i][filt] = 0
if (self.obsDates[i][filt][0] != None):
self.NobsDates[i][filt] = len(self.obsDates[i][filt])
self.totalNobs[i] += self.NobsDates[i][filt]
if (self.verbose):
print(f'observing with OpSim in filter {filt}, have {self.NobsDates[i][filt]} observations')
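    # Hedged usage sketch (the coordinates and filter list below are placeholders):
    #   ops = OpSim()
    #   ops.getCursors()
    #   ops.setFieldID(np.array([80.0]), np.array([-69.0]))
    #   ops.setDates(0, ['u_', 'g_', 'r_', 'i_', 'z_', 'y_'])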
def getAllOpSimFields(self):
print("getting OpSim fields...")
self.getCursors()
FieldID = self.summaryCursor[:,0].astype('int')
date = self.summaryCursor[:,1].astype('float')
self.fieldID = np.array([])
self.RA = np.array([])
self.Dec = np.array([])
self.Nobs = np.array([])
for x in self.fieldCursor:
inS = np.where(FieldID == int(x[0]))[0]
self.Nobs = np.append(self.Nobs, len(inS))
self.fieldID = np.append(self.fieldID, x[0])
self.RA = np.append(self.RA, x[1])
self.Dec = np.append(self.Dec, x[2])
self.obsDates = np.full_like(self.RA, dict(), dtype=dict)
self.NobsDates = np.full_like(self.RA, dict(), dtype=dict)
self.totalNobs = np.full_like(self.RA, 0)
self.m_5 = np.full_like(self.RA, dict(), dtype=dict)
print(f'returned {len(self.fieldID)} fields') |
py | b401c791fc4ff17643c015c7d8d6d1b75023a4cd | from bs4 import BeautifulSoup
import requests
response = requests.get("https://www.inside.com.tw/tag/AI")
soup = BeautifulSoup(response.content, "lxml")
# Scrape the article titles
titles = soup.find_all("h3", {"class": "post_title"})
for title in titles:
print(title.getText().strip())
|
py | b401c8c0ff628f38782c4242b1be25c86ea2d0b6 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for Superset"""
import csv
import datetime
import doctest
import io
import json
import logging
import os
import random
import re
import string
import unittest
from unittest import mock
import pandas as pd
import psycopg2
import sqlalchemy as sqla
from superset import dataframe, db, jinja_context, security_manager, sql_lab
from superset.connectors.sqla.models import SqlaTable
from superset.db_engine_specs.base import BaseEngineSpec
from superset.db_engine_specs.mssql import MssqlEngineSpec
from superset.models import core as models
from superset.models.sql_lab import Query
from superset.utils import core as utils
from superset.views.core import DatabaseView
from .base_tests import SupersetTestCase
from .fixtures.pyodbcRow import Row
class CoreTests(SupersetTestCase):
def __init__(self, *args, **kwargs):
super(CoreTests, self).__init__(*args, **kwargs)
@classmethod
def setUpClass(cls):
cls.table_ids = {
tbl.table_name: tbl.id for tbl in (db.session.query(SqlaTable).all())
}
def setUp(self):
db.session.query(Query).delete()
db.session.query(models.DatasourceAccessRequest).delete()
db.session.query(models.Log).delete()
def tearDown(self):
db.session.query(Query).delete()
def test_login(self):
resp = self.get_resp("/login/", data=dict(username="admin", password="general"))
self.assertNotIn("User confirmation needed", resp)
resp = self.get_resp("/logout/", follow_redirects=True)
self.assertIn("User confirmation needed", resp)
resp = self.get_resp(
"/login/", data=dict(username="admin", password="wrongPassword")
)
self.assertIn("User confirmation needed", resp)
def test_dashboard_endpoint(self):
resp = self.client.get("/superset/dashboard/-1/")
assert resp.status_code == 404
def test_slice_endpoint(self):
self.login(username="admin")
slc = self.get_slice("Girls", db.session)
resp = self.get_resp("/superset/slice/{}/".format(slc.id))
assert "Time Column" in resp
assert "List Roles" in resp
# Testing overrides
resp = self.get_resp("/superset/slice/{}/?standalone=true".format(slc.id))
assert "List Roles" not in resp
resp = self.client.get("/superset/slice/-1/")
assert resp.status_code == 404
def test_cache_key(self):
self.login(username="admin")
slc = self.get_slice("Girls", db.session)
viz = slc.viz
qobj = viz.query_obj()
cache_key = viz.cache_key(qobj)
self.assertEqual(cache_key, viz.cache_key(qobj))
qobj["groupby"] = []
self.assertNotEqual(cache_key, viz.cache_key(qobj))
def test_api_v1_query_endpoint(self):
self.login(username="admin")
slc = self.get_slice("Name Cloud", db.session)
form_data = slc.form_data
data = json.dumps(
{
"datasource": {"id": slc.datasource_id, "type": slc.datasource_type},
"queries": [
{
"granularity": "ds",
"groupby": ["name"],
"metrics": ["sum__num"],
"filters": [],
"time_range": "{} : {}".format(
form_data.get("since"), form_data.get("until")
),
"limit": 100,
}
],
}
)
# TODO: update once get_data is implemented for QueryObject
with self.assertRaises(Exception):
self.get_resp("/api/v1/query/", {"query_context": data})
def test_old_slice_json_endpoint(self):
self.login(username="admin")
slc = self.get_slice("Girls", db.session)
json_endpoint = "/superset/explore_json/{}/{}/".format(
slc.datasource_type, slc.datasource_id
)
resp = self.get_resp(
json_endpoint, {"form_data": json.dumps(slc.viz.form_data)}
)
assert '"Jennifer"' in resp
def test_slice_json_endpoint(self):
self.login(username="admin")
slc = self.get_slice("Girls", db.session)
resp = self.get_resp(slc.explore_json_url)
assert '"Jennifer"' in resp
def test_old_slice_csv_endpoint(self):
self.login(username="admin")
slc = self.get_slice("Girls", db.session)
csv_endpoint = "/superset/explore_json/{}/{}/?csv=true".format(
slc.datasource_type, slc.datasource_id
)
resp = self.get_resp(csv_endpoint, {"form_data": json.dumps(slc.viz.form_data)})
assert "Jennifer," in resp
def test_slice_csv_endpoint(self):
self.login(username="admin")
slc = self.get_slice("Girls", db.session)
csv_endpoint = "/superset/explore_json/?csv=true"
resp = self.get_resp(
csv_endpoint, {"form_data": json.dumps({"slice_id": slc.id})}
)
assert "Jennifer," in resp
def test_admin_only_permissions(self):
def assert_admin_permission_in(role_name, assert_func):
role = security_manager.find_role(role_name)
permissions = [p.permission.name for p in role.permissions]
assert_func("can_sync_druid_source", permissions)
assert_func("can_approve", permissions)
assert_admin_permission_in("Admin", self.assertIn)
assert_admin_permission_in("Alpha", self.assertNotIn)
assert_admin_permission_in("Gamma", self.assertNotIn)
def test_admin_only_menu_views(self):
def assert_admin_view_menus_in(role_name, assert_func):
role = security_manager.find_role(role_name)
view_menus = [p.view_menu.name for p in role.permissions]
assert_func("ResetPasswordView", view_menus)
assert_func("RoleModelView", view_menus)
assert_func("Security", view_menus)
assert_func("SQL Lab", view_menus)
assert_admin_view_menus_in("Admin", self.assertIn)
assert_admin_view_menus_in("Alpha", self.assertNotIn)
assert_admin_view_menus_in("Gamma", self.assertNotIn)
def test_save_slice(self):
self.login(username="admin")
slice_name = "Energy Sankey"
slice_id = self.get_slice(slice_name, db.session).id
db.session.commit()
copy_name = "Test Sankey Save"
tbl_id = self.table_ids.get("energy_usage")
new_slice_name = "Test Sankey Overwrite"
url = (
"/superset/explore/table/{}/?slice_name={}&"
"action={}&datasource_name=energy_usage"
)
form_data = {
"viz_type": "sankey",
"groupby": "target",
"metric": "sum__value",
"row_limit": 5000,
"slice_id": slice_id,
}
# Changing name and save as a new slice
self.get_resp(
url.format(tbl_id, copy_name, "saveas"),
{"form_data": json.dumps(form_data)},
)
slices = db.session.query(models.Slice).filter_by(slice_name=copy_name).all()
assert len(slices) == 1
new_slice_id = slices[0].id
form_data = {
"viz_type": "sankey",
"groupby": "source",
"metric": "sum__value",
"row_limit": 5000,
"slice_id": new_slice_id,
"time_range": "now",
}
# Setting the name back to its original name by overwriting new slice
self.get_resp(
url.format(tbl_id, new_slice_name, "overwrite"),
{"form_data": json.dumps(form_data)},
)
slc = db.session.query(models.Slice).filter_by(id=new_slice_id).first()
assert slc.slice_name == new_slice_name
assert slc.viz.form_data == form_data
db.session.delete(slc)
def test_filter_endpoint(self):
self.login(username="admin")
slice_name = "Energy Sankey"
slice_id = self.get_slice(slice_name, db.session).id
db.session.commit()
tbl_id = self.table_ids.get("energy_usage")
table = db.session.query(SqlaTable).filter(SqlaTable.id == tbl_id)
table.filter_select_enabled = True
url = (
"/superset/filter/table/{}/target/?viz_type=sankey&groupby=source"
"&metric=sum__value&flt_col_0=source&flt_op_0=in&flt_eq_0=&"
"slice_id={}&datasource_name=energy_usage&"
"datasource_id=1&datasource_type=table"
)
# Changing name
resp = self.get_resp(url.format(tbl_id, slice_id))
assert len(resp) > 0
assert "Carbon Dioxide" in resp
def test_slice_data(self):
# slice data should have some required attributes
self.login(username="admin")
slc = self.get_slice("Girls", db.session)
slc_data_attributes = slc.data.keys()
assert "changed_on" in slc_data_attributes
assert "modified" in slc_data_attributes
def test_slices(self):
# Testing by hitting the two supported end points for all slices
self.login(username="admin")
Slc = models.Slice
urls = []
for slc in db.session.query(Slc).all():
urls += [
(slc.slice_name, "explore", slc.slice_url),
(slc.slice_name, "explore_json", slc.explore_json_url),
]
for name, method, url in urls:
logging.info(f"[{name}]/[{method}]: {url}")
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
def test_tablemodelview_list(self):
self.login(username="admin")
url = "/tablemodelview/list/"
resp = self.get_resp(url)
# assert that a table is listed
table = db.session.query(SqlaTable).first()
assert table.name in resp
assert "/superset/explore/table/{}".format(table.id) in resp
def test_add_slice(self):
self.login(username="admin")
# assert that /chart/add responds with 200
url = "/chart/add"
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
def test_get_user_slices(self):
self.login(username="admin")
userid = security_manager.find_user("admin").id
url = "/sliceaddview/api/read?_flt_0_created_by={}".format(userid)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
def test_slices_V2(self):
# Add explore-v2-beta role to admin user
# Test all slice urls as a user with the explore-v2-beta role
security_manager.add_role("explore-v2-beta")
security_manager.add_user(
"explore_beta",
"explore_beta",
" user",
"[email protected]",
security_manager.find_role("explore-v2-beta"),
password="general",
)
self.login(username="explore_beta", password="general")
Slc = models.Slice
urls = []
for slc in db.session.query(Slc).all():
urls += [(slc.slice_name, "slice_url", slc.slice_url)]
for name, method, url in urls:
print(f"[{name}]/[{method}]: {url}")
self.client.get(url)
def test_doctests(self):
modules = [utils, models, sql_lab]
for mod in modules:
failed, tests = doctest.testmod(mod)
if failed:
raise Exception("Failed a doctest")
def test_misc(self):
assert self.get_resp("/health") == "OK"
assert self.get_resp("/healthcheck") == "OK"
assert self.get_resp("/ping") == "OK"
def test_testconn(self, username="admin"):
self.login(username=username)
database = utils.get_main_database()
# validate that the endpoint works with the password-masked sqlalchemy uri
data = json.dumps(
{
"uri": database.safe_sqlalchemy_uri(),
"name": "main",
"impersonate_user": False,
}
)
response = self.client.post(
"/superset/testconn", data=data, content_type="application/json"
)
assert response.status_code == 200
assert response.headers["Content-Type"] == "application/json"
# validate that the endpoint works with the decrypted sqlalchemy uri
data = json.dumps(
{
"uri": database.sqlalchemy_uri_decrypted,
"name": "main",
"impersonate_user": False,
}
)
response = self.client.post(
"/superset/testconn", data=data, content_type="application/json"
)
assert response.status_code == 200
assert response.headers["Content-Type"] == "application/json"
def test_custom_password_store(self):
database = utils.get_main_database()
conn_pre = sqla.engine.url.make_url(database.sqlalchemy_uri_decrypted)
def custom_password_store(uri):
return "password_store_test"
models.custom_password_store = custom_password_store
conn = sqla.engine.url.make_url(database.sqlalchemy_uri_decrypted)
if conn_pre.password:
assert conn.password == "password_store_test"
assert conn.password != conn_pre.password
# Disable for password store for later tests
models.custom_password_store = None
def test_databaseview_edit(self, username="admin"):
# validate that sending a password-masked uri does not over-write the decrypted
# uri
self.login(username=username)
database = utils.get_main_database()
sqlalchemy_uri_decrypted = database.sqlalchemy_uri_decrypted
url = "databaseview/edit/{}".format(database.id)
data = {k: database.__getattribute__(k) for k in DatabaseView.add_columns}
data["sqlalchemy_uri"] = database.safe_sqlalchemy_uri()
self.client.post(url, data=data)
database = utils.get_main_database()
self.assertEqual(sqlalchemy_uri_decrypted, database.sqlalchemy_uri_decrypted)
def test_warm_up_cache(self):
slc = self.get_slice("Girls", db.session)
data = self.get_json_resp("/superset/warm_up_cache?slice_id={}".format(slc.id))
assert data == [{"slice_id": slc.id, "slice_name": slc.slice_name}]
data = self.get_json_resp(
"/superset/warm_up_cache?table_name=energy_usage&db_name=main"
)
assert len(data) > 0
def test_shortner(self):
self.login(username="admin")
data = (
"//superset/explore/table/1/?viz_type=sankey&groupby=source&"
"groupby=target&metric=sum__value&row_limit=5000&where=&having=&"
"flt_col_0=source&flt_op_0=in&flt_eq_0=&slice_id=78&slice_name="
"Energy+Sankey&collapsed_fieldsets=&action=&datasource_name="
"energy_usage&datasource_id=1&datasource_type=table&"
"previous_viz_type=sankey"
)
resp = self.client.post("/r/shortner/", data=dict(data=data))
assert re.search(r"\/r\/[0-9]+", resp.data.decode("utf-8"))
def test_kv(self):
self.logout()
self.login(username="admin")
try:
resp = self.client.post("/kv/store/", data=dict())
except Exception:
self.assertRaises(TypeError)
value = json.dumps({"data": "this is a test"})
resp = self.client.post("/kv/store/", data=dict(data=value))
self.assertEqual(resp.status_code, 200)
kv = db.session.query(models.KeyValue).first()
kv_value = kv.value
self.assertEqual(json.loads(value), json.loads(kv_value))
resp = self.client.get("/kv/{}/".format(kv.id))
self.assertEqual(resp.status_code, 200)
self.assertEqual(json.loads(value), json.loads(resp.data.decode("utf-8")))
try:
resp = self.client.get("/kv/10001/")
except Exception:
self.assertRaises(TypeError)
def test_gamma(self):
self.login(username="gamma")
assert "Charts" in self.get_resp("/chart/list/")
assert "Dashboards" in self.get_resp("/dashboard/list/")
def test_csv_endpoint(self):
self.login("admin")
sql = """
SELECT first_name, last_name
FROM ab_user
WHERE first_name='admin'
"""
client_id = "{}".format(random.getrandbits(64))[:10]
self.run_sql(sql, client_id, raise_on_error=True)
resp = self.get_resp("/superset/csv/{}".format(client_id))
data = csv.reader(io.StringIO(resp))
expected_data = csv.reader(io.StringIO("first_name,last_name\nadmin, user\n"))
sql = "SELECT first_name FROM ab_user WHERE first_name LIKE '%admin%'"
client_id = "{}".format(random.getrandbits(64))[:10]
self.run_sql(sql, client_id, raise_on_error=True)
resp = self.get_resp("/superset/csv/{}".format(client_id))
data = csv.reader(io.StringIO(resp))
expected_data = csv.reader(io.StringIO("first_name\nadmin\n"))
self.assertEqual(list(expected_data), list(data))
self.logout()
def test_extra_table_metadata(self):
self.login("admin")
dbid = utils.get_main_database().id
self.get_json_resp(
f"/superset/extra_table_metadata/{dbid}/" "ab_permission_view/panoramix/"
)
def test_process_template(self):
maindb = utils.get_main_database()
sql = "SELECT '{{ datetime(2017, 1, 1).isoformat() }}'"
tp = jinja_context.get_template_processor(database=maindb)
rendered = tp.process_template(sql)
self.assertEqual("SELECT '2017-01-01T00:00:00'", rendered)
def test_get_template_kwarg(self):
maindb = utils.get_main_database()
s = "{{ foo }}"
tp = jinja_context.get_template_processor(database=maindb, foo="bar")
rendered = tp.process_template(s)
self.assertEqual("bar", rendered)
def test_template_kwarg(self):
maindb = utils.get_main_database()
s = "{{ foo }}"
tp = jinja_context.get_template_processor(database=maindb)
rendered = tp.process_template(s, foo="bar")
self.assertEqual("bar", rendered)
def test_templated_sql_json(self):
self.login("admin")
sql = "SELECT '{{ datetime(2017, 1, 1).isoformat() }}' as test"
data = self.run_sql(sql, "fdaklj3ws")
self.assertEqual(data["data"][0]["test"], "2017-01-01T00:00:00")
def test_table_metadata(self):
maindb = utils.get_main_database()
backend = maindb.backend
data = self.get_json_resp("/superset/table/{}/ab_user/null/".format(maindb.id))
self.assertEqual(data["name"], "ab_user")
assert len(data["columns"]) > 5
assert data.get("selectStar").startswith("SELECT")
# Engine specific tests
if backend in ("mysql", "postgresql"):
self.assertEqual(data.get("primaryKey").get("type"), "pk")
self.assertEqual(data.get("primaryKey").get("column_names")[0], "id")
self.assertEqual(len(data.get("foreignKeys")), 2)
if backend == "mysql":
self.assertEqual(len(data.get("indexes")), 7)
elif backend == "postgresql":
self.assertEqual(len(data.get("indexes")), 5)
def test_fetch_datasource_metadata(self):
self.login(username="admin")
url = "/superset/fetch_datasource_metadata?" "datasourceKey=1__table"
resp = self.get_json_resp(url)
keys = [
"name",
"type",
"order_by_choices",
"granularity_sqla",
"time_grain_sqla",
"id",
]
for k in keys:
self.assertIn(k, resp.keys())
def test_user_profile(self, username="admin"):
self.login(username=username)
slc = self.get_slice("Girls", db.session)
# Setting some faves
url = "/superset/favstar/Slice/{}/select/".format(slc.id)
resp = self.get_json_resp(url)
self.assertEqual(resp["count"], 1)
dash = db.session.query(models.Dashboard).filter_by(slug="births").first()
url = "/superset/favstar/Dashboard/{}/select/".format(dash.id)
resp = self.get_json_resp(url)
self.assertEqual(resp["count"], 1)
userid = security_manager.find_user("admin").id
resp = self.get_resp("/superset/profile/admin/")
self.assertIn('"app"', resp)
data = self.get_json_resp("/superset/recent_activity/{}/".format(userid))
self.assertNotIn("message", data)
data = self.get_json_resp("/superset/created_slices/{}/".format(userid))
self.assertNotIn("message", data)
data = self.get_json_resp("/superset/created_dashboards/{}/".format(userid))
self.assertNotIn("message", data)
data = self.get_json_resp("/superset/fave_slices/{}/".format(userid))
self.assertNotIn("message", data)
data = self.get_json_resp("/superset/fave_dashboards/{}/".format(userid))
self.assertNotIn("message", data)
data = self.get_json_resp(
"/superset/fave_dashboards_by_username/{}/".format(username)
)
self.assertNotIn("message", data)
def test_slice_id_is_always_logged_correctly_on_web_request(self):
# superset/explore case
slc = db.session.query(models.Slice).filter_by(slice_name="Girls").one()
qry = db.session.query(models.Log).filter_by(slice_id=slc.id)
self.get_resp(slc.slice_url, {"form_data": json.dumps(slc.form_data)})
self.assertEqual(1, qry.count())
def test_slice_id_is_always_logged_correctly_on_ajax_request(self):
# superset/explore_json case
self.login(username="admin")
slc = db.session.query(models.Slice).filter_by(slice_name="Girls").one()
qry = db.session.query(models.Log).filter_by(slice_id=slc.id)
slc_url = slc.slice_url.replace("explore", "explore_json")
self.get_json_resp(slc_url, {"form_data": json.dumps(slc.form_data)})
self.assertEqual(1, qry.count())
def test_slice_query_endpoint(self):
# API endpoint for query string
self.login(username="admin")
slc = self.get_slice("Girls", db.session)
resp = self.get_resp("/superset/slice_query/{}/".format(slc.id))
assert "query" in resp
assert "language" in resp
self.logout()
def test_import_csv(self):
self.login(username="admin")
filename = "testCSV.csv"
table_name = "".join(random.choice(string.ascii_uppercase) for _ in range(5))
test_file = open(filename, "w+")
test_file.write("a,b\n")
test_file.write("john,1\n")
test_file.write("paul,2\n")
test_file.close()
example_db = utils.get_example_database()
example_db.allow_csv_upload = True
db_id = example_db.id
db.session.commit()
test_file = open(filename, "rb")
form_data = {
"csv_file": test_file,
"sep": ",",
"name": table_name,
"con": db_id,
"if_exists": "append",
"index_label": "test_label",
"mangle_dupe_cols": False,
}
url = "/databaseview/list/"
add_datasource_page = self.get_resp(url)
assert "Upload a CSV" in add_datasource_page
url = "/csvtodatabaseview/form"
form_get = self.get_resp(url)
assert "CSV to Database configuration" in form_get
try:
# ensure uploaded successfully
resp = self.get_resp(url, data=form_data)
assert 'CSV file "testCSV.csv" uploaded to table' in resp
finally:
os.remove(filename)
def test_dataframe_timezone(self):
tz = psycopg2.tz.FixedOffsetTimezone(offset=60, name=None)
data = [
(datetime.datetime(2017, 11, 18, 21, 53, 0, 219225, tzinfo=tz),),
(datetime.datetime(2017, 11, 18, 22, 6, 30, 61810, tzinfo=tz),),
]
df = dataframe.SupersetDataFrame(list(data), [["data"]], BaseEngineSpec)
data = df.data
self.assertDictEqual(
data[0], {"data": pd.Timestamp("2017-11-18 21:53:00.219225+0100", tz=tz)}
)
self.assertDictEqual(
data[1], {"data": pd.Timestamp("2017-11-18 22:06:30.061810+0100", tz=tz)}
)
def test_mssql_engine_spec_pymssql(self):
# Test for case when tuple is returned (pymssql)
data = [
(1, 1, datetime.datetime(2017, 10, 19, 23, 39, 16, 660000)),
(2, 2, datetime.datetime(2018, 10, 19, 23, 39, 16, 660000)),
]
df = dataframe.SupersetDataFrame(
list(data), [["col1"], ["col2"], ["col3"]], MssqlEngineSpec
)
data = df.data
self.assertEqual(len(data), 2)
self.assertEqual(
data[0],
{"col1": 1, "col2": 1, "col3": pd.Timestamp("2017-10-19 23:39:16.660000")},
)
def test_mssql_engine_spec_odbc(self):
# Test for case when pyodbc.Row is returned (msodbc driver)
data = [
Row((1, 1, datetime.datetime(2017, 10, 19, 23, 39, 16, 660000))),
Row((2, 2, datetime.datetime(2018, 10, 19, 23, 39, 16, 660000))),
]
df = dataframe.SupersetDataFrame(
list(data), [["col1"], ["col2"], ["col3"]], MssqlEngineSpec
)
data = df.data
self.assertEqual(len(data), 2)
self.assertEqual(
data[0],
{"col1": 1, "col2": 1, "col3": pd.Timestamp("2017-10-19 23:39:16.660000")},
)
def test_comments_in_sqlatable_query(self):
clean_query = "SELECT '/* val 1 */' as c1, '-- val 2' as c2 FROM tbl"
commented_query = "/* comment 1 */" + clean_query + "-- comment 2"
table = SqlaTable(sql=commented_query)
rendered_query = str(table.get_from_clause())
self.assertEqual(clean_query, rendered_query)
def test_slice_payload_no_data(self):
self.login(username="admin")
slc = self.get_slice("Girls", db.session)
json_endpoint = "/superset/explore_json/"
form_data = slc.form_data
form_data.update({"filters": [{"col": "state", "op": "in", "val": ["N/A"]}]})
data = self.get_json_resp(json_endpoint, {"form_data": json.dumps(form_data)})
self.assertEqual(data["status"], utils.QueryStatus.SUCCESS)
self.assertEqual(data["error"], "No data")
def test_slice_payload_invalid_query(self):
self.login(username="admin")
slc = self.get_slice("Girls", db.session)
form_data = slc.form_data
form_data.update({"groupby": ["N/A"]})
data = self.get_json_resp(
"/superset/explore_json/", {"form_data": json.dumps(form_data)}
)
self.assertEqual(data["status"], utils.QueryStatus.FAILED)
def test_slice_payload_viz_markdown(self):
self.login(username="admin")
slc = self.get_slice("Title", db.session)
url = slc.get_explore_url(base_url="/superset/explore_json")
data = self.get_json_resp(url)
self.assertEqual(data["status"], None)
self.assertEqual(data["error"], None)
def test_slice_payload_no_datasource(self):
self.login(username="admin")
data = self.get_json_resp("/superset/explore_json/", raise_on_error=False)
self.assertEqual(
data["error"], "The datasource associated with this chart no longer exists"
)
@mock.patch("superset.security.SupersetSecurityManager.schemas_accessible_by_user")
@mock.patch("superset.security.SupersetSecurityManager.database_access")
@mock.patch("superset.security.SupersetSecurityManager.all_datasource_access")
def test_schemas_access_for_csv_upload_endpoint(
self, mock_all_datasource_access, mock_database_access, mock_schemas_accessible
):
mock_all_datasource_access.return_value = False
mock_database_access.return_value = False
mock_schemas_accessible.return_value = ["this_schema_is_allowed_too"]
database_name = "fake_db_100"
db_id = 100
extra = """{
"schemas_allowed_for_csv_upload":
["this_schema_is_allowed", "this_schema_is_allowed_too"]
}"""
self.login(username="admin")
dbobj = self.get_or_create(
cls=models.Database,
criteria={"database_name": database_name},
session=db.session,
id=db_id,
extra=extra,
)
data = self.get_json_resp(
url="/superset/schemas_access_for_csv_upload?db_id={db_id}".format(
db_id=dbobj.id
)
)
assert data == ["this_schema_is_allowed_too"]
def test_select_star(self):
self.login(username="admin")
examples_db = utils.get_example_database()
resp = self.get_resp(f"/superset/select_star/{examples_db.id}/birth_names")
self.assertIn("gender", resp)
if __name__ == "__main__":
unittest.main()
|
py | b401c94e7f3bdf6add1f40c460f990500b22e4e8 | #!python
# The prebuild script is intended to simplify life for developers and dev-ops. It's responsible for acquiring
# tools required by the build as well as dependencies on which we rely.
#
# By using this script, we can reduce the requirements for a developer getting started to:
#
# * A working C++ dev environment like visual studio, xcode, gcc, or clang
# * Qt
# * CMake
# * Python 3.x
#
# The function of the build script is to acquire, if not already present, all the other build requirements
# The build script should be idempotent. If you run it with the same arguments multiple times, that should
# have no negative impact on the subsequent build times (i.e. re-running the prebuild script should not
# trigger a header change that causes files to be rebuilt). Subsequent runs after the first run should
# execute quickly, determining that no work is to be done
import hifi_singleton
import hifi_utils
import hifi_android
import hifi_vcpkg
import argparse
import concurrent
import hashlib
import importlib
import json
import os
import platform
import shutil
import ssl
import sys
import re
import tempfile
import time
import functools
import subprocess
import logging
from uuid import uuid4
from contextlib import contextmanager
print = functools.partial(print, flush=True)
class TrackableLogger(logging.Logger):
guid = str(uuid4())
def _log(self, msg, *args, **kwargs):
x = {'guid': self.guid}
if 'extra' in kwargs:
kwargs['extra'].update(x)
else:
kwargs['extra'] = x
super()._log(msg, *args, **kwargs)
logging.setLoggerClass(TrackableLogger)
logger = logging.getLogger('prebuild')
def headSha():
if shutil.which('git') is None:
logger.warn("Unable to find git executable, can't caclulate commit ID")
return '0xDEADBEEF'
repo_dir = os.path.dirname(os.path.abspath(__file__))
git = subprocess.Popen(
'git rev-parse --short HEAD',
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True, cwd=repo_dir, universal_newlines=True,
)
stdout, _ = git.communicate()
sha = stdout.split('\n')[0]
if not sha:
raise RuntimeError("couldn't find git sha for repository {}".format(repo_dir))
return sha
@contextmanager
def timer(name):
''' Print the elapsed time a context's execution takes to execute '''
start = time.time()
yield
# Please take care when modifying this print statement.
# Log parsing logic may depend on it.
logger.info('%s took %.3f secs' % (name, time.time() - start))
def parse_args():
# our custom ports, relative to the script location
defaultPortsPath = hifi_utils.scriptRelative('cmake', 'ports')
from argparse import ArgumentParser
parser = ArgumentParser(description='Prepare build dependencies.')
parser.add_argument('--android', type=str)
parser.add_argument('--debug', action='store_true')
parser.add_argument('--force-bootstrap', action='store_true')
parser.add_argument('--force-build', action='store_true')
parser.add_argument('--vcpkg-root', type=str, help='The location of the vcpkg distribution')
parser.add_argument('--build-root', required=True, type=str, help='The location of the cmake build')
parser.add_argument('--ports-path', type=str, default=defaultPortsPath)
parser.add_argument('--ci-build', action='store_true', default=os.getenv('CI_BUILD') is not None)
if True:
args = parser.parse_args()
else:
args = parser.parse_args(['--android', 'questInterface', '--build-root', 'C:/git/hifi/android/apps/questInterface/.externalNativeBuild/cmake/debug/arm64-v8a'])
return args
def main():
# Fixup env variables. Leaving `USE_CCACHE` on will cause scribe to fail to build
# VCPKG_ROOT seems to cause confusion on Windows systems that previously used it for
# building OpenSSL
removeEnvVars = ['VCPKG_ROOT', 'USE_CCACHE']
for var in removeEnvVars:
if var in os.environ:
del os.environ[var]
args = parse_args()
if args.ci_build:
logging.basicConfig(datefmt='%s', format='%(asctime)s %(guid)s %(message)s', level=logging.INFO)
logger.info('sha=%s' % headSha())
logger.info('start')
# Only allow one instance of the program to run at a time
pm = hifi_vcpkg.VcpkgRepo(args)
with hifi_singleton.Singleton(pm.lockFile) as lock:
with timer('Bootstraping'):
if not pm.upToDate():
pm.bootstrap()
# Always write the tag, even if we changed nothing. This
# allows vcpkg to reclaim disk space by identifying directories with
# tags that haven't been touched in a long time
pm.writeTag()
# Grab our required dependencies:
# * build host tools, like spirv-cross and scribe
# * build client dependencies like openssl and nvtt
with timer('Setting up dependencies'):
pm.setupDependencies()
# wipe out the build directories (after writing the tag, since failure
# here shouldn't invalidate the vcpkg install)
with timer('Cleaning builds'):
pm.cleanBuilds()
# If we're running in android mode, we also need to grab a bunch of additional binaries
# (this logic is all migrated from the old setupDependencies tasks in gradle)
if args.android:
# Find the target location
appPath = hifi_utils.scriptRelative('android/apps/' + args.android)
# Copy the non-Qt libraries specified in the config in hifi_android.py
hifi_android.copyAndroidLibs(pm.androidPackagePath, appPath)
# Determine the Qt package path
qtPath = os.path.join(pm.androidPackagePath, 'qt')
hifi_android.QtPackager(appPath, qtPath).bundle()
# Write the vcpkg config to the build directory last
with timer('Writing configuration'):
pm.writeConfig()
logger.info('end')
print(sys.argv)
main()
|
py | b401c987781e63e90684e485ee74f7b1a73543fd | #!/usr/bin/env python3
import json
import sys
from optparse import OptionParser
from zabbix.api import ZabbixAPI
from pyzabbix.api import ZabbixAPIException
from six.moves.urllib.error import URLError
# This constant describes 'script' value of 'type' property in the MediaType,
# which is specified in the Zabbix API specification.
SCRIPT_MEDIA_TYPE = '1'
# This is a constant for the metadata of MediaType to be registered
ST2_DISPATCHER_SCRIPT = 'st2_dispatch.py'
ST2_ACTION_NAME = 'Dispatching to StackStorm'
def get_options():
parser = OptionParser()
parser.add_option('-z', '--zabbix-url', dest="z_url",
help="The URL of Zabbix Server")
parser.add_option('-u', '--username', dest="z_userid", default='Admin',
help="Login username to login Zabbix Server")
parser.add_option('-p', '--password', dest="z_passwd", default='zabbix',
help="Password which is associated with the username")
parser.add_option('-s', '--sendto', dest="z_sendto", default='Admin',
help="Address, user name or other identifier of the recipient")
(options, args) = parser.parse_args()
if not options.z_url:
parser.error('Zabbix Server URL is not given')
return (options, args)
def is_already_registered_mediatype(client, options):
"""
This method checks whether the target MediaType has already been registered.
"""
for mtype in client.mediatype.get():
if mtype['type'] == SCRIPT_MEDIA_TYPE and mtype['exec_path'] == ST2_DISPATCHER_SCRIPT:
return mtype['mediatypeid']
def is_already_registered_action(client, options):
"""
This method checks whether the target Action has already been registered.
"""
for action in client.action.get():
if action['name'] == ST2_ACTION_NAME:
return action['actionid']
def register_media_type(client, options, mediatype_id=None):
"""
This method registers a MediaType which dispatches alerts to StackStorm.
"""
mediatype_args = [
'-- CHANGE ME : api_url (e.g. https://st2-node/api/v1)',
'-- CHANGE ME : auth_url (e.g. https://st2-node/auth/v1)',
'-- CHANGE ME : login uername of StackStorm --',
'-- CHANGE ME : login password of StackStorm --',
'{ALERT.SENDTO}',
'{ALERT.SUBJECT}',
'{ALERT.MESSAGE}',
]
# send request to register a new MediaType for StackStorm
params = {
'description': 'StackStorm',
'type': SCRIPT_MEDIA_TYPE,
'exec_path': ST2_DISPATCHER_SCRIPT,
'exec_params': "\n".join(mediatype_args) + "\n",
}
if mediatype_id:
params['mediatypeid'] = mediatype_id
ret = client.mediatype.update(**params)
else:
ret = client.mediatype.create(**params)
return ret['mediatypeids'][0]
def register_action(client, mediatype_id, options, action_id=None):
if action_id:
client.action.delete(action_id)
return client.action.create(**{
'name': ST2_ACTION_NAME,
'esc_period': 360,
'eventsource': 0, # means event created by a trigger
'def_shortdata': '{TRIGGER.STATUS}: {TRIGGER.NAME}',
'def_longdata': json.dumps({
'event': {
'id': '{EVENT.ID}',
'time': '{EVENT.TIME}',
},
'trigger': {
'id': '{TRIGGER.ID}',
'name': '{TRIGGER.NAME}',
'status': '{TRIGGER.STATUS}',
},
'items': [{
'name': '{ITEM.NAME%s}' % index,
'host': '{HOST.NAME%s}' % index,
'key': '{ITEM.KEY%s}' % index,
'value': '{ITEM.VALUE%s}' % index
} for index in range(1, 9)],
}),
'operations': [{
"operationtype": 0,
"esc_period": 0,
"esc_step_from": 1,
"esc_step_to": 1,
"evaltype": 0,
"opmessage_usr": [{"userid": "1"}],
"opmessage": {
"default_msg": 1,
"mediatypeid": mediatype_id,
}
}]
})
def register_media_to_admin(client, mediatype_id, options):
major_version = int(client.apiinfo.version()[0])
if major_version >= 4:
# This is because user.addmedia api was removed from Zabbix 4.0.
return client.user.update(**{
"userid": "1",
"user_medias": [{
"mediatypeid": mediatype_id,
"sendto": options.z_sendto,
"active": "0",
"severity": "63",
"period": "1-7,00:00-24:00",
}]
})
else:
return client.user.addmedia(**{
"users": [
{"userid": "1"},
],
"medias": {
"mediatypeid": mediatype_id,
"sendto": options.z_sendto,
"active": "0",
"severity": "63",
"period": "1-7,00:00-24:00",
}
})
def main():
(options, _) = get_options()
try:
client = ZabbixAPI(url=options.z_url,
user=options.z_userid,
password=options.z_passwd)
except URLError as e:
sys.exit('Failed to connect Zabbix server (%s)' % e)
except ZabbixAPIException as e:
sys.exit('Failed to authenticate Zabbix (%s)' % e)
# get ID of MediaType for StackStorm if it exists, or None.
mediatype_id = is_already_registered_mediatype(client, options)
# register a new MediaType or update one which is already registered to dispatch events
# to the StackStorm
mediatype_id = register_media_type(client, options, mediatype_id)
# get ID of Action for StackStorm if it exists, or None.
action_id = is_already_registered_action(client, options)
# register a Action which is associated with the registered MediaType
register_action(client, mediatype_id, options, action_id)
# register a Media to the Admin user
register_media_to_admin(client, mediatype_id, options)
print('Successfully registered the StackStorm configurations on the Zabbix Server.')
if __name__ == '__main__':
main()
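# Example invocation (sketch only -- the script name, Zabbix URL and credentials below
# are placeholders, not values taken from this repository):
#   python3 register_st2_config_to_zabbix.py -z http://zabbix.example.com -u Admin -p zabbix -s Admin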
|
py | b401c9a7c1825f42d3e2783541ea9120d824d277 | #!/usr/bin/env python3
import sys
data = [x.strip() for x in open(sys.argv[1], 'r').readlines()]
slope = (3, 1) # right, down
numRows = len(data) # changes between the example and actual input
numCols = len(data[0]) # ^
# slope m = (y2 - y1) / (x2 - x1) -> here it reduces to rows down per columns right (y/x)
trees = 0
x = 0
for y in range(0, numRows, slope[1]): # step down by slope[1] rows each iteration
if data[y][x] == '#': # data is a list of row strings; data[y][x] is the character at column x, '#' marks a tree
trees += 1
x = (x + slope[0]) % numCols # allow for "looping" around on the x-axis
print(f"trees: {trees}")
|
py | b401ca057d813e2bf357e0b20b5bad2d0d1d58a0 | import sys
from os.path import join
from numpy import mean
import argparse
chroms = set(['chr'+str(i) for i in range(1,23)] + ['chrY', 'chrX', 'chrM'])
if __name__ == '__main__' :
parser = argparse.ArgumentParser()
parser.add_argument('--anno_path', help='Path to GENCODE GTF annotation file')
parser.add_argument('--intron_path', help='Path to a BED file of introns')
parser.add_argument('--usage_dir', help='Directory with 5\' and 3\' usage data files')
parser.add_argument('--sample', help='Name of the sample from which the usage data was calculated')
parser.add_argument('--out_dir', help='Where to output the average usage file to')
args = parser.parse_args()
anno_path, intron_path, usage_dir, sample, out_dir = args.anno_path, args.intron_path, args.usage_dir, args.sample, args.out_dir
#Parse introns from BED file of GENCODE annotation obtained from UCSC table browser
tx_introns = {}
with open(intron_path) as in_file:
for line in in_file:
chrom, start, end, info, _, strand = line.strip().split('\t')
if chrom in chroms:
tx_id, intron = info.split('.')[0], (int(start), int(end)+1)
if tx_id not in tx_introns:
tx_introns[tx_id] = [intron]
else:
tx_introns[tx_id].append(intron)
#Map gene IDs to the transcript IDs of each intron set
gene_to_tx = {}
with open(anno_path) as in_file:
for line in in_file:
chrom, _, entry_type, start, end, _, strand, _, info = line.strip().split('\t')
if entry_type == 'transcript' and chrom in chroms:
info_pairs = info.split('; ')[:-1]
values = set([e.split(' ')[1].strip('\"') for e in info_pairs])
info_dict = {e.split(' ')[0]:e.split(' ')[1].strip('\"') for e in info_pairs}
gene_id, tx_id = info_dict['gene_id'].split('.')[0], info_dict['transcript_id'].split('.')[0]
if tx_id in tx_introns and 'appris_principal_1' in values:
if gene_id not in gene_to_tx:
gene_to_tx[gene_id] = {'tx':[tx_id], 'info':(chrom, strand)}
else:
gene_to_tx[gene_id]['tx'].append(tx_id)
#Identify the longest transcript in the gene
gene_longest_tx = {gene:max(gene_to_tx[gene]['tx'], key=lambda t:len(tx_introns[t])) for gene in gene_to_tx}
#Parse splice site usage data
usages = {ss_type:{chrom:{'+':{}, '-':{}} for chrom in chroms} for ss_type in ['3p', '5p']}
for ss_type in usages:
with open(join(usage_dir, '_'.join([sample, ss_type, 'ss_usage.txt']))) as in_file:
for line in in_file:
chrom, site, strand, _, _, usage = line.strip().split('\t')
if chrom != 'Chrom':
usages[ss_type][chrom][strand][int(site)] = float(usage)
#Calculate average usage for the gene's longest transcript
gene_usage_by_introns = []
for gene in gene_longest_tx:
max_tx = gene_longest_tx[gene]
intron_num = len(tx_introns[max_tx])
avg_usage = []
chrom, strand = gene_to_tx[gene]['info']
for intron_start, intron_end in tx_introns[max_tx]:
if strand == '+':
fivep_site, threep_site = intron_start, intron_end
else:
fivep_site, threep_site = intron_end, intron_start
if fivep_site in usages['5p'][chrom][strand] and threep_site in usages['3p'][chrom][strand]:
avg_usage.append(usages['5p'][chrom][strand][fivep_site])
avg_usage.append(usages['3p'][chrom][strand][threep_site])
#Only output data for transcripts that have usage for each splice site in every intron
if len(avg_usage) >= 2.0*len(tx_introns[max_tx]):
gene_usage_by_introns.append((gene, chrom, strand, str(mean(avg_usage)), str(intron_num)))
with open(join(out_dir, '{}_gene_usage_intron_primary_tx.txt'.format(sample)), 'w') as out_file:
out_file.write('Ensembl_ID\tChrom\tStrand\tAvg_usage\tIntron_num\n')
for i in range(len(gene_usage_by_introns)):
out_file.write('\t'.join(gene_usage_by_introns[i]) + '\n')
|
py | b401ca7d1662a4ec2ab8bf87eb4f471f0e7bc4a7 | #!/usr/bin/python
# -*- coding: utf-8 -*-
######################################################################################################################
# Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
######################################################################################################################
import base64
import logging
import boto3
from custom.custom_base import Custom
log = logging.getLogger()
log.setLevel(logging.INFO)
class Sagemaker(Custom):
def __init__(self, event, context, config_name, s3_bucket=None, s3_prefix_artifacts=None):
super().__init__(event, context,s3_bucket,s3_prefix_artifacts)
self.sage = boto3.client('sagemaker')
self.s3 = boto3.client('s3')
self.create_config = event["ResourceProperties"]["CreateConfig"]
self.s3_prefix_artifacts = s3_prefix_artifacts
self.s3_bucket = s3_bucket
self.config_name = config_name
def __call__(self):
if self.create_config == "true":
self.create_lifecycle_config()
return {'PhysicalResourceId': self.event["LogicalResourceId"]}
def __delete__(self):
self.delete_lifecycle_config()
def base64_encode(self,bucket,key):
try:
obj = self.s3.get_object(Bucket=bucket, Key=key)
encoded = base64.b64encode(obj['Body'].read())
return encoded.decode('utf-8')
except Exception as e:
print(e)
print(
'Error getting object {} from bucket {}. Make sure they exist and your bucket is in the same region as this function.'.format(
key, bucket))  # use the local parameters; self.key / self.bucket are not defined on this class
raise e
def create_lifecycle_config(self):
try:
bucket = self.s3_bucket
artifacts = super().get_artifactJson()
key = "{}/scripts/sagemaker-script/{}".format(self.s3_prefix_artifacts, artifacts['artifacts']['configs']['sagemaker'])
custom_script = self.base64_encode(bucket,key)
response = self.sage.create_notebook_instance_lifecycle_config(
NotebookInstanceLifecycleConfigName=self.config_name,
OnCreate=[
{
'Content': custom_script
},
]
)
log.info('Response = %s', response)
except Exception as e:
print('An error occurred: {}.'.format(e))
raise e
return response
def delete_lifecycle_config(self):
try:
response = self.sage.delete_notebook_instance_lifecycle_config(
NotebookInstanceLifecycleConfigName=self.config_name
)
log.info('Response = %s', response)
except Exception as e:
print('No Config or An error occurred: {}.'.format(e))
raise e
|
py | b401cf79c72b5c063cbac2ed4944ebe43533b53c | """Test script for ftplib module."""
# Modified by Giampaolo Rodola' to test FTP class, IPv6 and TLS
# environment
import ftplib
import asyncore
import asynchat
import socket
import io
import errno
import os
import time
try:
import ssl
except ImportError:
ssl = None
HAS_SNI = False
else:
from ssl import HAS_SNI
from unittest import TestCase, skipUnless
from test import support
from test.support import HOST, HOSTv6
threading = support.import_module('threading')
TIMEOUT = 3
# the dummy data returned by server over the data channel when
# RETR, LIST, NLST, MLSD commands are issued
RETR_DATA = 'abcde12345\r\n' * 1000
LIST_DATA = 'foo\r\nbar\r\n'
NLST_DATA = 'foo\r\nbar\r\n'
MLSD_DATA = ("type=cdir;perm=el;unique==keVO1+ZF4; test\r\n"
"type=pdir;perm=e;unique==keVO1+d?3; ..\r\n"
"type=OS.unix=slink:/foobar;perm=;unique==keVO1+4G4; foobar\r\n"
"type=OS.unix=chr-13/29;perm=;unique==keVO1+5G4; device\r\n"
"type=OS.unix=blk-11/108;perm=;unique==keVO1+6G4; block\r\n"
"type=file;perm=awr;unique==keVO1+8G4; writable\r\n"
"type=dir;perm=cpmel;unique==keVO1+7G4; promiscuous\r\n"
"type=dir;perm=;unique==keVO1+1t2; no-exec\r\n"
"type=file;perm=r;unique==keVO1+EG4; two words\r\n"
"type=file;perm=r;unique==keVO1+IH4; leading space\r\n"
"type=file;perm=r;unique==keVO1+1G4; file1\r\n"
"type=dir;perm=cpmel;unique==keVO1+7G4; incoming\r\n"
"type=file;perm=r;unique==keVO1+1G4; file2\r\n"
"type=file;perm=r;unique==keVO1+1G4; file3\r\n"
"type=file;perm=r;unique==keVO1+1G4; file4\r\n")
class DummyDTPHandler(asynchat.async_chat):
dtp_conn_closed = False
def __init__(self, conn, baseclass):
asynchat.async_chat.__init__(self, conn)
self.baseclass = baseclass
self.baseclass.last_received_data = ''
def handle_read(self):
self.baseclass.last_received_data += self.recv(1024).decode('ascii')
def handle_close(self):
# XXX: this method can be called many times in a row for a single
# connection, including in clear-text (non-TLS) mode.
# (behaviour witnessed with test_data_connection)
if not self.dtp_conn_closed:
self.baseclass.push('226 transfer complete')
self.close()
self.dtp_conn_closed = True
def push(self, what):
if self.baseclass.next_data is not None:
what = self.baseclass.next_data
self.baseclass.next_data = None
if not what:
return self.close_when_done()
super(DummyDTPHandler, self).push(what.encode('ascii'))
def handle_error(self):
raise Exception
class DummyFTPHandler(asynchat.async_chat):
dtp_handler = DummyDTPHandler
def __init__(self, conn):
asynchat.async_chat.__init__(self, conn)
# tells the socket to handle urgent data inline (ABOR command)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_OOBINLINE, 1)
self.set_terminator(b"\r\n")
self.in_buffer = []
self.dtp = None
self.last_received_cmd = None
self.last_received_data = ''
self.next_response = ''
self.next_data = None
self.rest = None
self.next_retr_data = RETR_DATA
self.push('220 welcome')
def collect_incoming_data(self, data):
self.in_buffer.append(data)
def found_terminator(self):
line = b''.join(self.in_buffer).decode('ascii')
self.in_buffer = []
if self.next_response:
self.push(self.next_response)
self.next_response = ''
cmd = line.split(' ')[0].lower()
self.last_received_cmd = cmd
space = line.find(' ')
if space != -1:
arg = line[space + 1:]
else:
arg = ""
if hasattr(self, 'cmd_' + cmd):
method = getattr(self, 'cmd_' + cmd)
method(arg)
else:
self.push('550 command "%s" not understood.' %cmd)
def handle_error(self):
raise Exception
def push(self, data):
asynchat.async_chat.push(self, data.encode('ascii') + b'\r\n')
def cmd_port(self, arg):
addr = list(map(int, arg.split(',')))
ip = '%d.%d.%d.%d' %tuple(addr[:4])
port = (addr[4] * 256) + addr[5]
s = socket.create_connection((ip, port), timeout=TIMEOUT)
self.dtp = self.dtp_handler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_pasv(self, arg):
with socket.socket() as sock:
sock.bind((self.socket.getsockname()[0], 0))
sock.listen()
sock.settimeout(TIMEOUT)
ip, port = sock.getsockname()[:2]
ip = ip.replace('.', ','); p1 = port // 256; p2 = port % 256
self.push('227 entering passive mode (%s,%d,%d)' %(ip, p1, p2))
conn, addr = sock.accept()
self.dtp = self.dtp_handler(conn, baseclass=self)
def cmd_eprt(self, arg):
af, ip, port = arg.split(arg[0])[1:-1]
port = int(port)
s = socket.create_connection((ip, port), timeout=TIMEOUT)
self.dtp = self.dtp_handler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_epsv(self, arg):
with socket.socket(socket.AF_INET6) as sock:
sock.bind((self.socket.getsockname()[0], 0))
sock.listen()
sock.settimeout(TIMEOUT)
port = sock.getsockname()[1]
self.push('229 entering extended passive mode (|||%d|)' %port)
conn, addr = sock.accept()
self.dtp = self.dtp_handler(conn, baseclass=self)
def cmd_echo(self, arg):
# sends back the received string (used by the test suite)
self.push(arg)
def cmd_noop(self, arg):
self.push('200 noop ok')
def cmd_user(self, arg):
self.push('331 username ok')
def cmd_pass(self, arg):
self.push('230 password ok')
def cmd_acct(self, arg):
self.push('230 acct ok')
def cmd_rnfr(self, arg):
self.push('350 rnfr ok')
def cmd_rnto(self, arg):
self.push('250 rnto ok')
def cmd_dele(self, arg):
self.push('250 dele ok')
def cmd_cwd(self, arg):
self.push('250 cwd ok')
def cmd_size(self, arg):
self.push('250 1000')
def cmd_mkd(self, arg):
self.push('257 "%s"' %arg)
def cmd_rmd(self, arg):
self.push('250 rmd ok')
def cmd_pwd(self, arg):
self.push('257 "pwd ok"')
def cmd_type(self, arg):
self.push('200 type ok')
def cmd_quit(self, arg):
self.push('221 quit ok')
self.close()
def cmd_abor(self, arg):
self.push('226 abor ok')
def cmd_stor(self, arg):
self.push('125 stor ok')
def cmd_rest(self, arg):
self.rest = arg
self.push('350 rest ok')
def cmd_retr(self, arg):
self.push('125 retr ok')
if self.rest is not None:
offset = int(self.rest)
else:
offset = 0
self.dtp.push(self.next_retr_data[offset:])
self.dtp.close_when_done()
self.rest = None
def cmd_list(self, arg):
self.push('125 list ok')
self.dtp.push(LIST_DATA)
self.dtp.close_when_done()
def cmd_nlst(self, arg):
self.push('125 nlst ok')
self.dtp.push(NLST_DATA)
self.dtp.close_when_done()
def cmd_opts(self, arg):
self.push('200 opts ok')
def cmd_mlsd(self, arg):
self.push('125 mlsd ok')
self.dtp.push(MLSD_DATA)
self.dtp.close_when_done()
def cmd_setlongretr(self, arg):
# For testing. Next RETR will return long line.
self.next_retr_data = 'x' * int(arg)
self.push('125 setlongretr ok')
class DummyFTPServer(asyncore.dispatcher, threading.Thread):
handler = DummyFTPHandler
def __init__(self, address, af=socket.AF_INET):
threading.Thread.__init__(self)
asyncore.dispatcher.__init__(self)
self.create_socket(af, socket.SOCK_STREAM)
self.bind(address)
self.listen(5)
self.active = False
self.active_lock = threading.Lock()
self.host, self.port = self.socket.getsockname()[:2]
self.handler_instance = None
def start(self):
assert not self.active
self.__flag = threading.Event()
threading.Thread.start(self)
self.__flag.wait()
def run(self):
self.active = True
self.__flag.set()
while self.active and asyncore.socket_map:
self.active_lock.acquire()
asyncore.loop(timeout=0.1, count=1)
self.active_lock.release()
asyncore.close_all(ignore_all=True)
def stop(self):
assert self.active
self.active = False
self.join()
def handle_accepted(self, conn, addr):
self.handler_instance = self.handler(conn)
def handle_connect(self):
self.close()
handle_read = handle_connect
def writable(self):
return 0
def handle_error(self):
raise Exception
if ssl is not None:
CERTFILE = os.path.join(os.path.dirname(__file__), "keycert3.pem")
CAFILE = os.path.join(os.path.dirname(__file__), "pycacert.pem")
class SSLConnection(asyncore.dispatcher):
"""An asyncore.dispatcher subclass supporting TLS/SSL."""
_ssl_accepting = False
_ssl_closing = False
def secure_connection(self):
socket = ssl.wrap_socket(self.socket, suppress_ragged_eofs=False,
certfile=CERTFILE, server_side=True,
do_handshake_on_connect=False,
ssl_version=ssl.PROTOCOL_SSLv23)
self.del_channel()
self.set_socket(socket)
self._ssl_accepting = True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
elif err.args[0] == ssl.SSL_ERROR_EOF:
return self.handle_close()
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def _do_ssl_shutdown(self):
self._ssl_closing = True
try:
self.socket = self.socket.unwrap()
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
except OSError as err:
# Any "socket error" corresponds to a SSL_ERROR_SYSCALL return
# from OpenSSL's SSL_shutdown(), corresponding to a
# closed socket condition. See also:
# http://www.mail-archive.com/[email protected]/msg60710.html
pass
self._ssl_closing = False
if getattr(self, '_ccc', False) is False:
super(SSLConnection, self).close()
else:
pass
def handle_read_event(self):
if self._ssl_accepting:
self._do_ssl_handshake()
elif self._ssl_closing:
self._do_ssl_shutdown()
else:
super(SSLConnection, self).handle_read_event()
def handle_write_event(self):
if self._ssl_accepting:
self._do_ssl_handshake()
elif self._ssl_closing:
self._do_ssl_shutdown()
else:
super(SSLConnection, self).handle_write_event()
def send(self, data):
try:
return super(SSLConnection, self).send(data)
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN,
ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return 0
raise
def recv(self, buffer_size):
try:
return super(SSLConnection, self).recv(buffer_size)
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return b''
if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN):
self.handle_close()
return b''
raise
def handle_error(self):
raise Exception
def close(self):
if (isinstance(self.socket, ssl.SSLSocket) and
self.socket._sslobj is not None):
self._do_ssl_shutdown()
else:
super(SSLConnection, self).close()
class DummyTLS_DTPHandler(SSLConnection, DummyDTPHandler):
"""A DummyDTPHandler subclass supporting TLS/SSL."""
def __init__(self, conn, baseclass):
DummyDTPHandler.__init__(self, conn, baseclass)
if self.baseclass.secure_data_channel:
self.secure_connection()
class DummyTLS_FTPHandler(SSLConnection, DummyFTPHandler):
"""A DummyFTPHandler subclass supporting TLS/SSL."""
dtp_handler = DummyTLS_DTPHandler
def __init__(self, conn):
DummyFTPHandler.__init__(self, conn)
self.secure_data_channel = False
self._ccc = False
def cmd_auth(self, line):
"""Set up secure control channel."""
self.push('234 AUTH TLS successful')
self.secure_connection()
def cmd_ccc(self, line):
self.push('220 Reverting back to clear-text')
self._ccc = True
self._do_ssl_shutdown()
def cmd_pbsz(self, line):
"""Negotiate size of buffer for secure data transfer.
For TLS/SSL the only valid value for the parameter is '0'.
Any other value is accepted but ignored.
"""
self.push('200 PBSZ=0 successful.')
def cmd_prot(self, line):
"""Setup un/secure data channel."""
arg = line.upper()
if arg == 'C':
self.push('200 Protection set to Clear')
self.secure_data_channel = False
elif arg == 'P':
self.push('200 Protection set to Private')
self.secure_data_channel = True
else:
self.push("502 Unrecognized PROT type (use C or P).")
class DummyTLS_FTPServer(DummyFTPServer):
handler = DummyTLS_FTPHandler
class TestFTPClass(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP(timeout=TIMEOUT)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def check_data(self, received, expected):
self.assertEqual(len(received), len(expected))
self.assertEqual(received, expected)
def test_getwelcome(self):
self.assertEqual(self.client.getwelcome(), '220 welcome')
def test_sanitize(self):
self.assertEqual(self.client.sanitize('foo'), repr('foo'))
self.assertEqual(self.client.sanitize('pass 12345'), repr('pass *****'))
self.assertEqual(self.client.sanitize('PASS 12345'), repr('PASS *****'))
def test_exceptions(self):
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 400')
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 499')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 500')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 599')
self.assertRaises(ftplib.error_proto, self.client.sendcmd, 'echo 999')
def test_all_errors(self):
exceptions = (ftplib.error_reply, ftplib.error_temp, ftplib.error_perm,
ftplib.error_proto, ftplib.Error, OSError, EOFError)
for x in exceptions:
try:
raise x('exception not included in all_errors set')
except ftplib.all_errors:
pass
def test_set_pasv(self):
# passive mode is supposed to be enabled by default
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(True)
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(False)
self.assertFalse(self.client.passiveserver)
def test_voidcmd(self):
self.client.voidcmd('echo 200')
self.client.voidcmd('echo 299')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 199')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 300')
def test_login(self):
self.client.login()
def test_acct(self):
self.client.acct('passwd')
def test_rename(self):
self.client.rename('a', 'b')
self.server.handler_instance.next_response = '200'
self.assertRaises(ftplib.error_reply, self.client.rename, 'a', 'b')
def test_delete(self):
self.client.delete('foo')
self.server.handler_instance.next_response = '199'
self.assertRaises(ftplib.error_reply, self.client.delete, 'foo')
def test_size(self):
self.client.size('foo')
def test_mkd(self):
dir = self.client.mkd('/foo')
self.assertEqual(dir, '/foo')
def test_rmd(self):
self.client.rmd('foo')
def test_cwd(self):
dir = self.client.cwd('/foo')
self.assertEqual(dir, '250 cwd ok')
def test_pwd(self):
dir = self.client.pwd()
self.assertEqual(dir, 'pwd ok')
def test_quit(self):
self.assertEqual(self.client.quit(), '221 quit ok')
# Ensure the connection gets closed; sock attribute should be None
self.assertEqual(self.client.sock, None)
def test_abort(self):
self.client.abort()
def test_retrbinary(self):
def callback(data):
received.append(data.decode('ascii'))
received = []
self.client.retrbinary('retr', callback)
self.check_data(''.join(received), RETR_DATA)
def test_retrbinary_rest(self):
def callback(data):
received.append(data.decode('ascii'))
for rest in (0, 10, 20):
received = []
self.client.retrbinary('retr', callback, rest=rest)
self.check_data(''.join(received), RETR_DATA[rest:])
def test_retrlines(self):
received = []
self.client.retrlines('retr', received.append)
self.check_data(''.join(received), RETR_DATA.replace('\r\n', ''))
def test_storbinary(self):
f = io.BytesIO(RETR_DATA.encode('ascii'))
self.client.storbinary('stor', f)
self.check_data(self.server.handler_instance.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storbinary('stor', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
def test_storbinary_rest(self):
f = io.BytesIO(RETR_DATA.replace('\r\n', '\n').encode('ascii'))
for r in (30, '30'):
f.seek(0)
self.client.storbinary('stor', f, rest=r)
self.assertEqual(self.server.handler_instance.rest, str(r))
def test_storlines(self):
f = io.BytesIO(RETR_DATA.replace('\r\n', '\n').encode('ascii'))
self.client.storlines('stor', f)
self.check_data(self.server.handler_instance.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storlines('stor foo', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
f = io.StringIO(RETR_DATA.replace('\r\n', '\n'))
# storlines() expects a binary file, not a text file
with support.check_warnings(('', BytesWarning), quiet=True):
self.assertRaises(TypeError, self.client.storlines, 'stor foo', f)
def test_nlst(self):
self.client.nlst()
self.assertEqual(self.client.nlst(), NLST_DATA.split('\r\n')[:-1])
def test_dir(self):
l = []
self.client.dir(lambda x: l.append(x))
self.assertEqual(''.join(l), LIST_DATA.replace('\r\n', ''))
def test_mlsd(self):
list(self.client.mlsd())
list(self.client.mlsd(path='/'))
list(self.client.mlsd(path='/', facts=['size', 'type']))
ls = list(self.client.mlsd())
for name, facts in ls:
self.assertIsInstance(name, str)
self.assertIsInstance(facts, dict)
self.assertTrue(name)
self.assertIn('type', facts)
self.assertIn('perm', facts)
self.assertIn('unique', facts)
def set_data(data):
self.server.handler_instance.next_data = data
def test_entry(line, type=None, perm=None, unique=None, name=None):
type = 'type' if type is None else type
perm = 'perm' if perm is None else perm
unique = 'unique' if unique is None else unique
name = 'name' if name is None else name
set_data(line)
_name, facts = next(self.client.mlsd())
self.assertEqual(_name, name)
self.assertEqual(facts['type'], type)
self.assertEqual(facts['perm'], perm)
self.assertEqual(facts['unique'], unique)
# plain
test_entry('type=type;perm=perm;unique=unique; name\r\n')
# "=" in fact value
test_entry('type=ty=pe;perm=perm;unique=unique; name\r\n', type="ty=pe")
test_entry('type==type;perm=perm;unique=unique; name\r\n', type="=type")
test_entry('type=t=y=pe;perm=perm;unique=unique; name\r\n', type="t=y=pe")
test_entry('type=====;perm=perm;unique=unique; name\r\n', type="====")
# spaces in name
test_entry('type=type;perm=perm;unique=unique; na me\r\n', name="na me")
test_entry('type=type;perm=perm;unique=unique; name \r\n', name="name ")
test_entry('type=type;perm=perm;unique=unique; name\r\n', name=" name")
test_entry('type=type;perm=perm;unique=unique; n am e\r\n', name="n am e")
# ";" in name
test_entry('type=type;perm=perm;unique=unique; na;me\r\n', name="na;me")
test_entry('type=type;perm=perm;unique=unique; ;name\r\n', name=";name")
test_entry('type=type;perm=perm;unique=unique; ;name;\r\n', name=";name;")
test_entry('type=type;perm=perm;unique=unique; ;;;;\r\n', name=";;;;")
# case sensitiveness
set_data('Type=type;TyPe=perm;UNIQUE=unique; name\r\n')
_name, facts = next(self.client.mlsd())
for x in facts:
self.assertTrue(x.islower())
# no data (directory empty)
set_data('')
self.assertRaises(StopIteration, next, self.client.mlsd())
set_data('')
for x in self.client.mlsd():
self.fail("unexpected data %s" % x)
def test_makeport(self):
with self.client.makeport():
# IPv4 is in use, just make sure send_eprt has not been used
self.assertEqual(self.server.handler_instance.last_received_cmd,
'port')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), timeout=TIMEOUT)
conn.close()
# IPv4 is in use, just make sure send_epsv has not been used
self.assertEqual(self.server.handler_instance.last_received_cmd, 'pasv')
def test_with_statement(self):
self.client.quit()
def is_client_connected():
if self.client.sock is None:
return False
try:
self.client.sendcmd('noop')
except (OSError, EOFError):
return False
return True
# base test
with ftplib.FTP(timeout=TIMEOUT) as self.client:
self.client.connect(self.server.host, self.server.port)
self.client.sendcmd('noop')
self.assertTrue(is_client_connected())
self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
self.assertFalse(is_client_connected())
# QUIT sent inside the with block
with ftplib.FTP(timeout=TIMEOUT) as self.client:
self.client.connect(self.server.host, self.server.port)
self.client.sendcmd('noop')
self.client.quit()
self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
self.assertFalse(is_client_connected())
# force a wrong response code to be sent on QUIT: error_perm
# is expected and the connection is supposed to be closed
try:
with ftplib.FTP(timeout=TIMEOUT) as self.client:
self.client.connect(self.server.host, self.server.port)
self.client.sendcmd('noop')
self.server.handler_instance.next_response = '550 error on quit'
except ftplib.error_perm as err:
self.assertEqual(str(err), '550 error on quit')
else:
self.fail('Exception not raised')
# needed to give the threaded server some time to set the attribute
# which otherwise would still be == 'noop'
time.sleep(0.1)
self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
self.assertFalse(is_client_connected())
def test_source_address(self):
self.client.quit()
port = support.find_unused_port()
try:
self.client.connect(self.server.host, self.server.port,
source_address=(HOST, port))
self.assertEqual(self.client.sock.getsockname()[1], port)
self.client.quit()
except OSError as e:
if e.errno == errno.EADDRINUSE:
self.skipTest("couldn't bind to port %d" % port)
raise
def test_source_address_passive_connection(self):
port = support.find_unused_port()
self.client.source_address = (HOST, port)
try:
with self.client.transfercmd('list') as sock:
self.assertEqual(sock.getsockname()[1], port)
except OSError as e:
if e.errno == errno.EADDRINUSE:
self.skipTest("couldn't bind to port %d" % port)
raise
def test_parse257(self):
self.assertEqual(ftplib.parse257('257 "/foo/bar"'), '/foo/bar')
self.assertEqual(ftplib.parse257('257 "/foo/bar" created'), '/foo/bar')
self.assertEqual(ftplib.parse257('257 ""'), '')
self.assertEqual(ftplib.parse257('257 "" created'), '')
self.assertRaises(ftplib.error_reply, ftplib.parse257, '250 "/foo/bar"')
# The 257 response is supposed to include the directory
# name and in case it contains embedded double-quotes
# they must be doubled (see RFC-959, chapter 7, appendix 2).
self.assertEqual(ftplib.parse257('257 "/foo/b""ar"'), '/foo/b"ar')
self.assertEqual(ftplib.parse257('257 "/foo/b""ar" created'), '/foo/b"ar')
def test_line_too_long(self):
self.assertRaises(ftplib.Error, self.client.sendcmd,
'x' * self.client.maxline * 2)
def test_retrlines_too_long(self):
self.client.sendcmd('SETLONGRETR %d' % (self.client.maxline * 2))
received = []
self.assertRaises(ftplib.Error,
self.client.retrlines, 'retr', received.append)
def test_storlines_too_long(self):
f = io.BytesIO(b'x' * self.client.maxline * 2)
self.assertRaises(ftplib.Error, self.client.storlines, 'stor', f)
@skipUnless(support.IPV6_ENABLED, "IPv6 not enabled")
class TestIPv6Environment(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOSTv6, 0), af=socket.AF_INET6)
self.server.start()
self.client = ftplib.FTP(timeout=TIMEOUT)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def test_af(self):
self.assertEqual(self.client.af, socket.AF_INET6)
def test_makeport(self):
with self.client.makeport():
self.assertEqual(self.server.handler_instance.last_received_cmd,
'eprt')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), timeout=TIMEOUT)
conn.close()
self.assertEqual(self.server.handler_instance.last_received_cmd, 'epsv')
def test_transfer(self):
def retr():
def callback(data):
received.append(data.decode('ascii'))
received = []
self.client.retrbinary('retr', callback)
self.assertEqual(len(''.join(received)), len(RETR_DATA))
self.assertEqual(''.join(received), RETR_DATA)
self.client.set_pasv(True)
retr()
self.client.set_pasv(False)
retr()
@skipUnless(ssl, "SSL not available")
class TestTLS_FTPClassMixin(TestFTPClass):
"""Repeat TestFTPClass tests starting the TLS layer for both control
and data connections first.
"""
def setUp(self):
self.server = DummyTLS_FTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP_TLS(timeout=TIMEOUT)
self.client.connect(self.server.host, self.server.port)
# enable TLS
self.client.auth()
self.client.prot_p()
@skipUnless(ssl, "SSL not available")
class TestTLS_FTPClass(TestCase):
"""Specific TLS_FTP class tests."""
def setUp(self):
self.server = DummyTLS_FTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP_TLS(timeout=TIMEOUT)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def test_control_connection(self):
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.auth()
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
def test_data_connection(self):
# clear text
with self.client.transfercmd('list') as sock:
self.assertNotIsInstance(sock, ssl.SSLSocket)
self.assertEqual(self.client.voidresp(), "226 transfer complete")
# secured, after PROT P
self.client.prot_p()
with self.client.transfercmd('list') as sock:
self.assertIsInstance(sock, ssl.SSLSocket)
self.assertEqual(self.client.voidresp(), "226 transfer complete")
# PROT C is issued, the connection must be in cleartext again
self.client.prot_c()
with self.client.transfercmd('list') as sock:
self.assertNotIsInstance(sock, ssl.SSLSocket)
self.assertEqual(self.client.voidresp(), "226 transfer complete")
def test_login(self):
# login() is supposed to implicitly secure the control connection
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.login()
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
# make sure that AUTH TLS doesn't get issued again
self.client.login()
def test_auth_issued_twice(self):
self.client.auth()
self.assertRaises(ValueError, self.client.auth)
def test_auth_ssl(self):
try:
self.client.ssl_version = ssl.PROTOCOL_SSLv3
self.client.auth()
self.assertRaises(ValueError, self.client.auth)
finally:
self.client.ssl_version = ssl.PROTOCOL_TLSv1
def test_context(self):
self.client.quit()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(ValueError, ftplib.FTP_TLS, keyfile=CERTFILE,
context=ctx)
self.assertRaises(ValueError, ftplib.FTP_TLS, certfile=CERTFILE,
context=ctx)
self.assertRaises(ValueError, ftplib.FTP_TLS, certfile=CERTFILE,
keyfile=CERTFILE, context=ctx)
self.client = ftplib.FTP_TLS(context=ctx, timeout=TIMEOUT)
self.client.connect(self.server.host, self.server.port)
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.auth()
self.assertIs(self.client.sock.context, ctx)
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
self.client.prot_p()
with self.client.transfercmd('list') as sock:
self.assertIs(sock.context, ctx)
self.assertIsInstance(sock, ssl.SSLSocket)
def test_ccc(self):
self.assertRaises(ValueError, self.client.ccc)
self.client.login(secure=True)
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
self.client.ccc()
self.assertRaises(ValueError, self.client.sock.unwrap)
@skipUnless(HAS_SNI, 'No SNI support in ssl module')
def test_check_hostname(self):
self.client.quit()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.check_hostname = True
ctx.load_verify_locations(CAFILE)
self.client = ftplib.FTP_TLS(context=ctx, timeout=TIMEOUT)
# 127.0.0.1 doesn't match SAN
self.client.connect(self.server.host, self.server.port)
with self.assertRaises(ssl.CertificateError):
self.client.auth()
# exception quits connection
self.client.connect(self.server.host, self.server.port)
self.client.prot_p()
with self.assertRaises(ssl.CertificateError):
with self.client.transfercmd("list") as sock:
pass
self.client.quit()
self.client.connect("localhost", self.server.port)
self.client.auth()
self.client.quit()
self.client.connect("localhost", self.server.port)
self.client.prot_p()
with self.client.transfercmd("list") as sock:
pass
class TestTimeouts(TestCase):
def setUp(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(20)
self.port = support.bind_port(self.sock)
self.server_thread = threading.Thread(target=self.server)
self.server_thread.start()
# Wait for the server to be ready.
self.evt.wait()
self.evt.clear()
self.old_port = ftplib.FTP.port
ftplib.FTP.port = self.port
def tearDown(self):
ftplib.FTP.port = self.old_port
self.server_thread.join()
def server(self):
        # This method sets the evt twice:
        #  1) when the connection is ready to be accepted
        #  2) when it is safe for the caller to close the connection
self.sock.listen()
# (1) Signal the caller that we are ready to accept the connection.
self.evt.set()
try:
conn, addr = self.sock.accept()
except socket.timeout:
pass
else:
conn.sendall(b"1 Hola mundo\n")
conn.shutdown(socket.SHUT_WR)
# (2) Signal the caller that it is safe to close the socket.
self.evt.set()
conn.close()
finally:
self.sock.close()
def testTimeoutDefault(self):
# default -- use global socket timeout
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP(HOST)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutNone(self):
# no timeout -- do not use global socket timeout
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP(HOST, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertIsNone(ftp.sock.gettimeout())
self.evt.wait()
ftp.close()
def testTimeoutValue(self):
# a value
ftp = ftplib.FTP(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutConnect(self):
ftp = ftplib.FTP()
ftp.connect(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDifferentOrder(self):
ftp = ftplib.FTP(timeout=30)
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDirectAccess(self):
ftp = ftplib.FTP()
ftp.timeout = 30
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def test_main():
tests = [TestFTPClass, TestTimeouts,
TestIPv6Environment,
TestTLS_FTPClassMixin, TestTLS_FTPClass]
thread_info = support.threading_setup()
try:
support.run_unittest(*tests)
finally:
support.threading_cleanup(*thread_info)
if __name__ == '__main__':
test_main()
|
py | b401cf910a6b8fe6a86de9e2556343804ebe9e04 | print('{:=^40}'.format(' LOJAS DANTAS '))
preco = float(input('What is the product price? R$'))
pagamento = int(input('Choose the payment method:\n1 - cash\n2 - single payment on the card\n3 - 2 installments on the card\n4 - 3 installments on the card\nOption:_'))
if pagamento == 1:
    print('Cash, 10% discount, total due R${:.2f}'.format(preco - preco*0.1))
elif pagamento == 2:
    print('Single payment on the card, 5% discount, total due R${:.2f}'.format(preco - preco*0.05))
elif pagamento == 3:
    print('2 installments on the card, regular price with no interest R${:.2f}, installments of R${:.2f}'.format(preco, preco/2))
elif pagamento == 4:
    print('3 installments on the card, 20% interest, total R${:.2f}, installments of R${:.2f}'.format(preco + preco*0.2, (preco + preco*0.2)/3))
else:
    print('Invalid payment option')
|
py | b401cf928040450de4cfc8d190d08ad3ecd1ab5d | """
Minimum Path Sum
Given an m x n grid filled with non-negative numbers,
find a path from top left to bottom right which minimizes the sum of all numbers along its path.
Note: You can only move either down or right at any point in time
"""
# Bottom-up dynamic programming: reuses the input grid as the DP table.
class Solution(object):
def minPathSum(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
if not grid:
return 0
m = len(grid[0])
n = len(grid)
for i in range(n - 1, -1, -1):
for j in range(m - 1, -1, -1):
if i == n - 1 and j == m - 1:
continue
if i == n - 1:
grid[i][j] = grid[i][j] + grid[i][j + 1]
elif j == m - 1:
grid[i][j] = grid[i][j] + grid[i + 1][j]
else:
grid[i][j] = grid[i][j] + min(grid[i + 1][j], grid[i][j + 1])
return grid[0][0]
# Recursion + cache: top-down solution with memoization.
class Solution2(object):
def __init__(self):
self.cache = {}
def helper(self, grid, i, j, m, n):
if i == n - 1 and j == m - 1:
return grid[i][j]
key = str(i) + "_" + str(j)
if key in self.cache:
return self.cache[key]
value = None
if i == n - 1:
value = grid[i][j] + self.helper(grid, i, j + 1, m, n)
elif j == m - 1:
value = grid[i][j] + self.helper(grid, i + 1, j, m, n)
else:
value = grid[i][j] + min(self.helper(grid, i + 1, j, m, n), self.helper(grid, i, j + 1, m, n))
self.cache[key] = value
return value
def minPathSum(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
if not grid:
return 0
m = len(grid[0])
n = len(grid)
return self.helper(grid, 0, 0, m, n)
grid = [
[1,2,5],
[3,2,1]
]
s = Solution()
minSum = s.minPathSum(grid)
print(minSum)
|
py | b401d0162e4267a0ee47b4d703ee5a2de98e48a6 | #!/usr/bin/python
from .globals import DEBUG, DISABLE_RESIZE_SYSTEM
from .wgwidget import TEST_SETTINGS, ExhaustedTestInput, add_test_input_from_iterable, add_test_input_ch
from .npyssafewrapper import wrapper, wrapper_basic
from .npysThemeManagers import ThemeManager, disableColor, enableColor
from . import npysThemes as Themes
from .apNPSApplication import NPSApp
from .apNPSApplicationManaged import NPSAppManaged
from .proto_fm_screen_area import setTheme
from .fmForm import FormBaseNew, Form, TitleForm, TitleFooterForm, SplitForm, FormExpanded, FormBaseNewExpanded, blank_terminal
from .fmActionForm import ActionForm, ActionFormExpanded
from .fmActionFormV2 import ActionFormV2, ActionFormExpandedV2, ActionFormMinimal
from .fmFormWithMenus import FormWithMenus, ActionFormWithMenus, \
FormBaseNewWithMenus, SplitFormWithMenus, \
ActionFormV2WithMenus
from .fmPopup import Popup, MessagePopup, ActionPopup, PopupWide, ActionPopupWide
from .fmFormMutt import FormMutt, FormMuttWithMenus
from .fmFileSelector import FileSelector, selectFile
from .fmFormMuttActive import ActionControllerSimple, TextCommandBox, \
FormMuttActive, FormMuttActiveWithMenus
from .fmFormMuttActive import FormMuttActiveTraditional, FormMuttActiveTraditionalWithMenus
from .fmFormMultiPage import FormMultiPage, FormMultiPageAction,\
FormMultiPageActionWithMenus, FormMultiPageWithMenus
from .npysNPSFilteredData import NPSFilteredDataBase, NPSFilteredDataList
from .wgbutton import MiniButton
from .wgbutton import MiniButtonPress
from .wgbutton import MiniButton as Button
from .wgbutton import MiniButtonPress as ButtonPress
from .wgtextbox import Textfield, FixedText
from .wgtitlefield import TitleText, TitleFixedText
from .wgpassword import PasswordEntry, TitlePassword
from .wgannotatetextbox import AnnotateTextboxBase
from .wgannotatetextbox import AnnotateTextboxBaseRight
from .wgslider import Slider, TitleSlider
from .wgslider import SliderNoLabel, TitleSliderNoLabel
from .wgslider import SliderPercent, TitleSliderPercent
from .wgwidget import DummyWidget, NotEnoughSpaceForWidget
from . import wgwidget as widget
from .wgmultiline import MultiLine, Pager, TitleMultiLine, TitlePager, MultiLineAction, BufferPager, TitleBufferPager
from .wgmultiselect import MultiSelect, TitleMultiSelect, MultiSelectFixed, \
TitleMultiSelectFixed, MultiSelectAction
from .wgeditmultiline import MultiLineEdit
from .wgcombobox import ComboBox, TitleCombo
from .wgcheckbox import Checkbox, RoundCheckBox, CheckBoxMultiline, RoundCheckBoxMultiline, CheckBox, CheckboxBare
from .wgFormControlCheckbox import FormControlCheckbox
from .wgautocomplete import TitleFilename, Filename, Autocomplete
from .muMenu import Menu
from .wgselectone import SelectOne, TitleSelectOne
from .wgdatecombo import DateCombo, TitleDateCombo
from .npysTree import TreeData
from .wgmultilinetree import MLTree, MLTreeAnnotated, MLTreeAction, MLTreeAnnotatedAction
from .wgmultilinetreeselectable import MLTreeMultiSelect, TreeLineSelectable
from .wgmultilinetreeselectable import MLTreeMultiSelectAnnotated, TreeLineSelectableAnnotated
# The following are maintained for compatibility with old code only. ##########################################
from .compatibility_code.oldtreeclasses import MultiLineTree, SelectOneTree
from .compatibility_code.oldtreeclasses import MultiLineTreeNew, MultiLineTreeNewAction, TreeLine, TreeLineAnnotated # Experimental
from .compatibility_code.oldtreeclasses import MultiLineTreeNewAnnotatedAction, MultiLineTreeNewAnnotated # Experimental
from .compatibility_code.npysNPSTree import NPSTreeData
# End compatibility. ###########################################################################################
from .wgfilenamecombo import FilenameCombo, TitleFilenameCombo
from .wgboxwidget import BoxBasic, BoxTitle
from .wgmultiline import MultiLineActionWithShortcuts
from .wgmultilineeditable import MultiLineEditable, MultiLineEditableTitle, MultiLineEditableBoxed
from .wgmonthbox import MonthBox
from .wggrid import SimpleGrid
from .wggridcoltitles import GridColTitles
from .muNewMenu import NewMenu, MenuItem
from .wgNMenuDisplay import MenuDisplay, MenuDisplayScreen
from .npyspmfuncs import CallSubShell
from .utilNotify import notify, notify_confirm, notify_wait, notify_ok_cancel, notify_yes_no, notify_loading
# Base classes for overriding:
# Standard Forms:
from . import stdfmemail
# Experimental Only
from .wgtextboxunicode import TextfieldUnicode
from .wgtexttokens import TextTokens, TitleTextTokens
# Very experimental. Don't use for anything serious
from .apOptions import SimpleOptionForm
from .apOptions import OptionListDisplay, OptionChanger, OptionList, OptionLimitedChoices, OptionListDisplayLine
from .apOptions import OptionFreeText, OptionSingleChoice, OptionMultiChoice, OptionMultiFreeList, \
OptionBoolean, OptionFilename, OptionDate, OptionMultiFreeText
# This really is about as experimental as it gets
from .apNPSApplicationEvents import StandardApp
from .eveventhandler import Event
|
py | b401d10dfdf7ab7c37f5203f68f0626a135e8c7e | """This module contains fields that depend on importing `bson`. `bson` is
a part of the pymongo distribution.
"""
from schematics.types import BaseType
from schematics.exceptions import ValidationError
import bson
class ObjectIdType(BaseType):
"""An field wrapper around MongoDB ObjectIds. It is correct to say they're
bson fields, but I am unaware of bson being used outside MongoDB.
`auto_fill` is disabled by default for ObjectIdType's as they are
typically obtained after a successful save to Mongo.
"""
def __init__(self, auto_fill=False, **kwargs):
self.auto_fill = auto_fill
super(ObjectIdType, self).__init__(**kwargs)
def to_native(self, value):
if not isinstance(value, bson.objectid.ObjectId):
value = bson.objectid.ObjectId(unicode(value))
return value
def to_primitive(self, value):
return str(value)
def validate_id(self, value):
if not isinstance(value, bson.objectid.ObjectId):
try:
value = bson.objectid.ObjectId(unicode(value))
            except Exception as e:
raise ValidationError('Invalid ObjectId')
return True
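# Minimal usage sketch (illustrative only; the Model subclass and the example
# value below are assumptions, not part of this module):
#
#     from schematics.models import Model
#
#     class Document(Model):
#         id = ObjectIdType()
#
#     doc = Document({'id': '5f2b6f1e9d3c2a0007e6b0aa'})
#     doc.validate()                # runs validate_id() above
#     doc.to_primitive()['id']      # -> '5f2b6f1e9d3c2a0007e6b0aa' (str, via to_primitive)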
|
py | b401d1f9b7e178932d6f4aca47993de4a396b678 | # Generated by Django 3.1.5 on 2021-01-24 15:09
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='UserAccount',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
|
py | b401d2f8821d742429f5bd758bdb06579268733e | #!/usr/bin/env python
import json
import logging
import sqlite3
import sys
import git
import project
# logging
logging.basicConfig(format='%(asctime)s %(process)s %(levelname)-8s %(message)s', stream=sys.stdout)
log = logging.getLogger()
log.setLevel(logging.INFO)
SCHEMA_DDL = """
create table fix_commits (
cve_id text,
fix_commit_hash text
);
create table commit_affected_lines (
affecting_commit_hash text,
affected_file text,
affected_line integer,
affected_line_blame_commit_hash text
);
create table commit_details (
commit_hash text,
committed_timestamp text,
parent_commit_hash text
);
"""
def main():
with open(project.CVE_BLAME_FILE) as f:
cve_blame = json.load(f)
repo = git.Repo(project.REPO_DIR)
with sqlite3.connect(project.RELATIONAL_DB_FILE) as conn:
_create_schema(conn)
_insert_cve_data(conn, cve_blame)
_insert_commit_details(conn, repo)
conn.commit()
def _create_schema(conn):
cur = conn.cursor()
for ddl_statement in SCHEMA_DDL.split(';'):
cur.execute(ddl_statement)
log.info("schema created")
def _insert_cve_data(conn, cve_blame):
for cve_id, cve_details in cve_blame.items():
_insert_cve(conn, cve_id, cve_details)
log.info("CVE data inserted")
def _insert_cve(conn, cve_id, cve_details):
cur = conn.cursor()
for commit_hash, commit_details in cve_details['fix_commits'].items():
cur.execute("insert into fix_commits values (?, ?)", (cve_id, commit_hash))
for path, file_details in commit_details['affected_files'].items():
for line in file_details['blame_lines']:
cur.execute("insert into commit_affected_lines values (?,?,?,?)",
(commit_hash, path, line['line_no'], line['blame_commit']))
def _insert_commit_details(conn, repo):
cur = conn.cursor()
inscur = conn.cursor()
for commit_hash_tuple in cur.execute("select distinct fix_commit_hash from fix_commits union select distinct affected_line_blame_commit_hash from commit_affected_lines"):
commit_hash = commit_hash_tuple[0]
try:
commit = repo.commit(commit_hash)
first_parent = commit.parents[0].hexsha if len(commit.parents) > 0 else None
inscur.execute("insert into commit_details values (?,?,?)",
(commit_hash, commit.committed_datetime.isoformat(), first_parent))
        except Exception:
log.exception("error storing details for commit %s", commit_hash)
log.info("commit details inserted")
if __name__ == '__main__':
main()
|
py | b401d348134b3d7a1593989161a192b7980d7234 | from __future__ import division
from __future__ import absolute_import
__copyright__ = "Copyright (C) 2019 Icerm"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
__doc__ = """
VSYSPY is a Python api wrapper for VSYS network.
VSYSPY is a recursive acronym for V SYStems Python.
"""
from vsyspy.setting import *
import logging
console = logging.StreamHandler()
console.setLevel(logging.ERROR)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
console.setFormatter(formatter)
logging.getLogger().addHandler(console)
OFFLINE = False
def set_offline():
global OFFLINE
OFFLINE = True
def set_online():
global OFFLINE
OFFLINE = False
def is_offline():
global OFFLINE
return OFFLINE
from vsyspy.wrapper import Wrapper
def create_api_wrapper(node_host=DEFAULT_NODE, api_key=DEFAULT_API_KEY):
return Wrapper(node_host, api_key)
from .chain import Chain
def testnet_chain(api_wrapper=create_api_wrapper(DEFAULT_TESTNET_NODE, DEFAULT_TESTNET_API_KEY)):
return Chain(TESTNET_CHAIN, TESTNET_CHAIN_ID, ADDRESS_VERSION, api_wrapper)
def default_chain(api_wrapper=create_api_wrapper()):
return Chain(DEFAULT_CHAIN, DEFAULT_CHAIN_ID, ADDRESS_VERSION, api_wrapper)
from .account import Account
from .contract import Contract, DataEntry
def default_contract(con_dts=Contract_Permitted_Without_Split):
return Contract(con_dts)
__all__ = [
'Account', 'Chain', 'Wrapper', 'Contract', 'DataEntry', 'is_offline'
]
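# Minimal usage sketch (illustrative only; it assumes a reachable node and a valid
# API key, neither of which ships with this package):
#
#     import vsyspy
#
#     wrapper = vsyspy.create_api_wrapper()   # uses DEFAULT_NODE / DEFAULT_API_KEY
#     chain = vsyspy.default_chain(wrapper)   # or vsyspy.testnet_chain()
#
# Account and Contract construction live in vsyspy.account and vsyspy.contract;
# see those modules for the exact constructor signatures.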
|
py | b401d48ee5cf19e3f225d236b2a3981e3dff2912 | import os
from libcloud.storage.types import ContainerDoesNotExistError
from lib.actions import BaseAction
__all__ = [
'UploadFileAction'
]
class UploadFileAction(BaseAction):
api_type = 'storage'
def run(self, credentials, file_path, container_name, object_name=None):
driver = self._get_driver_for_credentials(credentials=credentials)
try:
container = driver.get_container(container_name=container_name)
except ContainerDoesNotExistError:
self.logger.debug('Container "%s" doesn\'t exist, creating it...' %
(container_name))
container = driver.create_container(container_name=container_name)
object_name = object_name if object_name else os.path.basename(file_path)
obj = driver.upload_object(file_path=file_path, container=container,
object_name=object_name)
self.logger.info('Object successfully uploaded: %s' % (obj))
return obj
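    # Illustrative invocation (a sketch only -- the `credentials` value and how the
    # surrounding StackStorm pack resolves it are assumptions, not shown in this file):
    #
    #     action.run(credentials='my_storage_creds',
    #                file_path='/tmp/report.tar.gz',
    #                container_name='backups')   # object_name defaults to the file's basename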
|
py | b401d5698403044ac3022517434be31cd2634dee | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on Nov 7, 2015
Don't blink...
@author: Juan_Insuasti
'''
import pyrebase
from Shared import Logger
class Storage:
    def __init__(self, options, logs=True, debug=False):
self.console = Logger.Logger(logName='Storage', enabled=logs, printConsole=True)
self.firebase = pyrebase.initialize_app(options)
self.storage = self.firebase.storage()
self.console.log("Initialization...")
self.debug = debug
    def saveFile(self, path, file):
        self.console.log("Uploading: %s -> %s", (str(file), path))
        info = self.storage.child(str(path)).put(file)
        # print("Saving: " + str(file) + " -> " + path )
        if self.debug:
            self.console.log("Uploading: %s ", (str(info)))
        url = self.storage.child(path).get_url(1)
        # print("URL: " + str(url) )
        self.console.log("%s URL: %s", (path, url))
        return url
    def downloadFile(self, path, file):
        info = self.storage.child(str(path)).download(file)
        # print("Downloading: " + path + " -> " + str(file))
        self.console.log("Downloading: %s -> %s", (path, str(file)))
        if self.debug:
            self.console.log("Downloading: %s ", (str(info)))
    def getUrl(self, path):
        url = self.storage.child(path).get_url(1)
        self.console.log("%s URL: %s", (path, url))
        return url
if __name__ == '__main__':
#initial setup
config = {
"apiKey": "AIzaSyCeLjnaoNZ6c9BKkccXt5E0H74DGKJWXek",
"authDomain": "testproject-cd274.firebaseapp.com",
"databaseURL": "https://testproject-cd274.firebaseio.com",
"storageBucket": "testproject-cd274.appspot.com"
}
#storage startup
store = Storage(config)
#save a file
print("Testing file save...")
url = store.saveFile("test/testfile.txt","test.txt")
print("Returned URL... " + url)
#get url
print("Testing getting url...")
url2 = store.getUrl("test/testfile.txt")
print("Returned URL2... " + url2)
pass
|
py | b401d57e235dcb4bfdfe0976ddd55388710be596 | # Copyright 2009-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Utility functions and definitions for python3 compatibility."""
import sys
PY3 = sys.version_info[0] == 3
if PY3:
import codecs
import _thread as thread
from io import BytesIO as StringIO
try:
import collections.abc as abc
except ImportError:
# PyPy3 (based on CPython 3.2)
import collections as abc
MAXSIZE = sys.maxsize
imap = map
def b(s):
# BSON and socket operations deal in binary data. In
# python 3 that means instances of `bytes`. In python
# 2.6 and 2.7 you can create an alias for `bytes` using
# the b prefix (e.g. b'foo').
# See http://python3porting.com/problems.html#nicer-solutions
return codecs.latin_1_encode(s)[0]
def bytes_from_hex(h):
return bytes.fromhex(h)
def iteritems(d):
return iter(d.items())
def itervalues(d):
return iter(d.values())
def reraise(exctype, value, trace=None):
raise exctype(str(value)).with_traceback(trace)
def reraise_instance(exc_instance, trace=None):
raise exc_instance.with_traceback(trace)
def _unicode(s):
return s
text_type = str
string_type = str
integer_types = int
else:
import collections as abc
import thread
from itertools import imap
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
MAXSIZE = sys.maxint
def b(s):
# See comments above. In python 2.x b('foo') is just 'foo'.
return s
def bytes_from_hex(h):
return h.decode('hex')
def iteritems(d):
return d.iteritems()
def itervalues(d):
return d.itervalues()
def reraise(exctype, value, trace=None):
_reraise(exctype, str(value), trace)
def reraise_instance(exc_instance, trace=None):
_reraise(exc_instance, None, trace)
# "raise x, y, z" raises SyntaxError in Python 3
exec("""def _reraise(exc, value, trace):
raise exc, value, trace
""")
_unicode = unicode
string_type = basestring
text_type = unicode
integer_types = (int, long)
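# Typical use from the rest of the driver (illustrative; the exact import path
# depends on where this module lives in the distribution, e.g. bson.py3compat):
#
#     from bson.py3compat import iteritems, string_type
#
#     for key, value in iteritems(document):
#         ...
#     isinstance(name, string_type)
#
# which lets the same calling code run unchanged under Python 2 and Python 3.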
|
py | b401d5dbed555ada727acc9e4f6a23d991349804 | import base64
import copy
import datetime
import json
import re
import secrets
import time
import urllib
from contextlib import contextmanager
from typing import Any, Callable, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
from unittest import mock
import jwt
import ldap
import orjson
import requests
import responses
from bs4 import BeautifulSoup
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
from django.conf import settings
from django.contrib.auth import authenticate
from django.core import mail
from django.http import HttpRequest, HttpResponse
from django.test import override_settings
from django.test.client import RequestFactory
from django.urls import reverse
from django.utils.timezone import now as timezone_now
from django_auth_ldap.backend import LDAPSearch, _LDAPUser
from jwt.exceptions import PyJWTError
from onelogin.saml2.auth import OneLogin_Saml2_Auth
from onelogin.saml2.response import OneLogin_Saml2_Response
from social_core.exceptions import AuthFailed, AuthStateForbidden
from social_django.storage import BaseDjangoStorage
from social_django.strategy import DjangoStrategy
from confirmation.models import Confirmation, create_confirmation_link
from zerver.lib.actions import (
change_user_is_active,
do_create_realm,
do_create_user,
do_deactivate_realm,
do_deactivate_user,
do_invite_users,
do_reactivate_realm,
do_reactivate_user,
do_set_realm_property,
ensure_stream,
)
from zerver.lib.avatar import avatar_url
from zerver.lib.avatar_hash import user_avatar_path
from zerver.lib.dev_ldap_directory import generate_dev_ldap_dir
from zerver.lib.email_validation import (
get_existing_user_errors,
get_realm_email_validator,
validate_email_is_valid,
)
from zerver.lib.exceptions import RateLimited
from zerver.lib.initial_password import initial_password
from zerver.lib.mobile_auth_otp import otp_decrypt_api_key
from zerver.lib.rate_limiter import add_ratelimit_rule, remove_ratelimit_rule
from zerver.lib.request import JsonableError
from zerver.lib.storage import static_path
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import (
create_s3_buckets,
get_test_image_file,
load_subdomain_token,
use_s3_backend,
)
from zerver.lib.upload import MEDIUM_AVATAR_SIZE, resize_avatar
from zerver.lib.users import get_all_api_keys
from zerver.lib.validator import (
Validator,
check_bool,
check_dict_only,
check_int,
check_list,
check_none_or,
check_string,
validate_login_email,
)
from zerver.models import (
CustomProfileField,
CustomProfileFieldValue,
MultiuseInvite,
PasswordTooWeakError,
PreregistrationUser,
Realm,
RealmDomain,
UserProfile,
clear_supported_auth_backends_cache,
email_to_username,
get_realm,
get_user_by_delivery_email,
)
from zerver.signals import JUST_CREATED_THRESHOLD
from zerver.views.auth import log_into_subdomain, maybe_send_to_registration
from zproject.backends import (
AUTH_BACKEND_NAME_MAP,
AppleAuthBackend,
AzureADAuthBackend,
DevAuthBackend,
EmailAuthBackend,
ExternalAuthDataDict,
ExternalAuthResult,
GitHubAuthBackend,
GitLabAuthBackend,
GoogleAuthBackend,
PopulateUserLDAPError,
RateLimitedAuthenticationByUsername,
SAMLAuthBackend,
SocialAuthMixin,
ZulipAuthMixin,
ZulipDummyBackend,
ZulipLDAPAuthBackend,
ZulipLDAPConfigurationError,
ZulipLDAPException,
ZulipLDAPExceptionNoMatchingLDAPUser,
ZulipLDAPExceptionOutsideDomain,
ZulipLDAPUser,
ZulipLDAPUserPopulator,
ZulipRemoteUserBackend,
apple_auth_enabled,
check_password_strength,
dev_auth_enabled,
email_belongs_to_ldap,
get_external_method_dicts,
github_auth_enabled,
gitlab_auth_enabled,
google_auth_enabled,
password_auth_enabled,
query_ldap,
require_email_format_usernames,
saml_auth_enabled,
sync_user_from_ldap,
)
class AuthBackendTest(ZulipTestCase):
def get_username(self, email_to_username: Optional[Callable[[str], str]] = None) -> str:
username = self.example_email("hamlet")
if email_to_username is not None:
username = email_to_username(self.example_email("hamlet"))
return username
def verify_backend(
self,
backend: Any,
*,
good_kwargs: Dict[str, Any],
bad_kwargs: Optional[Dict[str, Any]] = None,
) -> None:
clear_supported_auth_backends_cache()
user_profile = self.example_user("hamlet")
# If bad_kwargs was specified, verify auth fails in that case
if bad_kwargs is not None:
self.assertIsNone(backend.authenticate(**bad_kwargs))
# Verify auth works
result = backend.authenticate(**good_kwargs)
self.assertEqual(user_profile, result)
# Verify auth fails with a deactivated user
do_deactivate_user(user_profile, acting_user=None)
result = backend.authenticate(**good_kwargs)
if isinstance(backend, SocialAuthMixin):
# Returns a redirect to login page with an error.
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, "/login/?is_deactivated=true")
else:
# Just takes you back to the login page treating as
# invalid auth; this is correct because the form will
# provide the appropriate validation error for deactivated
# account.
self.assertIsNone(result)
# Reactivate the user and verify auth works again
do_reactivate_user(user_profile, acting_user=None)
result = backend.authenticate(**good_kwargs)
self.assertEqual(user_profile, result)
# Verify auth fails with a deactivated realm
do_deactivate_realm(user_profile.realm, acting_user=None)
self.assertIsNone(backend.authenticate(**good_kwargs))
# Verify auth works again after reactivating the realm
do_reactivate_realm(user_profile.realm)
result = backend.authenticate(**good_kwargs)
self.assertEqual(user_profile, result)
# ZulipDummyBackend isn't a real backend so the remainder
# doesn't make sense for it
if isinstance(backend, ZulipDummyBackend):
return
# Verify auth fails if the auth backend is disabled on server
with self.settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipDummyBackend",)):
clear_supported_auth_backends_cache()
self.assertIsNone(backend.authenticate(**good_kwargs))
clear_supported_auth_backends_cache()
# Verify auth fails if the auth backend is disabled for the realm
for backend_name in AUTH_BACKEND_NAME_MAP.keys():
if isinstance(backend, AUTH_BACKEND_NAME_MAP[backend_name]):
break
index = getattr(user_profile.realm.authentication_methods, backend_name).number
user_profile.realm.authentication_methods.set_bit(index, False)
user_profile.realm.save()
if "realm" in good_kwargs:
# Because this test is a little unfaithful to the ordering
# (i.e. we fetched the realm object before this function
# was called, when in fact it should be fetched after we
# changed the allowed authentication methods), we need to
# propagate the changes we just made to the actual realm
# object in good_kwargs.
good_kwargs["realm"] = user_profile.realm
self.assertIsNone(backend.authenticate(**good_kwargs))
user_profile.realm.authentication_methods.set_bit(index, True)
user_profile.realm.save()
def test_dummy_backend(self) -> None:
realm = get_realm("zulip")
username = self.get_username()
self.verify_backend(
ZulipDummyBackend(),
good_kwargs=dict(username=username, realm=realm, use_dummy_backend=True),
bad_kwargs=dict(username=username, realm=realm, use_dummy_backend=False),
)
def setup_subdomain(self, user_profile: UserProfile) -> None:
realm = user_profile.realm
realm.string_id = "zulip"
realm.save()
def test_email_auth_backend(self) -> None:
username = self.get_username()
user_profile = self.example_user("hamlet")
password = "testpassword"
user_profile.set_password(password)
user_profile.save()
with mock.patch("zproject.backends.email_auth_enabled", return_value=False), mock.patch(
"zproject.backends.password_auth_enabled", return_value=True
):
return_data: Dict[str, bool] = {}
user = EmailAuthBackend().authenticate(
request=mock.MagicMock(),
username=user_profile.delivery_email,
realm=get_realm("zulip"),
password=password,
return_data=return_data,
)
self.assertEqual(user, None)
self.assertTrue(return_data["email_auth_disabled"])
self.verify_backend(
EmailAuthBackend(),
good_kwargs=dict(
request=mock.MagicMock(),
password=password,
username=username,
realm=get_realm("zulip"),
return_data={},
),
bad_kwargs=dict(
request=mock.MagicMock(),
password=password,
username=username,
realm=get_realm("zephyr"),
return_data={},
),
)
self.verify_backend(
EmailAuthBackend(),
good_kwargs=dict(
request=mock.MagicMock(),
password=password,
username=username,
realm=get_realm("zulip"),
return_data={},
),
bad_kwargs=dict(
request=mock.MagicMock(),
password=password,
username=username,
realm=get_realm("zephyr"),
return_data={},
),
)
def test_email_auth_backend_empty_password(self) -> None:
user_profile = self.example_user("hamlet")
password = "testpassword"
user_profile.set_password(password)
user_profile.save()
# First, verify authentication works with the a nonempty
# password so we know we've set up the test correctly.
self.assertIsNotNone(
EmailAuthBackend().authenticate(
request=mock.MagicMock(),
username=self.example_email("hamlet"),
password=password,
realm=get_realm("zulip"),
)
)
# Now do the same test with the empty string as the password.
password = ""
with self.assertRaises(PasswordTooWeakError):
# UserProfile.set_password protects against setting an empty password.
user_profile.set_password(password)
# We do want to force an empty password for this test, so we bypass the protection
# by using Django's version of this method.
super(UserProfile, user_profile).set_password(password)
user_profile.save()
self.assertIsNone(
EmailAuthBackend().authenticate(
request=mock.MagicMock(),
username=self.example_email("hamlet"),
password=password,
realm=get_realm("zulip"),
)
)
def test_email_auth_backend_disabled_password_auth(self) -> None:
user_profile = self.example_user("hamlet")
password = "testpassword"
user_profile.set_password(password)
user_profile.save()
# Verify if a realm has password auth disabled, correct password is rejected
with mock.patch("zproject.backends.password_auth_enabled", return_value=False):
self.assertIsNone(
EmailAuthBackend().authenticate(
request=mock.MagicMock(),
username=self.example_email("hamlet"),
password=password,
realm=get_realm("zulip"),
)
)
def test_login_preview(self) -> None:
# Test preview=true displays organization login page
# instead of redirecting to app
self.login("iago")
realm = get_realm("zulip")
result = self.client_get("/login/", {"preview": "true"})
self.assertEqual(result.status_code, 200)
self.assert_in_response(realm.description, result)
assert realm.name is not None
self.assert_in_response(realm.name, result)
self.assert_in_response("Log in to Zulip", result)
data = dict(
description=orjson.dumps("New realm description").decode(),
name=orjson.dumps("New Zulip").decode(),
)
result = self.client_patch("/json/realm", data)
self.assert_json_success(result)
result = self.client_get("/login/", {"preview": "true"})
self.assertEqual(result.status_code, 200)
self.assert_in_response("New realm description", result)
self.assert_in_response("New Zulip", result)
result = self.client_get("/login/")
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, "http://zulip.testserver")
@override_settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipDummyBackend",))
def test_no_backend_enabled(self) -> None:
result = self.client_get("/login/")
self.assert_in_success_response(["No authentication backends are enabled"], result)
result = self.client_get("/register/")
self.assert_in_success_response(["No authentication backends are enabled"], result)
@override_settings(AUTHENTICATION_BACKENDS=("zproject.backends.GoogleAuthBackend",))
def test_any_backend_enabled(self) -> None:
# testing to avoid false error messages.
result = self.client_get("/login/")
self.assert_not_in_success_response(["No authentication backends are enabled"], result)
result = self.client_get("/register/")
self.assert_not_in_success_response(["No authentication backends are enabled"], result)
@override_settings(
AUTHENTICATION_BACKENDS=("zproject.backends.ZulipLDAPAuthBackend",), LDAP_EMAIL_ATTR="mail"
)
def test_ldap_backend(self) -> None:
self.init_default_ldap_database()
user_profile = self.example_user("hamlet")
email = user_profile.delivery_email
password = self.ldap_password("hamlet")
self.setup_subdomain(user_profile)
username = self.get_username()
backend = ZulipLDAPAuthBackend()
# Test LDAP auth fails when LDAP server rejects password
self.assertIsNone(
backend.authenticate(
request=mock.MagicMock(),
username=email,
password="wrongpass",
realm=get_realm("zulip"),
)
)
self.verify_backend(
backend,
bad_kwargs=dict(
request=mock.MagicMock(),
username=username,
password=password,
realm=get_realm("zephyr"),
),
good_kwargs=dict(
request=mock.MagicMock(),
username=username,
password=password,
realm=get_realm("zulip"),
),
)
self.verify_backend(
backend,
bad_kwargs=dict(
request=mock.MagicMock(),
username=username,
password=password,
realm=get_realm("zephyr"),
),
good_kwargs=dict(
request=mock.MagicMock(),
username=username,
password=password,
realm=get_realm("zulip"),
),
)
def test_devauth_backend(self) -> None:
self.verify_backend(
DevAuthBackend(),
good_kwargs=dict(dev_auth_username=self.get_username(), realm=get_realm("zulip")),
bad_kwargs=dict(dev_auth_username=self.get_username(), realm=get_realm("zephyr")),
)
@override_settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipRemoteUserBackend",))
def test_remote_user_backend(self) -> None:
username = self.get_username()
self.verify_backend(
ZulipRemoteUserBackend(),
good_kwargs=dict(remote_user=username, realm=get_realm("zulip")),
bad_kwargs=dict(remote_user=username, realm=get_realm("zephyr")),
)
@override_settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipRemoteUserBackend",))
def test_remote_user_backend_invalid_realm(self) -> None:
username = self.get_username()
self.verify_backend(
ZulipRemoteUserBackend(),
good_kwargs=dict(remote_user=username, realm=get_realm("zulip")),
bad_kwargs=dict(remote_user=username, realm=get_realm("zephyr")),
)
@override_settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipRemoteUserBackend",))
@override_settings(SSO_APPEND_DOMAIN="zulip.com")
def test_remote_user_backend_sso_append_domain(self) -> None:
username = self.get_username(email_to_username)
self.verify_backend(
ZulipRemoteUserBackend(),
good_kwargs=dict(remote_user=username, realm=get_realm("zulip")),
bad_kwargs=dict(remote_user=username, realm=get_realm("zephyr")),
)
@override_settings(
AUTHENTICATION_BACKENDS=(
"zproject.backends.GitHubAuthBackend",
"zproject.backends.GoogleAuthBackend",
)
)
def test_social_auth_backends(self) -> None:
user = self.example_user("hamlet")
token_data_dict = {
"access_token": "foobar",
"token_type": "bearer",
}
github_email_data = [
dict(email=user.delivery_email, verified=True, primary=True),
dict(email="[email protected]", verified=True),
dict(email="[email protected]", verified=False),
]
google_email_data = dict(
email=user.delivery_email, name=user.full_name, email_verified=True
)
backends_to_test: Dict[str, Any] = {
"google": {
"urls": [
# The limited process that we test here doesn't require mocking any urls.
],
"backend": GoogleAuthBackend,
},
"github": {
"urls": [
{
"url": "https://api.github.com/user/emails",
"method": responses.GET,
"status": 200,
"body": json.dumps(github_email_data),
},
],
"backend": GitHubAuthBackend,
},
}
def patched_authenticate(
request: Optional[HttpResponse] = None,
**kwargs: Any,
) -> Any:
# This is how we pass the subdomain to the authentication
# backend in production code, so we need to do this setup
# here.
if "subdomain" in kwargs:
backend.strategy.session_set("subdomain", kwargs["subdomain"])
del kwargs["subdomain"]
# Because we're not simulating the full python-social-auth
# pipeline here, we need to provide the user's choice of
# which email to select in the partial phase of the
# pipeline when we display an email picker for the GitHub
# authentication backend. We do that here.
def return_email() -> Dict[str, str]:
return {"email": user.delivery_email}
backend.strategy.request_data = return_email
result = orig_authenticate(backend, request, **kwargs)
return result
def patched_get_verified_emails(*args: Any, **kwargs: Any) -> Any:
return google_email_data["email"]
for backend_name in backends_to_test:
with responses.RequestsMock(assert_all_requests_are_fired=True) as requests_mock:
urls: List[Dict[str, Any]] = backends_to_test[backend_name]["urls"]
for details in urls:
requests_mock.add(
details["method"],
details["url"],
status=details["status"],
body=details["body"],
)
backend_class = backends_to_test[backend_name]["backend"]
# We're creating a new class instance here, so the
# monkey-patching of the instance that we're about to
# do will be discarded at the end of this test.
backend = backend_class()
backend.strategy = DjangoStrategy(storage=BaseDjangoStorage())
orig_authenticate = backend_class.authenticate
backend.authenticate = patched_authenticate
if backend_name == "google":
backend.get_verified_emails = patched_get_verified_emails
good_kwargs = dict(
backend=backend,
strategy=backend.strategy,
storage=backend.strategy.storage,
response=token_data_dict,
subdomain="zulip",
)
bad_kwargs = dict(subdomain="acme")
logger_name = f"zulip.auth.{backend.name}"
with mock.patch(
"zerver.views.auth.redirect_and_log_into_subdomain", return_value=user
), self.assertLogs(logger_name, level="INFO") as info_log:
self.verify_backend(backend, good_kwargs=good_kwargs, bad_kwargs=bad_kwargs)
bad_kwargs["subdomain"] = "zephyr"
self.verify_backend(backend, good_kwargs=good_kwargs, bad_kwargs=bad_kwargs)
# Verify logging for deactivated users
self.assertEqual(
info_log.output,
[
f"INFO:{logger_name}:Failed login attempt for deactivated account: {user.id}@{user.realm.string_id}",
f"INFO:{logger_name}:Failed login attempt for deactivated account: {user.id}@{user.realm.string_id}",
],
)
class RateLimitAuthenticationTests(ZulipTestCase):
@override_settings(RATE_LIMITING_AUTHENTICATE=True)
def do_test_auth_rate_limiting(
self,
attempt_authentication_func: Callable[[HttpRequest, str, str], Optional[UserProfile]],
username: str,
correct_password: str,
wrong_password: str,
expected_user_profile: UserProfile,
) -> None:
# We have to mock RateLimitedAuthenticationByUsername.key to avoid key collisions
# if tests run in parallel.
original_key_method = RateLimitedAuthenticationByUsername.key
salt = secrets.token_hex(16)
def _mock_key(self: RateLimitedAuthenticationByUsername) -> str:
return f"{salt}:{original_key_method(self)}"
def attempt_authentication(username: str, password: str) -> Optional[UserProfile]:
request = HttpRequest()
request.session = mock.MagicMock()
return attempt_authentication_func(request, username, password)
add_ratelimit_rule(10, 2, domain="authenticate_by_username")
with mock.patch.object(RateLimitedAuthenticationByUsername, "key", new=_mock_key):
try:
start_time = time.time()
with mock.patch("time.time", return_value=start_time):
self.assertIsNone(attempt_authentication(username, wrong_password))
self.assertIsNone(attempt_authentication(username, wrong_password))
# 2 failed attempts is the limit, so the next ones should get blocked,
# even with the correct password.
with self.assertRaises(RateLimited):
attempt_authentication(username, correct_password)
with self.assertRaises(RateLimited):
attempt_authentication(username, wrong_password)
# After enough time passes, more authentication attempts can be made:
with mock.patch("time.time", return_value=start_time + 11.0):
self.assertIsNone(attempt_authentication(username, wrong_password))
# Correct password
self.assertEqual(
attempt_authentication(username, correct_password), expected_user_profile
)
# A correct login attempt should reset the rate limits for this user profile,
# so the next two attempts shouldn't get limited:
self.assertIsNone(attempt_authentication(username, wrong_password))
self.assertIsNone(attempt_authentication(username, wrong_password))
# But the third attempt goes over the limit:
with self.assertRaises(RateLimited):
attempt_authentication(username, wrong_password)
finally:
# Clean up to avoid affecting other tests.
RateLimitedAuthenticationByUsername(username).clear_history()
remove_ratelimit_rule(10, 2, domain="authenticate_by_username")
def test_email_auth_backend_user_based_rate_limiting(self) -> None:
user_profile = self.example_user("hamlet")
password = "testpassword"
user_profile.set_password(password)
user_profile.save()
def attempt_authentication(
request: HttpRequest, username: str, password: str
) -> Optional[UserProfile]:
return EmailAuthBackend().authenticate(
request=request,
username=username,
realm=get_realm("zulip"),
password=password,
return_data={},
)
self.do_test_auth_rate_limiting(
attempt_authentication,
user_profile.delivery_email,
password,
"wrong_password",
user_profile,
)
@override_settings(
AUTHENTICATION_BACKENDS=("zproject.backends.ZulipLDAPAuthBackend",), LDAP_EMAIL_ATTR="mail"
)
def test_ldap_backend_user_based_rate_limiting(self) -> None:
self.init_default_ldap_database()
user_profile = self.example_user("hamlet")
password = self.ldap_password("hamlet")
def attempt_authentication(
request: HttpRequest, username: str, password: str
) -> Optional[UserProfile]:
return ZulipLDAPAuthBackend().authenticate(
request=request,
username=username,
realm=get_realm("zulip"),
password=password,
return_data={},
)
self.do_test_auth_rate_limiting(
attempt_authentication,
user_profile.delivery_email,
password,
"wrong_password",
user_profile,
)
@override_settings(
AUTHENTICATION_BACKENDS=(
"zproject.backends.EmailAuthBackend",
"zproject.backends.ZulipLDAPAuthBackend",
),
LDAP_EMAIL_ATTR="mail",
)
def test_email_and_ldap_backends_user_based_rate_limiting(self) -> None:
self.init_default_ldap_database()
user_profile = self.example_user("hamlet")
ldap_password = self.ldap_password("hamlet")
email_password = "email_password"
user_profile.set_password(email_password)
user_profile.save()
def attempt_authentication(
request: HttpRequest, username: str, password: str
) -> Optional[UserProfile]:
return authenticate(
request=request,
username=username,
realm=get_realm("zulip"),
password=password,
return_data={},
)
self.do_test_auth_rate_limiting(
attempt_authentication,
user_profile.delivery_email,
email_password,
"wrong_password",
user_profile,
)
self.do_test_auth_rate_limiting(
attempt_authentication,
user_profile.delivery_email,
ldap_password,
"wrong_password",
user_profile,
)
class CheckPasswordStrengthTest(ZulipTestCase):
def test_check_password_strength(self) -> None:
with self.settings(PASSWORD_MIN_LENGTH=0, PASSWORD_MIN_GUESSES=0):
# Never allow empty password.
self.assertFalse(check_password_strength(""))
with self.settings(PASSWORD_MIN_LENGTH=6, PASSWORD_MIN_GUESSES=1000):
self.assertFalse(check_password_strength(""))
self.assertFalse(check_password_strength("short"))
# Long enough, but too easy:
self.assertFalse(check_password_strength("longer"))
# Good password:
self.assertTrue(check_password_strength("f657gdGGk9"))
class DesktopFlowTestingLib(ZulipTestCase):
def verify_desktop_flow_app_page(self, response: HttpResponse) -> None:
self.assertEqual(response.status_code, 200)
self.assertIn(b"<h1>Finish desktop login</h1>", response.content)
def verify_desktop_flow_end_page(
self, response: HttpResponse, email: str, desktop_flow_otp: str
) -> None:
self.assertEqual(response.status_code, 200)
soup = BeautifulSoup(response.content, "html.parser")
desktop_data = soup.find("input", value=True)["value"]
browser_url = soup.find("a", href=True)["href"]
self.assertEqual(browser_url, "/login/")
decrypted_key = self.verify_desktop_data_and_return_key(desktop_data, desktop_flow_otp)
result = self.client_get(
f"http://zulip.testserver/accounts/login/subdomain/{decrypted_key}"
)
self.assertEqual(result.status_code, 302)
realm = get_realm("zulip")
user_profile = get_user_by_delivery_email(email, realm)
self.assert_logged_in_user_id(user_profile.id)
def verify_desktop_data_and_return_key(self, desktop_data: str, desktop_flow_otp: str) -> str:
key = bytes.fromhex(desktop_flow_otp)
data = bytes.fromhex(desktop_data)
iv = data[:12]
ciphertext = data[12:]
return AESGCM(key).decrypt(iv, ciphertext, b"").decode()
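    # For reference (not part of the original helper): the sending side would
    # produce `desktop_data` as hex(iv + ciphertext) with the same primitives, e.g.
    #
    #     iv = secrets.token_bytes(12)
    #     ciphertext = AESGCM(bytes.fromhex(desktop_flow_otp)).encrypt(iv, key.encode(), b"")
    #     desktop_data = (iv + ciphertext).hex()
    #
    # which is exactly what verify_desktop_data_and_return_key() inverts above.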
class SocialAuthBase(DesktopFlowTestingLib, ZulipTestCase):
"""This is a base class for testing social-auth backends. These
methods are often overridden by subclasses:
register_extra_endpoints() - If the backend being tested calls some extra
endpoints then they can be added here.
get_account_data_dict() - Return the data returned by the user info endpoint
according to the respective backend.
"""
# Don't run base class tests, make sure to set it to False
# in subclass otherwise its tests will not run.
__unittest_skip__ = True
def setUp(self) -> None:
super().setUp()
self.user_profile = self.example_user("hamlet")
self.email = self.user_profile.delivery_email
self.name = self.user_profile.full_name
self.backend = self.BACKEND_CLASS
self.backend.strategy = DjangoStrategy(storage=BaseDjangoStorage())
self.user_profile.backend = self.backend
self.logger_string = f"zulip.auth.{self.backend.name}"
# This is a workaround for the fact that Python social auth
# caches the set of authentication backends that are enabled
# the first time that `social_django.utils` is imported. See
# https://github.com/python-social-auth/social-app-django/pull/162
# for details.
from social_core.backends.utils import load_backends
load_backends(settings.AUTHENTICATION_BACKENDS, force_load=True)
def logger_output(self, output_string: str, type: str) -> str:
return f"{type.upper()}:zulip.auth.{self.backend.name}:{output_string}"
def register_extra_endpoints(
self,
requests_mock: responses.RequestsMock,
account_data_dict: Dict[str, str],
**extra_data: Any,
) -> None:
pass
def prepare_login_url_and_headers(
self,
subdomain: str,
mobile_flow_otp: Optional[str] = None,
desktop_flow_otp: Optional[str] = None,
is_signup: bool = False,
next: str = "",
multiuse_object_key: str = "",
alternative_start_url: Optional[str] = None,
*,
user_agent: Optional[str] = None,
) -> Tuple[str, Dict[str, Any]]:
url = self.LOGIN_URL
if alternative_start_url is not None:
url = alternative_start_url
params = {}
headers = {}
if subdomain == "":
# "testserver" may trip up some libraries' URL validation,
# so let's use the equivalent www. version.
headers["HTTP_HOST"] = "www.testserver"
else:
headers["HTTP_HOST"] = subdomain + ".testserver"
if mobile_flow_otp is not None:
params["mobile_flow_otp"] = mobile_flow_otp
headers["HTTP_USER_AGENT"] = "ZulipAndroid"
if desktop_flow_otp is not None:
params["desktop_flow_otp"] = desktop_flow_otp
if is_signup:
url = self.SIGNUP_URL
params["next"] = next
params["multiuse_object_key"] = multiuse_object_key
if len(params) > 0:
url += f"?{urllib.parse.urlencode(params)}"
if user_agent is not None:
headers["HTTP_USER_AGENT"] = user_agent
return url, headers
def social_auth_test_finish(
self,
result: HttpResponse,
account_data_dict: Dict[str, str],
expect_choose_email_screen: bool,
headers: Any,
**extra_data: Any,
) -> HttpResponse:
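        # Complete the OAuth flow by passing the CSRF `state` from the redirect URL
        # back to the backend's auth-finish endpoint.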
parsed_url = urllib.parse.urlparse(result.url)
csrf_state = urllib.parse.parse_qs(parsed_url.query)["state"]
result = self.client_get(self.AUTH_FINISH_URL, dict(state=csrf_state), **headers)
return result
def generate_access_url_payload(self, account_data_dict: Dict[str, str]) -> str:
return json.dumps(
{
"access_token": "foobar",
"token_type": "bearer",
}
)
def social_auth_test(
self,
account_data_dict: Dict[str, str],
*,
subdomain: str,
mobile_flow_otp: Optional[str] = None,
desktop_flow_otp: Optional[str] = None,
is_signup: bool = False,
next: str = "",
multiuse_object_key: str = "",
expect_choose_email_screen: bool = False,
alternative_start_url: Optional[str] = None,
user_agent: Optional[str] = None,
**extra_data: Any,
) -> HttpResponse:
"""Main entrypoint for all social authentication tests.
* account_data_dict: Dictionary containing the name/email data
that should be returned by the social auth backend.
* subdomain: Which organization's login page is being accessed.
* desktop_flow_otp / mobile_flow_otp: Token to be used for
mobile or desktop authentication flow testing.
* is_signup: Whether we're testing the social flow for
/register (True) or /login (False). This is important
          because we need to verify behavior like the
          "Continue to registration" prompt shown when you try to log in
          with an account that doesn't exist but is allowed to sign up.
* next: Parameter passed through in production authentication
to redirect the user to (e.g.) the specific page in the webapp
that they clicked a link to before being presented with the login
page.
* expect_choose_email_screen: Some social auth backends, like
GitHub, simultaneously authenticate for multiple email addresses.
          Set this to True if we expect the "Choose Email" screen to be
          shown in this test, for backends that have that feature.
* multiuse_object_key: Used when the user has clicked a multi-use
reusable invitation link.
* alternative_start_url: Used to test legacy mobile app behavior.
* user_agent: What user-agent to use for the HTTP requests.
"""
url, headers = self.prepare_login_url_and_headers(
subdomain,
mobile_flow_otp,
desktop_flow_otp,
is_signup,
next,
multiuse_object_key,
alternative_start_url,
user_agent=user_agent,
)
result = self.client_get(url, **headers)
expected_result_url_prefix = f"http://testserver/login/{self.backend.name}/"
if settings.SOCIAL_AUTH_SUBDOMAIN is not None:
expected_result_url_prefix = (
f"http://{settings.SOCIAL_AUTH_SUBDOMAIN}.testserver/login/{self.backend.name}/"
)
if result.status_code != 302 or not result.url.startswith(expected_result_url_prefix):
return result
result = self.client_get(result.url, **headers)
self.assertEqual(result.status_code, 302)
assert self.AUTHORIZATION_URL in result.url
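        # Preserve the cookies set during the redirect, so the session state created
        # on the SOCIAL_AUTH_SUBDOMAIN carries over to the rest of the flow.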
self.client.cookies = result.cookies
        # Next, the browser requests result["Location"] and gets
        # redirected back to the registered redirect URI.
        # We register mock callbacks for the key URLs on the identity provider
        # that the auth completion URL will call.
with responses.RequestsMock(assert_all_requests_are_fired=False) as requests_mock:
requests_mock.add(
requests_mock.POST,
self.ACCESS_TOKEN_URL,
match_querystring=False,
status=200,
body=self.generate_access_url_payload(account_data_dict),
)
requests_mock.add(
requests_mock.GET,
self.USER_INFO_URL,
status=200,
body=json.dumps(account_data_dict),
)
self.register_extra_endpoints(requests_mock, account_data_dict, **extra_data)
result = self.social_auth_test_finish(
result, account_data_dict, expect_choose_email_screen, headers=headers, **extra_data
)
return result
def test_social_auth_no_key(self) -> None:
account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
with self.settings(**{self.CLIENT_KEY_SETTING: None}):
result = self.social_auth_test(
account_data_dict, subdomain="zulip", next="/user_uploads/image"
)
self.assert_in_success_response(["Configuration error"], result)
def test_config_error_development(self) -> None:
if hasattr(self, "CLIENT_KEY_SETTING") and hasattr(self, "CLIENT_SECRET_SETTING"):
with self.settings(**{self.CLIENT_KEY_SETTING: None}):
result = self.client_get(self.LOGIN_URL)
self.assert_in_success_response(["Configuration error"], result)
self.assert_in_success_response([self.CLIENT_KEY_SETTING.lower()], result)
self.assert_in_success_response([self.CLIENT_SECRET_SETTING.lower()], result)
self.assert_in_success_response(["zproject/dev-secrets.conf"], result)
self.assert_not_in_success_response([self.CLIENT_KEY_SETTING], result)
self.assert_not_in_success_response(["zproject/dev_settings.py"], result)
self.assert_not_in_success_response(["/etc/zulip/settings.py"], result)
self.assert_not_in_success_response(["/etc/zulip/zulip-secrets.conf"], result)
@override_settings(DEVELOPMENT=False)
def test_config_error_production(self) -> None:
if hasattr(self, "CLIENT_KEY_SETTING") and hasattr(self, "CLIENT_SECRET_SETTING"):
with self.settings(**{self.CLIENT_KEY_SETTING: None}):
result = self.client_get(self.LOGIN_URL)
self.assert_in_success_response(["Configuration error"], result)
self.assert_in_success_response([self.CLIENT_KEY_SETTING], result)
self.assert_in_success_response(["/etc/zulip/settings.py"], result)
self.assert_in_success_response([self.CLIENT_SECRET_SETTING.lower()], result)
self.assert_in_success_response(["/etc/zulip/zulip-secrets.conf"], result)
self.assert_not_in_success_response([self.CLIENT_KEY_SETTING.lower()], result)
self.assert_not_in_success_response(["zproject/dev_settings.py"], result)
self.assert_not_in_success_response(["zproject/dev-secrets.conf"], result)
def test_social_auth_success(self) -> None:
account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
result = self.social_auth_test(
account_data_dict,
expect_choose_email_screen=False,
subdomain="zulip",
next="/user_uploads/image",
)
data = load_subdomain_token(result)
self.assertEqual(data["email"], self.example_email("hamlet"))
self.assertEqual(data["full_name"], self.name)
self.assertEqual(data["subdomain"], "zulip")
self.assertEqual(data["redirect_to"], "/user_uploads/image")
self.assertEqual(result.status_code, 302)
parsed_url = urllib.parse.urlparse(result.url)
uri = f"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path}"
self.assertTrue(uri.startswith("http://zulip.testserver/accounts/login/subdomain/"))
@override_settings(SOCIAL_AUTH_SUBDOMAIN=None)
def test_when_social_auth_subdomain_is_not_set(self) -> None:
account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
result = self.social_auth_test(
account_data_dict,
subdomain="zulip",
expect_choose_email_screen=False,
next="/user_uploads/image",
)
data = load_subdomain_token(result)
self.assertEqual(data["email"], self.example_email("hamlet"))
self.assertEqual(data["full_name"], self.name)
self.assertEqual(data["subdomain"], "zulip")
self.assertEqual(data["redirect_to"], "/user_uploads/image")
self.assertEqual(result.status_code, 302)
parsed_url = urllib.parse.urlparse(result.url)
uri = f"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path}"
self.assertTrue(uri.startswith("http://zulip.testserver/accounts/login/subdomain/"))
def test_social_auth_deactivated_user(self) -> None:
user_profile = self.example_user("hamlet")
do_deactivate_user(user_profile, acting_user=None)
account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
# We expect to go through the "choose email" screen here,
# because there won't be an existing user account we can
# auto-select for the user.
with self.assertLogs(self.logger_string, level="INFO") as m:
result = self.social_auth_test(
account_data_dict, expect_choose_email_screen=True, subdomain="zulip"
)
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, "/login/?is_deactivated=true")
self.assertEqual(
m.output,
[
self.logger_output(
f"Failed login attempt for deactivated account: {user_profile.id}@zulip", "info"
)
],
)
# TODO: verify whether we provide a clear error message
def test_social_auth_invalid_realm(self) -> None:
account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
with mock.patch("zerver.middleware.get_realm", return_value=get_realm("zulip")):
# This mock.patch case somewhat hackishly arranges it so
# that we switch realms halfway through the test
result = self.social_auth_test(
account_data_dict, subdomain="invalid", next="/user_uploads/image"
)
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, "/accounts/find/")
def test_social_auth_invalid_email(self) -> None:
account_data_dict = self.get_account_data_dict(email="invalid", name=self.name)
with self.assertLogs(self.logger_string, level="INFO") as m:
result = self.social_auth_test(
account_data_dict,
expect_choose_email_screen=True,
subdomain="zulip",
next="/user_uploads/image",
)
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, "/login/?next=/user_uploads/image")
self.assertEqual(
m.output,
[
self.logger_output(
"{} got invalid email argument.".format(self.backend.auth_backend_name),
"warning",
)
],
)
def test_user_cannot_log_into_nonexisting_realm(self) -> None:
account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
result = self.social_auth_test(account_data_dict, subdomain="nonexistent")
self.assert_in_response("There is no Zulip organization hosted at this subdomain.", result)
self.assertEqual(result.status_code, 404)
def test_user_cannot_log_into_wrong_subdomain(self) -> None:
account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
result = self.social_auth_test(
account_data_dict, expect_choose_email_screen=True, subdomain="zephyr"
)
self.assertTrue(result.url.startswith("http://zephyr.testserver/accounts/login/subdomain/"))
result = self.client_get(
result.url.replace("http://zephyr.testserver", ""), subdomain="zephyr"
)
self.assert_in_success_response(
[
"Your email address, [email protected], is not in one of the domains ",
"that are allowed to register for accounts in this organization.",
],
result,
)
def test_social_auth_mobile_success(self) -> None:
mobile_flow_otp = "1234abcd" * 8
account_data_dict = self.get_account_data_dict(email=self.email, name="Full Name")
self.assertEqual(len(mail.outbox), 0)
self.user_profile.date_joined = timezone_now() - datetime.timedelta(
seconds=JUST_CREATED_THRESHOLD + 1
)
self.user_profile.save()
with self.settings(SEND_LOGIN_EMAILS=True):
# Verify that the right thing happens with an invalid-format OTP
result = self.social_auth_test(
account_data_dict, subdomain="zulip", mobile_flow_otp="1234"
)
self.assert_json_error(result, "Invalid OTP")
result = self.social_auth_test(
account_data_dict, subdomain="zulip", mobile_flow_otp="invalido" * 8
)
self.assert_json_error(result, "Invalid OTP")
# Now do it correctly
result = self.social_auth_test(
account_data_dict,
subdomain="zulip",
expect_choose_email_screen=False,
mobile_flow_otp=mobile_flow_otp,
)
self.assertEqual(result.status_code, 302)
redirect_url = result["Location"]
parsed_url = urllib.parse.urlparse(redirect_url)
query_params = urllib.parse.parse_qs(parsed_url.query)
self.assertEqual(parsed_url.scheme, "zulip")
self.assertEqual(query_params["realm"], ["http://zulip.testserver"])
self.assertEqual(query_params["email"], [self.example_email("hamlet")])
encrypted_api_key = query_params["otp_encrypted_api_key"][0]
hamlet_api_keys = get_all_api_keys(self.example_user("hamlet"))
self.assertIn(otp_decrypt_api_key(encrypted_api_key, mobile_flow_otp), hamlet_api_keys)
self.assertEqual(len(mail.outbox), 1)
self.assertIn("Zulip on Android", mail.outbox[0].body)
def test_social_auth_desktop_success(self) -> None:
desktop_flow_otp = "1234abcd" * 8
account_data_dict = self.get_account_data_dict(email=self.email, name="Full Name")
# Verify that the right thing happens with an invalid-format OTP
result = self.social_auth_test(
account_data_dict, subdomain="zulip", desktop_flow_otp="1234"
)
self.assert_json_error(result, "Invalid OTP")
result = self.social_auth_test(
account_data_dict, subdomain="zulip", desktop_flow_otp="invalido" * 8
)
self.assert_json_error(result, "Invalid OTP")
# Now do it correctly
result = self.social_auth_test(
account_data_dict,
subdomain="zulip",
expect_choose_email_screen=False,
desktop_flow_otp=desktop_flow_otp,
user_agent="ZulipElectron/5.0.0",
)
self.verify_desktop_flow_app_page(result)
result = self.social_auth_test(
account_data_dict,
subdomain="zulip",
expect_choose_email_screen=False,
desktop_flow_otp=desktop_flow_otp,
)
self.verify_desktop_flow_end_page(result, self.email, desktop_flow_otp)
def test_social_auth_session_fields_cleared_correctly(self) -> None:
mobile_flow_otp = "1234abcd" * 8
def initiate_auth(mobile_flow_otp: Optional[str] = None) -> None:
url, headers = self.prepare_login_url_and_headers(
subdomain="zulip", mobile_flow_otp=mobile_flow_otp
)
result = self.client_get(url, **headers)
self.assertEqual(result.status_code, 302)
result = self.client_get(result.url, **headers)
self.assertEqual(result.status_code, 302)
# Start social auth with mobile_flow_otp param. It should get saved into the session
# on SOCIAL_AUTH_SUBDOMAIN.
initiate_auth(mobile_flow_otp)
self.assertEqual(self.client.session["mobile_flow_otp"], mobile_flow_otp)
# Make a request without mobile_flow_otp param and verify the field doesn't persist
# in the session from the previous request.
initiate_auth()
self.assertEqual(self.client.session.get("mobile_flow_otp"), None)
def test_social_auth_mobile_and_desktop_flow_in_one_request_error(self) -> None:
otp = "1234abcd" * 8
account_data_dict = self.get_account_data_dict(email=self.email, name="Full Name")
result = self.social_auth_test(
account_data_dict,
subdomain="zulip",
expect_choose_email_screen=False,
desktop_flow_otp=otp,
mobile_flow_otp=otp,
)
self.assert_json_error(
result, "Can't use both mobile_flow_otp and desktop_flow_otp together."
)
def test_social_auth_registration_existing_account(self) -> None:
"""If the user already exists, signup flow just logs them in"""
email = "[email protected]"
name = "Full Name"
account_data_dict = self.get_account_data_dict(email=email, name=name)
result = self.social_auth_test(
account_data_dict, expect_choose_email_screen=True, subdomain="zulip", is_signup=True
)
data = load_subdomain_token(result)
self.assertEqual(data["email"], self.example_email("hamlet"))
# Verify data has the full_name consistent with the user we're logging in as.
self.assertEqual(data["full_name"], self.example_user("hamlet").full_name)
self.assertEqual(data["subdomain"], "zulip")
self.assertEqual(result.status_code, 302)
parsed_url = urllib.parse.urlparse(result.url)
uri = f"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path}"
self.assertTrue(uri.startswith("http://zulip.testserver/accounts/login/subdomain/"))
hamlet = self.example_user("hamlet")
# Name wasn't changed at all
self.assertEqual(hamlet.full_name, "King Hamlet")
def stage_two_of_registration(
self,
result: HttpResponse,
realm: Realm,
subdomain: str,
email: str,
name: str,
expected_final_name: str,
skip_registration_form: bool,
mobile_flow_otp: Optional[str] = None,
desktop_flow_otp: Optional[str] = None,
expect_confirm_registration_page: bool = False,
expect_full_name_prepopulated: bool = True,
) -> None:
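        # Shared helper for the second half of signup via a social backend:
        # follow the subdomain-login redirect, open the confirmation link, submit
        # the registration form (unless it is skipped), and verify the resulting
        # login, with extra handling for the mobile and desktop flows.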
data = load_subdomain_token(result)
self.assertEqual(data["email"], email)
self.assertEqual(data["full_name"], name)
self.assertEqual(data["subdomain"], "zulip")
self.assertEqual(result.status_code, 302)
parsed_url = urllib.parse.urlparse(result.url)
uri = f"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path}"
self.assertTrue(uri.startswith("http://zulip.testserver/accounts/login/subdomain/"))
result = self.client_get(result.url)
if expect_confirm_registration_page:
self.assertEqual(result.status_code, 200)
else:
self.assertEqual(result.status_code, 302)
confirmation = Confirmation.objects.all().last()
confirmation_key = confirmation.confirmation_key
if expect_confirm_registration_page:
self.assert_in_success_response(["do_confirm/" + confirmation_key], result)
do_confirm_url = "/accounts/do_confirm/" + confirmation_key
else:
self.assertIn("do_confirm/" + confirmation_key, result.url)
do_confirm_url = result.url
result = self.client_get(do_confirm_url, name=name)
self.assert_in_response('action="/accounts/register/"', result)
confirmation_data = {"from_confirmation": "1", "key": confirmation_key}
result = self.client_post("/accounts/register/", confirmation_data)
if not skip_registration_form:
self.assert_in_response("We just need you to do one last thing", result)
# Verify that the user is asked for name but not password
self.assert_not_in_success_response(["id_password"], result)
self.assert_in_success_response(["id_full_name"], result)
if expect_full_name_prepopulated:
# Verify the name field gets correctly pre-populated:
self.assert_in_success_response([expected_final_name], result)
# Click confirm registration button.
result = self.client_post(
"/accounts/register/",
{"full_name": expected_final_name, "key": confirmation_key, "terms": True},
)
# Mobile and desktop flow have additional steps:
if mobile_flow_otp:
self.assertEqual(result.status_code, 302)
redirect_url = result["Location"]
parsed_url = urllib.parse.urlparse(redirect_url)
query_params = urllib.parse.parse_qs(parsed_url.query)
self.assertEqual(parsed_url.scheme, "zulip")
self.assertEqual(query_params["realm"], ["http://zulip.testserver"])
self.assertEqual(query_params["email"], [email])
encrypted_api_key = query_params["otp_encrypted_api_key"][0]
user_api_keys = get_all_api_keys(get_user_by_delivery_email(email, realm))
self.assertIn(otp_decrypt_api_key(encrypted_api_key, mobile_flow_otp), user_api_keys)
return
elif desktop_flow_otp:
self.verify_desktop_flow_end_page(result, email, desktop_flow_otp)
            # Now the desktop app is logged in; continue with the logged-in check.
else:
self.assertEqual(result.status_code, 302)
user_profile = get_user_by_delivery_email(email, realm)
self.assert_logged_in_user_id(user_profile.id)
self.assertEqual(user_profile.full_name, expected_final_name)
self.assertFalse(user_profile.has_usable_password())
@override_settings(TERMS_OF_SERVICE=None)
def test_social_auth_registration(self) -> None:
"""If the user doesn't exist yet, social auth can be used to register an account"""
email = "[email protected]"
name = "Full Name"
subdomain = "zulip"
realm = get_realm("zulip")
account_data_dict = self.get_account_data_dict(email=email, name=name)
result = self.social_auth_test(
account_data_dict, expect_choose_email_screen=True, subdomain=subdomain, is_signup=True
)
self.stage_two_of_registration(
result, realm, subdomain, email, name, name, self.BACKEND_CLASS.full_name_validated
)
@override_settings(TERMS_OF_SERVICE=None)
def test_social_auth_mobile_registration(self) -> None:
email = "[email protected]"
name = "Full Name"
subdomain = "zulip"
realm = get_realm("zulip")
mobile_flow_otp = "1234abcd" * 8
account_data_dict = self.get_account_data_dict(email=email, name=name)
result = self.social_auth_test(
account_data_dict,
subdomain="zulip",
expect_choose_email_screen=True,
is_signup=True,
mobile_flow_otp=mobile_flow_otp,
)
self.stage_two_of_registration(
result,
realm,
subdomain,
email,
name,
name,
self.BACKEND_CLASS.full_name_validated,
mobile_flow_otp=mobile_flow_otp,
)
@override_settings(TERMS_OF_SERVICE=None)
def test_social_auth_desktop_registration(self) -> None:
email = "[email protected]"
name = "Full Name"
subdomain = "zulip"
realm = get_realm("zulip")
desktop_flow_otp = "1234abcd" * 8
account_data_dict = self.get_account_data_dict(email=email, name=name)
result = self.social_auth_test(
account_data_dict,
subdomain="zulip",
expect_choose_email_screen=True,
is_signup=True,
desktop_flow_otp=desktop_flow_otp,
)
self.stage_two_of_registration(
result,
realm,
subdomain,
email,
name,
name,
self.BACKEND_CLASS.full_name_validated,
desktop_flow_otp=desktop_flow_otp,
)
@override_settings(TERMS_OF_SERVICE=None)
def test_social_auth_registration_invitation_exists(self) -> None:
"""
This tests the registration flow in the case where an invitation for the user
was generated.
"""
email = "[email protected]"
name = "Full Name"
subdomain = "zulip"
realm = get_realm("zulip")
iago = self.example_user("iago")
do_invite_users(iago, [email], [])
account_data_dict = self.get_account_data_dict(email=email, name=name)
result = self.social_auth_test(
account_data_dict, expect_choose_email_screen=True, subdomain=subdomain, is_signup=True
)
self.stage_two_of_registration(
result, realm, subdomain, email, name, name, self.BACKEND_CLASS.full_name_validated
)
@override_settings(TERMS_OF_SERVICE=None)
def test_social_auth_registration_using_multiuse_invite(self) -> None:
"""If the user doesn't exist yet, social auth can be used to register an account"""
email = "[email protected]"
name = "Full Name"
subdomain = "zulip"
realm = get_realm("zulip")
realm.invite_required = True
realm.save()
stream_names = ["new_stream_1", "new_stream_2"]
streams = []
for stream_name in set(stream_names):
stream = ensure_stream(realm, stream_name, acting_user=None)
streams.append(stream)
referrer = self.example_user("hamlet")
multiuse_obj = MultiuseInvite.objects.create(realm=realm, referred_by=referrer)
multiuse_obj.streams.set(streams)
create_confirmation_link(multiuse_obj, Confirmation.MULTIUSE_INVITE)
multiuse_confirmation = Confirmation.objects.all().last()
multiuse_object_key = multiuse_confirmation.confirmation_key
account_data_dict = self.get_account_data_dict(email=email, name=name)
        # First, try to sign up for the closed realm without using an invitation
result = self.social_auth_test(
account_data_dict, expect_choose_email_screen=True, subdomain=subdomain, is_signup=True
)
result = self.client_get(result.url)
# Verify that we're unable to sign up, since this is a closed realm
self.assertEqual(result.status_code, 200)
self.assert_in_success_response(["Sign up"], result)
result = self.social_auth_test(
account_data_dict,
subdomain=subdomain,
is_signup=True,
expect_choose_email_screen=True,
multiuse_object_key=multiuse_object_key,
)
self.stage_two_of_registration(
result, realm, subdomain, email, name, name, self.BACKEND_CLASS.full_name_validated
)
def test_social_auth_registration_without_is_signup(self) -> None:
"""If `is_signup` is not set then a new account isn't created"""
email = "[email protected]"
name = "Full Name"
account_data_dict = self.get_account_data_dict(email=email, name=name)
result = self.social_auth_test(
account_data_dict, expect_choose_email_screen=True, subdomain="zulip"
)
self.assertEqual(result.status_code, 302)
data = load_subdomain_token(result)
self.assertEqual(data["email"], email)
self.assertEqual(data["full_name"], name)
self.assertEqual(data["subdomain"], "zulip")
self.assertEqual(result.status_code, 302)
parsed_url = urllib.parse.urlparse(result.url)
uri = f"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path}"
self.assertTrue(uri.startswith("http://zulip.testserver/accounts/login/subdomain/"))
result = self.client_get(result.url)
self.assertEqual(result.status_code, 200)
self.assert_in_response("No account found for [email protected].", result)
def test_social_auth_registration_without_is_signup_closed_realm(self) -> None:
"""If the user doesn't exist yet in closed realm, give an error"""
realm = get_realm("zulip")
do_set_realm_property(realm, "emails_restricted_to_domains", True, acting_user=None)
email = "[email protected]"
name = "Full Name"
account_data_dict = self.get_account_data_dict(email=email, name=name)
result = self.social_auth_test(
account_data_dict, expect_choose_email_screen=True, subdomain="zulip"
)
self.assertEqual(result.status_code, 302)
data = load_subdomain_token(result)
self.assertEqual(data["email"], email)
self.assertEqual(data["full_name"], name)
self.assertEqual(data["subdomain"], "zulip")
self.assertEqual(result.status_code, 302)
parsed_url = urllib.parse.urlparse(result.url)
uri = f"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path}"
self.assertTrue(uri.startswith("http://zulip.testserver/accounts/login/subdomain/"))
result = self.client_get(result.url)
self.assertEqual(result.status_code, 200)
self.assert_in_response('action="/register/"', result)
self.assert_in_response(
"Your email address, {}, is not "
"in one of the domains that are allowed to register "
"for accounts in this organization.".format(email),
result,
)
@override_settings(TERMS_OF_SERVICE=None)
def test_social_auth_with_ldap_populate_registration_from_confirmation(self) -> None:
self.init_default_ldap_database()
email = "[email protected]"
name = "Full Name"
realm = get_realm("zulip")
subdomain = "zulip"
ldap_user_attr_map = {"full_name": "cn"}
account_data_dict = self.get_account_data_dict(email=email, name=name)
backend_path = f"zproject.backends.{self.BACKEND_CLASS.__name__}"
with self.settings(
POPULATE_PROFILE_VIA_LDAP=True,
LDAP_APPEND_DOMAIN="zulip.com",
AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
AUTHENTICATION_BACKENDS=(
backend_path,
"zproject.backends.ZulipLDAPUserPopulator",
"zproject.backends.ZulipDummyBackend",
),
), self.assertLogs(level="WARNING") as log_warn:
result = self.social_auth_test(
account_data_dict,
expect_choose_email_screen=True,
subdomain=subdomain,
is_signup=True,
)
# Full name should get populated from LDAP:
self.stage_two_of_registration(
result,
realm,
subdomain,
email,
name,
"New LDAP fullname",
skip_registration_form=True,
)
# Now try a user that doesn't exist in LDAP:
email = self.nonreg_email("alice")
name = "Alice Social"
account_data_dict = self.get_account_data_dict(email=email, name=name)
result = self.social_auth_test(
account_data_dict,
expect_choose_email_screen=True,
subdomain=subdomain,
is_signup=True,
)
# Full name should get populated as provided by the social backend, because
# this user isn't in the LDAP dictionary:
self.stage_two_of_registration(
result,
realm,
subdomain,
email,
name,
name,
skip_registration_form=self.BACKEND_CLASS.full_name_validated,
)
self.assertEqual(
log_warn.output, [f"WARNING:root:New account email {email} could not be found in LDAP"]
)
@override_settings(TERMS_OF_SERVICE=None)
def test_social_auth_with_ldap_auth_registration_from_confirmation(self) -> None:
"""
This test checks that in configurations that use the LDAP authentication backend
and a social backend, it is possible to create non-LDAP users via the social backend.
"""
self.init_default_ldap_database()
email = self.nonreg_email("alice")
name = "Alice Social"
realm = get_realm("zulip")
subdomain = "zulip"
ldap_user_attr_map = {"full_name": "cn"}
account_data_dict = self.get_account_data_dict(email=email, name=name)
backend_path = f"zproject.backends.{self.BACKEND_CLASS.__name__}"
with self.settings(
POPULATE_PROFILE_VIA_LDAP=True,
LDAP_EMAIL_ATTR="mail",
AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
AUTHENTICATION_BACKENDS=(
backend_path,
"zproject.backends.ZulipLDAPAuthBackend",
"zproject.backends.ZulipDummyBackend",
),
), self.assertLogs("zulip.ldap", level="DEBUG") as log_debug, self.assertLogs(
level="WARNING"
) as log_warn:
account_data_dict = self.get_account_data_dict(email=email, name=name)
result = self.social_auth_test(
account_data_dict,
expect_choose_email_screen=True,
subdomain=subdomain,
is_signup=True,
)
# Full name should get populated as provided by the social backend, because
# this user isn't in the LDAP dictionary:
self.stage_two_of_registration(
result,
realm,
subdomain,
email,
name,
name,
skip_registration_form=self.BACKEND_CLASS.full_name_validated,
)
self.assertEqual(
log_warn.output, [f"WARNING:root:New account email {email} could not be found in LDAP"]
)
self.assertEqual(
log_debug.output,
[
f"DEBUG:zulip.ldap:ZulipLDAPAuthBackend: No LDAP user matching django_to_ldap_username result: {email}. Input username: {email}"
],
)
def test_social_auth_complete(self) -> None:
with mock.patch(
"social_core.backends.oauth.BaseOAuth2.process_error",
side_effect=AuthFailed("Not found"),
), self.assertLogs(self.logger_string, level="INFO") as m:
result = self.client_get(reverse("social:complete", args=[self.backend.name]))
self.assertEqual(result.status_code, 302)
self.assertIn("login", result.url)
self.assertEqual(
m.output,
[
self.logger_output("AuthFailed: Authentication failed: ", "info"),
],
)
with mock.patch(
"social_core.backends.oauth.BaseOAuth2.auth_complete",
side_effect=requests.exceptions.HTTPError,
), self.assertLogs(self.logger_string, level="INFO") as m:
result = self.client_get(reverse("social:complete", args=[self.backend.name]))
self.assertEqual(result.status_code, 302)
self.assertIn("login", result.url)
self.assertEqual(
m.output,
[
self.logger_output("HTTPError: ", "info"),
],
)
def test_social_auth_complete_when_base_exc_is_raised(self) -> None:
with mock.patch(
"social_core.backends.oauth.BaseOAuth2.auth_complete",
side_effect=AuthStateForbidden("State forbidden"),
), self.assertLogs(self.logger_string, level="WARNING"):
result = self.client_get(reverse("social:complete", args=[self.backend.name]))
self.assertEqual(result.status_code, 302)
self.assertIn("login", result.url)
@override_settings(TERMS_OF_SERVICE=None)
def test_social_auth_invited_as_admin_but_expired(self) -> None:
iago = self.example_user("iago")
email = self.nonreg_email("alice")
name = "Alice Jones"
do_invite_users(iago, [email], [], invite_as=PreregistrationUser.INVITE_AS["REALM_ADMIN"])
expired_date = timezone_now() - datetime.timedelta(
days=settings.INVITATION_LINK_VALIDITY_DAYS + 1
)
PreregistrationUser.objects.filter(email=email).update(invited_at=expired_date)
subdomain = "zulip"
realm = get_realm("zulip")
account_data_dict = self.get_account_data_dict(email=email, name=name)
result = self.social_auth_test(
account_data_dict, expect_choose_email_screen=True, subdomain=subdomain, is_signup=True
)
self.stage_two_of_registration(
result, realm, subdomain, email, name, name, self.BACKEND_CLASS.full_name_validated
)
        # The invitation is expired, so the user should be created as a normal member only.
created_user = get_user_by_delivery_email(email, realm)
self.assertEqual(created_user.role, UserProfile.ROLE_MEMBER)
class SAMLAuthBackendTest(SocialAuthBase):
__unittest_skip__ = False
BACKEND_CLASS = SAMLAuthBackend
LOGIN_URL = "/accounts/login/social/saml/test_idp"
SIGNUP_URL = "/accounts/register/social/saml/test_idp"
AUTHORIZATION_URL = "https://idp.testshib.org/idp/profile/SAML2/Redirect/SSO"
AUTH_FINISH_URL = "/complete/saml/"
CONFIG_ERROR_URL = "/config-error/saml"
# We have to define our own social_auth_test as the flow of SAML authentication
# is different from the other social backends.
def social_auth_test(
self,
account_data_dict: Dict[str, str],
*,
subdomain: str,
mobile_flow_otp: Optional[str] = None,
desktop_flow_otp: Optional[str] = None,
is_signup: bool = False,
next: str = "",
multiuse_object_key: str = "",
user_agent: Optional[str] = None,
extra_attributes: Mapping[str, List[str]] = {},
**extra_data: Any,
) -> HttpResponse:
url, headers = self.prepare_login_url_and_headers(
subdomain,
mobile_flow_otp,
desktop_flow_otp,
is_signup,
next,
multiuse_object_key,
user_agent=user_agent,
)
result = self.client_get(url, **headers)
expected_result_url_prefix = f"http://testserver/login/{self.backend.name}/"
if settings.SOCIAL_AUTH_SUBDOMAIN is not None:
expected_result_url_prefix = (
f"http://{settings.SOCIAL_AUTH_SUBDOMAIN}.testserver/login/{self.backend.name}/"
)
if result.status_code != 302 or not result.url.startswith(expected_result_url_prefix):
return result
result = self.client_get(result.url, **headers)
self.assertEqual(result.status_code, 302)
assert self.AUTHORIZATION_URL in result.url
assert "samlrequest" in result.url.lower()
self.client.cookies = result.cookies
parsed_url = urllib.parse.urlparse(result.url)
relay_state = urllib.parse.parse_qs(parsed_url.query)["RelayState"][0]
# Make sure params are getting encoded into RelayState:
data = SAMLAuthBackend.get_data_from_redis(orjson.loads(relay_state)["state_token"])
assert data is not None
if next:
self.assertEqual(data["next"], next)
if is_signup:
self.assertEqual(data["is_signup"], "1")
saml_response = self.generate_saml_response(
email=account_data_dict["email"],
name=account_data_dict["name"],
extra_attributes=extra_attributes,
)
post_params = {"SAMLResponse": saml_response, "RelayState": relay_state}
# The mock below is necessary, so that python3-saml accepts our SAMLResponse,
# and doesn't verify the cryptographic signatures etc., since generating
# a perfectly valid SAMLResponse for the purpose of these tests would be too complex,
# and we simply use one loaded from a fixture file.
with mock.patch.object(OneLogin_Saml2_Response, "is_valid", return_value=True):
            # We are simulating a cross-domain POST request here. The session cookie
            # is SameSite=Lax, so the browser wouldn't send it with this request. To
            # simulate that effect with the Django test client, we flush the session
            # before making the request.
self.client.session.flush()
result = self.client_post(self.AUTH_FINISH_URL, post_params, **headers)
return result
def generate_saml_response(
self, email: str, name: str, extra_attributes: Mapping[str, List[str]] = {}
) -> str:
"""
The samlresponse.txt fixture has a pre-generated SAMLResponse,
with {email}, {first_name}, {last_name} placeholders, that can
be filled out with the data we want.
"""
name_parts = name.split(" ")
first_name = name_parts[0]
last_name = name_parts[1]
extra_attrs = ""
for extra_attr_name, extra_attr_values in extra_attributes.items():
values = "".join(
'<saml2:AttributeValue xmlns:xs="http://www.w3.org/2001/XMLSchema" '
+ 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="xs:string">'
+ f"{value}</saml2:AttributeValue>"
for value in extra_attr_values
)
extra_attrs += (
f'<saml2:Attribute Name="{extra_attr_name}" '
+ 'NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:unspecified">'
+ f"{values}</saml2:Attribute>"
)
unencoded_saml_response = self.fixture_data("samlresponse.txt", type="saml").format(
email=email,
first_name=first_name,
last_name=last_name,
extra_attrs=extra_attrs,
)
# SAMLResponse needs to be base64-encoded.
saml_response: str = base64.b64encode(unencoded_saml_response.encode()).decode()
return saml_response
def get_account_data_dict(self, email: str, name: str) -> Dict[str, Any]:
return dict(email=email, name=name)
def test_social_auth_no_key(self) -> None:
"""
        Since SAML has no direct equivalent of CLIENT_KEY_SETTING,
        we override this test to cover the case where the required
        SOCIAL_AUTH_SAML_ENABLED_IDPS setting isn't configured.
"""
account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
with self.settings(SOCIAL_AUTH_SAML_ENABLED_IDPS=None):
result = self.social_auth_test(
account_data_dict, subdomain="zulip", next="/user_uploads/image"
)
self.assert_in_success_response(["Configuration error", "SAML authentication"], result)
# Test the signup path too:
result = self.social_auth_test(
account_data_dict, is_signup=True, subdomain="zulip", next="/user_uploads/image"
)
self.assert_in_success_response(["Configuration error", "SAML authentication"], result)
def test_config_error_page(self) -> None:
with self.assertLogs(level="INFO") as info_log:
result = self.client_get("/accounts/login/social/saml")
self.assertEqual(
info_log.output,
["INFO:root:Attempted to initiate SAML authentication with wrong idp argument: None"],
)
self.assert_in_success_response(["Configuration error", "SAML authentication"], result)
def test_saml_auth_works_without_private_public_keys(self) -> None:
with self.settings(SOCIAL_AUTH_SAML_SP_PUBLIC_CERT="", SOCIAL_AUTH_SAML_SP_PRIVATE_KEY=""):
self.test_social_auth_success()
def test_saml_auth_enabled(self) -> None:
with self.settings(AUTHENTICATION_BACKENDS=("zproject.backends.SAMLAuthBackend",)):
self.assertTrue(saml_auth_enabled())
result = self.client_get("/saml/metadata.xml")
self.assert_in_success_response(
[f'entityID="{settings.SOCIAL_AUTH_SAML_SP_ENTITY_ID}"'],
result,
)
def test_social_auth_complete(self) -> None:
with mock.patch.object(OneLogin_Saml2_Response, "is_valid", return_value=True):
with mock.patch.object(
OneLogin_Saml2_Auth, "is_authenticated", return_value=False
), self.assertLogs(self.logger_string, level="INFO") as m:
# This mock causes AuthFailed to be raised.
saml_response = self.generate_saml_response(self.email, self.name)
relay_state = orjson.dumps(
dict(
state_token=SAMLAuthBackend.put_data_in_redis({"subdomain": "zulip"}),
)
).decode()
post_params = {"SAMLResponse": saml_response, "RelayState": relay_state}
result = self.client_post("/complete/saml/", post_params)
self.assertEqual(result.status_code, 302)
self.assertIn("login", result.url)
self.assertEqual(
m.output,
[
self.logger_output(
"AuthFailed: Authentication failed: SAML login failed: [] (None)", "info"
)
],
)
def test_social_auth_complete_when_base_exc_is_raised(self) -> None:
with mock.patch.object(OneLogin_Saml2_Response, "is_valid", return_value=True):
with mock.patch(
"social_core.backends.saml.SAMLAuth.auth_complete",
side_effect=AuthStateForbidden("State forbidden"),
), self.assertLogs(self.logger_string, level="WARNING") as m:
saml_response = self.generate_saml_response(self.email, self.name)
relay_state = orjson.dumps(
dict(
state_token=SAMLAuthBackend.put_data_in_redis({"subdomain": "zulip"}),
)
).decode()
post_params = {"SAMLResponse": saml_response, "RelayState": relay_state}
result = self.client_post("/complete/saml/", post_params)
self.assertEqual(result.status_code, 302)
self.assertIn("login", result.url)
self.assertEqual(
m.output, [self.logger_output("Wrong state parameter given.", "warning")]
)
def test_social_auth_complete_bad_params(self) -> None:
# Simple GET for /complete/saml without the required parameters.
# This tests the auth_complete wrapped in our SAMLAuthBackend,
        # ensuring it prevents such requests from causing an internal server error.
with self.assertLogs(self.logger_string, level="INFO") as m:
result = self.client_get("/complete/saml/")
self.assertEqual(result.status_code, 302)
self.assertIn("login", result.url)
self.assertEqual(
m.output, [self.logger_output("/complete/saml/: No SAMLResponse in request.", "info")]
)
# Check that POSTing the RelayState, but with missing SAMLResponse,
# doesn't cause errors either:
with self.assertLogs(self.logger_string, level="INFO") as m:
relay_state = orjson.dumps(
dict(
state_token=SAMLAuthBackend.put_data_in_redis({"subdomain": "zulip"}),
)
).decode()
post_params = {"RelayState": relay_state}
result = self.client_post("/complete/saml/", post_params)
self.assertEqual(result.status_code, 302)
self.assertIn("login", result.url)
self.assertEqual(
m.output, [self.logger_output("/complete/saml/: No SAMLResponse in request.", "info")]
)
# Now test bad SAMLResponses.
with self.assertLogs(self.logger_string, level="INFO") as m:
relay_state = orjson.dumps(
dict(
state_token=SAMLAuthBackend.put_data_in_redis({"subdomain": "zulip"}),
)
).decode()
post_params = {"RelayState": relay_state, "SAMLResponse": ""}
result = self.client_post("/complete/saml/", post_params)
self.assertEqual(result.status_code, 302)
self.assertIn("login", result.url)
            self.assertNotEqual(m.output, [])
with self.assertLogs(self.logger_string, level="INFO") as m:
relay_state = orjson.dumps(
dict(
state_token=SAMLAuthBackend.put_data_in_redis({"subdomain": "zulip"}),
)
).decode()
post_params = {"RelayState": relay_state, "SAMLResponse": "b"}
result = self.client_post("/complete/saml/", post_params)
self.assertEqual(result.status_code, 302)
self.assertIn("login", result.url)
            self.assertNotEqual(m.output, [])
with self.assertLogs(self.logger_string, level="INFO") as m:
relay_state = orjson.dumps(
dict(
state_token=SAMLAuthBackend.put_data_in_redis({"subdomain": "zulip"}),
)
).decode()
post_params = {
"RelayState": relay_state,
"SAMLResponse": base64.b64encode(b"test").decode(),
}
result = self.client_post("/complete/saml/", post_params)
self.assertEqual(result.status_code, 302)
self.assertIn("login", result.url)
            self.assertNotEqual(m.output, [])
def test_social_auth_complete_no_subdomain(self) -> None:
with self.assertLogs(self.logger_string, level="INFO") as m:
post_params = {
"RelayState": "",
"SAMLResponse": self.generate_saml_response(
email=self.example_email("hamlet"), name="King Hamlet"
),
}
with mock.patch.object(SAMLAuthBackend, "choose_subdomain", return_value=None):
result = self.client_post("/complete/saml/", post_params)
self.assertEqual(result.status_code, 302)
self.assertEqual("/login/", result.url)
self.assertEqual(
m.output,
[
self.logger_output(
"/complete/saml/: Can't figure out subdomain for this authentication request. relayed_params: {}".format(
"{}"
),
"info",
)
],
)
def test_social_auth_complete_wrong_issuing_idp(self) -> None:
relay_state = orjson.dumps(
dict(
state_token=SAMLAuthBackend.put_data_in_redis({"subdomain": "zulip"}),
)
).decode()
saml_response = self.generate_saml_response(
email=self.example_email("hamlet"), name="King Hamlet"
)
# We change the entity_id of the configured test IdP, which means it won't match
# the Entity ID in the SAMLResponse generated above.
idps_dict = copy.deepcopy(settings.SOCIAL_AUTH_SAML_ENABLED_IDPS)
idps_dict["test_idp"]["entity_id"] = "https://different.idp.example.com/"
with self.settings(SOCIAL_AUTH_SAML_ENABLED_IDPS=idps_dict):
with self.assertLogs(self.logger_string, level="INFO") as m:
post_params = {"RelayState": relay_state, "SAMLResponse": saml_response}
result = self.client_post("/complete/saml/", post_params)
self.assertEqual(result.status_code, 302)
self.assertEqual("/login/", result.url)
self.assertEqual(
m.output,
[
self.logger_output(
"/complete/saml/: No valid IdP as issuer of the SAMLResponse.", "info"
)
],
)
def test_social_auth_complete_valid_get_idp_bad_samlresponse(self) -> None:
"""
This tests for a hypothetical scenario where our basic parsing of the SAMLResponse
successfully returns the issuing IdP, but it fails further down the line, during proper
validation in the underlying libraries.
"""
with self.assertLogs(self.logger_string, level="INFO") as m, mock.patch.object(
SAMLAuthBackend, "get_issuing_idp", return_value="test_idp"
):
relay_state = orjson.dumps(
dict(
state_token=SAMLAuthBackend.put_data_in_redis({"subdomain": "zulip"}),
)
).decode()
post_params = {
"RelayState": relay_state,
"SAMLResponse": base64.b64encode(b"test").decode(),
}
result = self.client_post("/complete/saml/", post_params)
self.assertEqual(result.status_code, 302)
self.assertIn("login", result.url)
            self.assertNotEqual(m.output, [])
def test_social_auth_saml_bad_idp_param_on_login_page(self) -> None:
with self.assertLogs(self.logger_string, level="INFO") as m:
result = self.client_get("/login/saml/")
self.assertEqual(result.status_code, 302)
self.assertEqual("/login/", result.url)
self.assertEqual(
m.output,
[
self.logger_output(
"/login/saml/ : Bad idp param: KeyError: {}.".format("'idp'"), "info"
)
],
)
with self.assertLogs(self.logger_string, level="INFO") as m:
result = self.client_get("/login/saml/", {"idp": "bad_idp"})
self.assertEqual(result.status_code, 302)
self.assertEqual("/login/", result.url)
self.assertEqual(
m.output,
[
self.logger_output(
"/login/saml/ : Bad idp param: KeyError: {}.".format("'bad_idp'"), "info"
)
],
)
def test_social_auth_invalid_email(self) -> None:
"""
This test needs an override from the original class. For security reasons,
the 'next' and 'mobile_flow_otp' params don't get passed on in the session
if the authentication attempt failed. See SAMLAuthBackend.auth_complete for details.
"""
account_data_dict = self.get_account_data_dict(email="invalid", name=self.name)
with self.assertLogs(self.logger_string, "WARNING") as warn_log:
result = self.social_auth_test(
account_data_dict,
expect_choose_email_screen=True,
subdomain="zulip",
next="/user_uploads/image",
)
self.assertEqual(
warn_log.output, [self.logger_output("SAML got invalid email argument.", "warning")]
)
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, "/login/")
def test_social_auth_saml_multiple_idps_configured(self) -> None:
        # Set up a new SOCIAL_AUTH_SAML_ENABLED_IDPS dict with two IdPs.
# We deepcopy() dictionaries around for the sake of brevity,
# to avoid having to spell them out explicitly here.
# The second idp's configuration is a copy of the first one,
# with name test_idp2 and altered url. It is also configured to be
# limited to the zulip realm, so that we get to test both types
# of configs here.
idps_dict = copy.deepcopy(settings.SOCIAL_AUTH_SAML_ENABLED_IDPS)
idps_dict["test_idp2"] = copy.deepcopy(idps_dict["test_idp"])
idps_dict["test_idp2"]["url"] = "https://idp2.example.com/idp/profile/SAML2/Redirect/SSO"
idps_dict["test_idp2"]["display_name"] = "Second Test IdP"
idps_dict["test_idp2"]["limit_to_subdomains"] = ["zulip"]
# Run tests with multiple idps configured:
with self.settings(SOCIAL_AUTH_SAML_ENABLED_IDPS=idps_dict):
# Go to the login page and check that buttons to log in show up for both IdPs:
result = self.client_get("/accounts/login/")
self.assert_in_success_response(["Log in with Test IdP"], result)
self.assert_in_success_response(["/accounts/login/social/saml/test_idp"], result)
self.assert_in_success_response(["Log in with Second Test IdP"], result)
self.assert_in_success_response(["/accounts/login/social/saml/test_idp2"], result)
# Try successful authentication with the regular idp from all previous tests:
self.test_social_auth_success()
# Now test with the second idp:
original_LOGIN_URL = self.LOGIN_URL
original_SIGNUP_URL = self.SIGNUP_URL
original_AUTHORIZATION_URL = self.AUTHORIZATION_URL
self.LOGIN_URL = "/accounts/login/social/saml/test_idp2"
self.SIGNUP_URL = "/accounts/register/social/saml/test_idp2"
self.AUTHORIZATION_URL = idps_dict["test_idp2"]["url"]
try:
self.test_social_auth_success()
finally:
# Restore original values at the end, regardless of what happens
# in the block above, to avoid affecting other tests in unpredictable
# ways.
self.LOGIN_URL = original_LOGIN_URL
self.SIGNUP_URL = original_SIGNUP_URL
self.AUTHORIZATION_URL = original_AUTHORIZATION_URL
def test_social_auth_saml_idp_limited_to_subdomains_success(self) -> None:
idps_dict = copy.deepcopy(settings.SOCIAL_AUTH_SAML_ENABLED_IDPS)
idps_dict["test_idp"]["limit_to_subdomains"] = ["zulip"]
with self.settings(SOCIAL_AUTH_SAML_ENABLED_IDPS=idps_dict):
self.test_social_auth_success()
def test_social_auth_saml_idp_limited_to_subdomains_attempt_wrong_realm(self) -> None:
idps_dict = copy.deepcopy(settings.SOCIAL_AUTH_SAML_ENABLED_IDPS)
idps_dict["test_idp"]["limit_to_subdomains"] = ["zulip"]
with self.settings(SOCIAL_AUTH_SAML_ENABLED_IDPS=idps_dict):
account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
with self.assertLogs(self.logger_string, level="INFO") as m:
result = self.social_auth_test(account_data_dict, subdomain="zephyr")
self.assertEqual(result.status_code, 302)
self.assertEqual("/login/", result.url)
self.assertEqual(
m.output,
[
self.logger_output(
"/complete/saml/: Authentication request with IdP test_idp but this provider is not enabled "
"for this subdomain zephyr.",
"info",
)
],
)
def test_social_auth_saml_login_bad_idp_arg(self) -> None:
for action in ["login", "register"]:
with self.assertLogs(level="INFO") as info_log:
result = self.client_get(f"/accounts/{action}/social/saml")
# Missing idp argument.
self.assert_in_success_response(["Configuration error", "SAML authentication"], result)
self.assertEqual(
info_log.output,
[
"INFO:root:Attempted to initiate SAML authentication with wrong idp argument: None"
],
)
with self.assertLogs(level="INFO") as info_log:
result = self.client_get(f"/accounts/{action}/social/saml/nonexistent_idp")
# No such IdP is configured.
self.assertEqual(
info_log.output,
[
"INFO:root:Attempted to initiate SAML authentication with wrong idp argument: nonexistent_idp"
],
)
self.assert_in_success_response(["Configuration error", "SAML authentication"], result)
result = self.client_get(f"/accounts/{action}/social/saml/")
# No matching URL pattern.
self.assertEqual(result.status_code, 404)
def test_social_auth_saml_require_limit_to_subdomains(self) -> None:
idps_dict = copy.deepcopy(settings.SOCIAL_AUTH_SAML_ENABLED_IDPS)
idps_dict["test_idp2"] = copy.deepcopy(idps_dict["test_idp"])
idps_dict["test_idp2"]["url"] = "https://idp2.example.com/idp/profile/SAML2/Redirect/SSO"
idps_dict["test_idp2"]["display_name"] = "Second Test IdP"
idps_dict["test_idp2"]["limit_to_subdomains"] = ["zulip"]
with self.settings(
SOCIAL_AUTH_SAML_ENABLED_IDPS=idps_dict, SAML_REQUIRE_LIMIT_TO_SUBDOMAINS=True
):
with self.assertLogs(self.logger_string, level="ERROR") as m:
# Initialization of the backend should validate the configured IdPs
# with respect to the SAML_REQUIRE_LIMIT_TO_SUBDOMAINS setting and remove
# the non-compliant ones.
SAMLAuthBackend()
self.assertEqual(list(settings.SOCIAL_AUTH_SAML_ENABLED_IDPS.keys()), ["test_idp2"])
self.assertEqual(
m.output,
[
self.logger_output(
"SAML_REQUIRE_LIMIT_TO_SUBDOMAINS is enabled and the following "
"IdPs don't have limit_to_subdomains specified and will be ignored: "
"['test_idp']",
"error",
)
],
)
def test_idp_initiated_signin_subdomain_specified(self) -> None:
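        # IdP-initiated SSO: the SAMLResponse arrives with a RelayState that
        # explicitly names the target subdomain.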
post_params = {
"RelayState": '{"subdomain": "zulip"}',
"SAMLResponse": self.generate_saml_response(email=self.email, name=self.name),
}
with mock.patch.object(OneLogin_Saml2_Response, "is_valid", return_value=True):
# We're not able to generate valid signatures in tests, so we need the mock.
result = self.client_post("/complete/saml/", post_params)
data = load_subdomain_token(result)
self.assertEqual(data["email"], self.example_email("hamlet"))
self.assertEqual(data["full_name"], self.name)
self.assertEqual(data["subdomain"], "zulip")
self.assertEqual(result.status_code, 302)
parsed_url = urllib.parse.urlparse(result.url)
uri = f"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path}"
self.assertTrue(uri.startswith("http://zulip.testserver/accounts/login/subdomain/"))
self.client_get(uri)
self.assert_logged_in_user_id(self.example_user("hamlet").id)
def test_choose_subdomain_invalid_subdomain_specified(self) -> None:
post_params = {
"RelayState": '{"subdomain": "invalid"}',
"SAMLResponse": self.generate_saml_response(email=self.email, name=self.name),
}
with mock.patch.object(OneLogin_Saml2_Response, "is_valid", return_value=True):
# We're not able to generate valid signatures in tests, so we need the mock.
result = self.client_post("/complete/saml/", post_params)
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, "/accounts/find/")
def test_idp_initiated_signin_subdomain_implicit(self) -> None:
post_params = {
"RelayState": "",
"SAMLResponse": self.generate_saml_response(email=self.email, name=self.name),
}
with mock.patch.object(OneLogin_Saml2_Response, "is_valid", return_value=True):
# We're not able to generate valid signatures in tests, so we need the mock.
result = self.client_post("http://zulip.testserver/complete/saml/", post_params)
data = load_subdomain_token(result)
self.assertEqual(data["email"], self.example_email("hamlet"))
self.assertEqual(data["full_name"], self.name)
self.assertEqual(data["subdomain"], "zulip")
self.assertEqual(result.status_code, 302)
parsed_url = urllib.parse.urlparse(result.url)
uri = f"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path}"
self.assertTrue(uri.startswith("http://zulip.testserver/accounts/login/subdomain/"))
self.client_get(uri)
self.assert_logged_in_user_id(self.example_user("hamlet").id)
def test_idp_initiated_signin_subdomain_implicit_no_relaystate_param(self) -> None:
post_params = {
"SAMLResponse": self.generate_saml_response(email=self.email, name=self.name),
}
with mock.patch.object(OneLogin_Saml2_Response, "is_valid", return_value=True):
# We're not able to generate valid signatures in tests, so we need the mock.
result = self.client_post("http://zulip.testserver/complete/saml/", post_params)
data = load_subdomain_token(result)
self.assertEqual(data["email"], self.example_email("hamlet"))
self.assertEqual(data["full_name"], self.name)
self.assertEqual(data["subdomain"], "zulip")
self.assertEqual(result.status_code, 302)
parsed_url = urllib.parse.urlparse(result.url)
uri = f"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path}"
self.assertTrue(uri.startswith("http://zulip.testserver/accounts/login/subdomain/"))
self.client_get(uri)
self.assert_logged_in_user_id(self.example_user("hamlet").id)
def test_idp_initiated_signin_subdomain_implicit_invalid(self) -> None:
post_params = {
"RelayState": "",
"SAMLResponse": self.generate_saml_response(email=self.email, name=self.name),
}
with self.assertLogs(self.logger_string, level="INFO") as m:
with mock.patch("zproject.backends.get_subdomain", return_value="invalid"):
# Due to the quirks of our test setup, get_subdomain on all these `some_subdomain.testserver`
# requests returns 'zulip', so we need to mock it here.
result = self.client_post("http://invalid.testserver/complete/saml/", post_params)
self.assertEqual(result.status_code, 302)
self.assertEqual("/login/", result.url)
self.assertEqual(
m.output,
[
self.logger_output(
"/complete/saml/: Can't figure out subdomain for this authentication request. relayed_params: {}",
"info",
)
],
)
def test_social_auth_saml_idp_org_membership_success(self) -> None:
idps_dict = copy.deepcopy(settings.SOCIAL_AUTH_SAML_ENABLED_IDPS)
idps_dict["test_idp"]["attr_org_membership"] = "member"
with self.settings(SOCIAL_AUTH_SAML_ENABLED_IDPS=idps_dict):
account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
result = self.social_auth_test(
account_data_dict,
subdomain="zulip",
expect_choose_email_screen=False,
extra_attributes=dict(member=["zulip"]),
)
data = load_subdomain_token(result)
self.assertEqual(data["email"], self.email)
self.assertEqual(data["full_name"], self.name)
self.assertEqual(data["subdomain"], "zulip")
self.assertEqual(result.status_code, 302)
def test_social_auth_saml_idp_org_membership_root_subdomain(self) -> None:
realm = get_realm("zulip")
realm.string_id = ""
realm.save()
idps_dict = copy.deepcopy(settings.SOCIAL_AUTH_SAML_ENABLED_IDPS)
idps_dict["test_idp"]["attr_org_membership"] = "member"
with self.settings(SOCIAL_AUTH_SAML_ENABLED_IDPS=idps_dict):
# Having one of the settings.ROOT_SUBDOMAIN_ALIASES in the membership attributes
# authorizes the user to access the root subdomain.
account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
result = self.social_auth_test(
account_data_dict,
subdomain="",
expect_choose_email_screen=False,
extra_attributes=dict(member=["www"]),
)
data = load_subdomain_token(result)
self.assertEqual(data["email"], self.email)
self.assertEqual(data["full_name"], self.name)
self.assertEqual(data["subdomain"], "")
self.assertEqual(result.status_code, 302)
# Failure, the user doesn't have entitlements for the root subdomain.
account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
with self.assertLogs(self.logger_string, level="INFO") as m:
result = self.social_auth_test(
account_data_dict,
subdomain="",
expect_choose_email_screen=False,
extra_attributes=dict(member=["zephyr"]),
)
self.assertEqual(result.status_code, 302)
self.assertEqual(
m.output,
[
self.logger_output(
"AuthFailed: Authentication failed: SAML user from IdP test_idp rejected due to "
+ "missing entitlement for subdomain ''. User entitlements: ['zephyr'].",
"info",
)
],
)
def test_social_auth_saml_idp_org_membership_failed(self) -> None:
idps_dict = copy.deepcopy(settings.SOCIAL_AUTH_SAML_ENABLED_IDPS)
idps_dict["test_idp"]["attr_org_membership"] = "member"
with self.settings(SOCIAL_AUTH_SAML_ENABLED_IDPS=idps_dict):
account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
with self.assertLogs(self.logger_string, level="INFO") as m:
result = self.social_auth_test(
account_data_dict,
subdomain="zulip",
extra_attributes=dict(member=["zephyr", "othersubdomain"]),
)
self.assertEqual(result.status_code, 302)
self.assertEqual("/login/", result.url)
self.assertEqual(
m.output,
[
self.logger_output(
"AuthFailed: Authentication failed: SAML user from IdP test_idp rejected due to "
+ "missing entitlement for subdomain 'zulip'. User entitlements: ['zephyr', 'othersubdomain'].",
"info",
)
],
)
class AppleAuthMixin:
BACKEND_CLASS = AppleAuthBackend
CLIENT_KEY_SETTING = "SOCIAL_AUTH_APPLE_KEY"
AUTHORIZATION_URL = "https://appleid.apple.com/auth/authorize"
ACCESS_TOKEN_URL = "https://appleid.apple.com/auth/token"
AUTH_FINISH_URL = "/complete/apple/"
CONFIG_ERROR_URL = "/config-error/apple"
def generate_id_token(
self, account_data_dict: Dict[str, str], audience: Optional[str] = None
) -> str:
payload = dict(email=account_data_dict["email"])
# This setup is important because python-social-auth decodes `id_token`
# with `SOCIAL_AUTH_APPLE_CLIENT` as the `audience`
payload["aud"] = settings.SOCIAL_AUTH_APPLE_CLIENT
if audience is not None:
payload["aud"] = audience
headers = {"kid": "SOMEKID"}
private_key = settings.APPLE_ID_TOKEN_GENERATION_KEY
id_token = jwt.encode(payload, private_key, algorithm="RS256", headers=headers).decode(
"utf-8"
)
return id_token
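    # For reference, a decoded id_token payload generated above looks roughly
    # like the following (a sketch; "aud" is SOCIAL_AUTH_APPLE_CLIENT unless an
    # explicit audience is passed, e.g. to exercise the invalid-audience test):
    #
    #     {"email": account_data_dict["email"], "aud": settings.SOCIAL_AUTH_APPLE_CLIENT}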
def get_account_data_dict(self, email: str, name: str) -> Dict[str, Any]:
name_parts = name.split(" ")
first_name = name_parts[0]
last_name = ""
        if len(name_parts) > 1:
last_name = name_parts[-1]
name_dict = {"firstName": first_name, "lastName": last_name}
return dict(email=email, name=name_dict, email_verified=True)
class AppleIdAuthBackendTest(AppleAuthMixin, SocialAuthBase):
__unittest_skip__ = False
LOGIN_URL = "/accounts/login/social/apple"
SIGNUP_URL = "/accounts/register/social/apple"
# This URL isn't used in the Apple auth flow, so we just set a
# dummy value to keep SocialAuthBase common code happy.
USER_INFO_URL = "/invalid-unused-url"
def social_auth_test_finish(
self,
result: HttpResponse,
account_data_dict: Dict[str, str],
expect_choose_email_screen: bool,
headers: Any,
**extra_data: Any,
) -> HttpResponse:
parsed_url = urllib.parse.urlparse(result.url)
state = urllib.parse.parse_qs(parsed_url.query)["state"]
user_param = json.dumps(account_data_dict)
self.client.session.flush()
result = self.client_post(
self.AUTH_FINISH_URL, dict(state=state, user=user_param), **headers
)
return result
def register_extra_endpoints(
self,
requests_mock: responses.RequestsMock,
account_data_dict: Dict[str, str],
**extra_data: Any,
) -> None:
        # This is the URL of an endpoint on Apple's servers that returns
# the public keys to be used for verifying the signature
# on the JWT id_token.
requests_mock.add(
requests_mock.GET,
self.BACKEND_CLASS.JWK_URL,
status=200,
json=json.loads(settings.APPLE_JWK),
)
def generate_access_url_payload(self, account_data_dict: Dict[str, str]) -> str:
        # The ACCESS_TOKEN_URL endpoint works a bit differently than in standard OAuth2,
        # where the token_data_dict contains some essential data; we add that data here.
return json.dumps(
{
"access_token": "foobar",
"expires_in": time.time() + 60 * 5,
"id_token": self.generate_id_token(account_data_dict),
"token_type": "bearer",
}
)
def test_apple_auth_enabled(self) -> None:
with self.settings(AUTHENTICATION_BACKENDS=("zproject.backends.AppleAuthBackend",)):
self.assertTrue(apple_auth_enabled())
def test_auth_registration_with_no_name_sent_from_apple(self) -> None:
"""
Apple doesn't send the name in consecutive attempts if user registration
        fails the first time. This test verifies that the social pipeline is able
to handle the case of the backend not providing this information.
"""
email = "[email protected]"
subdomain = "zulip"
realm = get_realm("zulip")
account_data_dict = self.get_account_data_dict(email=email, name="")
result = self.social_auth_test(
account_data_dict, expect_choose_email_screen=True, subdomain=subdomain, is_signup=True
)
self.stage_two_of_registration(
result,
realm,
subdomain,
email,
"",
"Full Name",
skip_registration_form=False,
expect_full_name_prepopulated=False,
)
def test_id_token_verification_failure(self) -> None:
account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
with self.assertLogs(self.logger_string, level="INFO") as m:
with mock.patch("jwt.decode", side_effect=PyJWTError):
result = self.social_auth_test(
account_data_dict,
expect_choose_email_screen=True,
subdomain="zulip",
is_signup=True,
)
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, "/login/")
self.assertEqual(
m.output,
[
self.logger_output(
"AuthFailed: Authentication failed: Token validation failed", "info"
),
],
)
def test_validate_state(self) -> None:
with self.assertLogs(self.logger_string, level="INFO") as m:
# (1) check if auth fails if no state value is sent.
result = self.client_post("/complete/apple/")
self.assertEqual(result.status_code, 302)
self.assertIn("login", result.url)
# (2) Check if auth fails when a state sent has no valid data stored in Redis.
fake_state = "fa42e4ccdb630f0070c1daab70ad198d8786d4b639cd7a1b4db4d5a13c623060"
result = self.client_post("/complete/apple/", {"state": fake_state})
self.assertEqual(result.status_code, 302)
self.assertIn("login", result.url)
self.assertEqual(
m.output,
[
self.logger_output(
"Sign in with Apple failed: missing state parameter.", "info"
), # (1)
self.logger_output("Missing needed parameter state", "warning"),
self.logger_output("Sign in with Apple failed: bad state token.", "info"), # (2)
self.logger_output("Wrong state parameter given.", "warning"),
],
)
class AppleAuthBackendNativeFlowTest(AppleAuthMixin, SocialAuthBase):
__unittest_skip__ = False
SIGNUP_URL = "/complete/apple/"
LOGIN_URL = "/complete/apple/"
def prepare_login_url_and_headers(
self,
subdomain: str,
mobile_flow_otp: Optional[str] = None,
desktop_flow_otp: Optional[str] = None,
is_signup: bool = False,
next: str = "",
multiuse_object_key: str = "",
alternative_start_url: Optional[str] = None,
id_token: Optional[str] = None,
account_data_dict: Dict[str, str] = {},
*,
user_agent: Optional[str] = None,
) -> Tuple[str, Dict[str, Any]]:
url, headers = super().prepare_login_url_and_headers(
subdomain,
mobile_flow_otp,
desktop_flow_otp,
is_signup,
next,
multiuse_object_key,
alternative_start_url=alternative_start_url,
user_agent=user_agent,
)
params = {"native_flow": "true"}
if id_token is not None:
params["id_token"] = id_token
if is_signup:
params["is_signup"] = "1"
if subdomain:
params["subdomain"] = subdomain
params["user"] = json.dumps(account_data_dict)
url += f"&{urllib.parse.urlencode(params)}"
return url, headers
def social_auth_test(
self,
account_data_dict: Dict[str, str],
*,
subdomain: str,
mobile_flow_otp: Optional[str] = None,
desktop_flow_otp: Optional[str] = None,
is_signup: bool = False,
next: str = "",
multiuse_object_key: str = "",
alternative_start_url: Optional[str] = None,
skip_id_token: bool = False,
user_agent: Optional[str] = None,
**extra_data: Any,
) -> HttpResponse:
"""In Apple's native authentication flow, the client app authenticates
with Apple and receives the JWT id_token, before contacting
the Zulip server. The app sends an appropriate request with
it to /complete/apple/ to get logged in. See the backend
class for details.
As a result, we need a custom social_auth_test function that
effectively just does the second half of the flow (i.e. the
part after the redirect from this third-party authentication
provider) with a properly generated id_token.
"""
if not skip_id_token:
id_token: Optional[str] = self.generate_id_token(
account_data_dict, settings.SOCIAL_AUTH_APPLE_APP_ID
)
else:
id_token = None
url, headers = self.prepare_login_url_and_headers(
subdomain,
mobile_flow_otp,
desktop_flow_otp,
is_signup,
next,
multiuse_object_key,
alternative_start_url=self.AUTH_FINISH_URL,
user_agent=user_agent,
id_token=id_token,
account_data_dict=account_data_dict,
)
with self.apple_jwk_url_mock():
result = self.client_get(url, **headers)
return result
@contextmanager
def apple_jwk_url_mock(self) -> Iterator[None]:
with responses.RequestsMock(assert_all_requests_are_fired=False) as requests_mock:
# The server fetches public keys for validating the id_token
# from Apple servers. We need to mock that URL to return our key,
# created for these tests.
requests_mock.add(
requests_mock.GET,
self.BACKEND_CLASS.JWK_URL,
status=200,
json=json.loads(settings.APPLE_JWK),
)
yield
def test_no_id_token_sent(self) -> None:
account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
result = self.social_auth_test(
account_data_dict,
expect_choose_email_screen=False,
subdomain="zulip",
next="/user_uploads/image",
skip_id_token=True,
)
self.assert_json_error(result, "Missing id_token parameter")
def test_social_auth_session_fields_cleared_correctly(self) -> None:
mobile_flow_otp = "1234abcd" * 8
account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
def initiate_auth(mobile_flow_otp: Optional[str] = None) -> None:
url, headers = self.prepare_login_url_and_headers(
subdomain="zulip",
id_token="invalid",
mobile_flow_otp=mobile_flow_otp,
account_data_dict=account_data_dict,
)
result = self.client_get(url, **headers)
self.assertEqual(result.status_code, 302)
with self.assertLogs(self.logger_string, level="INFO") as info_log:
# Start Apple auth with mobile_flow_otp param. It should get saved into the session
# on SOCIAL_AUTH_SUBDOMAIN.
initiate_auth(mobile_flow_otp)
self.assertEqual(self.client.session["mobile_flow_otp"], mobile_flow_otp)
self.assertEqual(
info_log.output,
[
self.logger_output(
"/complete/apple/: Authentication failed: Token validation failed", "info"
)
],
)
with self.assertLogs(self.logger_string, level="INFO") as info_log:
# Make a request without mobile_flow_otp param and verify the field doesn't persist
# in the session from the previous request.
initiate_auth()
self.assertEqual(self.client.session.get("mobile_flow_otp"), None)
self.assertEqual(
info_log.output,
[
self.logger_output(
"/complete/apple/: Authentication failed: Token validation failed", "info"
)
],
)
def test_id_token_with_invalid_aud_sent(self) -> None:
account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
url, headers = self.prepare_login_url_and_headers(
subdomain="zulip",
alternative_start_url=self.AUTH_FINISH_URL,
id_token=self.generate_id_token(account_data_dict, audience="com.different.app"),
account_data_dict=account_data_dict,
)
with self.apple_jwk_url_mock(), self.assertLogs(self.logger_string, level="INFO") as m:
result = self.client_get(url, **headers)
self.assertEqual(
m.output,
[
self.logger_output(
"/complete/apple/: Authentication failed: Token validation failed", "info"
)
],
)
return result
def test_social_auth_desktop_success(self) -> None:
"""
        The desktop app doesn't currently use the native flow, and the desktop app flow
        in its current form happens in the browser, so only the web flow is relevant there.
"""
pass
def test_social_auth_no_key(self) -> None:
"""
The basic validation of server configuration is handled on the
/login/social/apple/ endpoint which isn't even a part of the native flow.
"""
pass
class GitHubAuthBackendTest(SocialAuthBase):
__unittest_skip__ = False
BACKEND_CLASS = GitHubAuthBackend
CLIENT_KEY_SETTING = "SOCIAL_AUTH_GITHUB_KEY"
CLIENT_SECRET_SETTING = "SOCIAL_AUTH_GITHUB_SECRET"
LOGIN_URL = "/accounts/login/social/github"
SIGNUP_URL = "/accounts/register/social/github"
AUTHORIZATION_URL = "https://github.com/login/oauth/authorize"
ACCESS_TOKEN_URL = "https://github.com/login/oauth/access_token"
USER_INFO_URL = "https://api.github.com/user"
AUTH_FINISH_URL = "/complete/github/"
CONFIG_ERROR_URL = "/config-error/github"
email_data: List[Dict[str, Any]] = []
def social_auth_test_finish(
self,
result: HttpResponse,
account_data_dict: Dict[str, str],
expect_choose_email_screen: bool,
headers: Any,
expect_noreply_email_allowed: bool = False,
**extra_data: Any,
) -> HttpResponse:
parsed_url = urllib.parse.urlparse(result.url)
csrf_state = urllib.parse.parse_qs(parsed_url.query)["state"]
result = self.client_get(self.AUTH_FINISH_URL, dict(state=csrf_state), **headers)
if expect_choose_email_screen:
# As GitHub authenticates multiple email addresses,
# we'll have an additional screen where the user selects
# which email address to log in using (this screen is a
# "partial" state of the python-social-auth pipeline).
#
# TODO: Generalize this testing code for use with other
            # authentication backends when a new authentication backend
            # that requires the "choose email" screen is added.
self.assert_in_success_response(["Select account"], result)
# Verify that all the emails returned by GitHub auth
            # are in the "choose email" screen.
all_emails_verified = True
for email_data_dict in self.email_data:
email = email_data_dict["email"]
if email.endswith("@users.noreply.github.com") and not expect_noreply_email_allowed:
self.assert_not_in_success_response([email], result)
elif email_data_dict.get("verified"):
self.assert_in_success_response([email], result)
else:
# We may change this if we provide a way to see
# the list of emails the user had.
self.assert_not_in_success_response([email], result)
all_emails_verified = False
if all_emails_verified:
self.assert_not_in_success_response(["also has unverified email"], result)
else:
self.assert_in_success_response(["also has unverified email"], result)
result = self.client_get(
self.AUTH_FINISH_URL,
dict(state=csrf_state, email=account_data_dict["email"]),
**headers,
)
return result
def register_extra_endpoints(
self,
requests_mock: responses.RequestsMock,
account_data_dict: Dict[str, str],
**extra_data: Any,
) -> None:
        # Keeping a verified email before the primary email makes sure
        # get_verified_emails puts the primary email at the start of the
        # email list it returns, since social_associate_user_helper treats
        # the first email in the list as the primary email.
email_data = [
dict(email="[email protected]", verified=True),
dict(email=account_data_dict["email"], verified=True, primary=True),
dict(email="[email protected]", verified=False),
]
email_data = extra_data.get("email_data", email_data)
requests_mock.add(
requests_mock.GET,
"https://api.github.com/user/emails",
status=200,
body=json.dumps(email_data),
)
requests_mock.add(
requests_mock.GET,
"https://api.github.com/teams/zulip-webapp/members/None",
status=200,
body=json.dumps(email_data),
)
self.email_data = email_data
def get_account_data_dict(
self, email: str, name: str, user_avatar_url: str = ""
) -> Dict[str, Any]:
return dict(email=email, name=name, user_avatar_url=user_avatar_url)
def test_social_auth_email_not_verified(self) -> None:
account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
email_data = [
dict(email=account_data_dict["email"], verified=False, primary=True),
]
with self.assertLogs(self.logger_string, level="WARNING") as m:
result = self.social_auth_test(
account_data_dict, subdomain="zulip", email_data=email_data
)
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, "/login/")
self.assertEqual(
m.output,
[
self.logger_output(
"Social auth ({}) failed because user has no verified emails".format("GitHub"),
"warning",
)
],
)
@override_settings(SOCIAL_AUTH_GITHUB_TEAM_ID="zulip-webapp")
def test_social_auth_github_team_not_member_failed(self) -> None:
account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
with mock.patch(
"social_core.backends.github.GithubTeamOAuth2.user_data",
side_effect=AuthFailed("Not found"),
), self.assertLogs(self.logger_string, level="INFO") as mock_info:
result = self.social_auth_test(account_data_dict, subdomain="zulip")
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, "/login/")
self.assertEqual(
mock_info.output,
[
self.logger_output(
"GitHub user is not member of required team",
"info",
)
],
)
@override_settings(SOCIAL_AUTH_GITHUB_TEAM_ID="zulip-webapp")
def test_social_auth_github_team_member_success(self) -> None:
account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
with mock.patch(
"social_core.backends.github.GithubTeamOAuth2.user_data", return_value=account_data_dict
):
result = self.social_auth_test(
account_data_dict, expect_choose_email_screen=False, subdomain="zulip"
)
data = load_subdomain_token(result)
self.assertEqual(data["email"], self.example_email("hamlet"))
self.assertEqual(data["full_name"], self.name)
self.assertEqual(data["subdomain"], "zulip")
@override_settings(SOCIAL_AUTH_GITHUB_ORG_NAME="Zulip")
def test_social_auth_github_organization_not_member_failed(self) -> None:
account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
with mock.patch(
"social_core.backends.github.GithubOrganizationOAuth2.user_data",
side_effect=AuthFailed("Not found"),
), self.assertLogs(self.logger_string, level="INFO") as mock_info:
result = self.social_auth_test(account_data_dict, subdomain="zulip")
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, "/login/")
self.assertEqual(
mock_info.output,
[
self.logger_output(
"GitHub user is not member of required organization",
"info",
)
],
)
@override_settings(SOCIAL_AUTH_GITHUB_ORG_NAME="Zulip")
def test_social_auth_github_organization_member_success(self) -> None:
account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
with mock.patch(
"social_core.backends.github.GithubOrganizationOAuth2.user_data",
return_value=account_data_dict,
):
result = self.social_auth_test(
account_data_dict, expect_choose_email_screen=False, subdomain="zulip"
)
data = load_subdomain_token(result)
self.assertEqual(data["email"], self.example_email("hamlet"))
self.assertEqual(data["full_name"], self.name)
self.assertEqual(data["subdomain"], "zulip")
def test_github_auth_enabled(self) -> None:
with self.settings(AUTHENTICATION_BACKENDS=("zproject.backends.GitHubAuthBackend",)):
self.assertTrue(github_auth_enabled())
def test_github_oauth2_success_non_primary(self) -> None:
account_data_dict = self.get_account_data_dict(
email="[email protected]", name="Non Primary"
)
email_data = [
dict(email=account_data_dict["email"], verified=True),
dict(email="[email protected]", verified=True, primary=True),
dict(email="[email protected]", verified=True),
dict(email="[email protected]", verified=False),
]
result = self.social_auth_test(
account_data_dict,
subdomain="zulip",
email_data=email_data,
expect_choose_email_screen=True,
next="/user_uploads/image",
)
data = load_subdomain_token(result)
self.assertEqual(data["email"], "[email protected]")
self.assertEqual(data["full_name"], "Non Primary")
self.assertEqual(data["subdomain"], "zulip")
self.assertEqual(data["redirect_to"], "/user_uploads/image")
self.assertEqual(result.status_code, 302)
parsed_url = urllib.parse.urlparse(result.url)
uri = f"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path}"
self.assertTrue(uri.startswith("http://zulip.testserver/accounts/login/subdomain/"))
def test_github_oauth2_success_single_email(self) -> None:
        # If the user has a single email associated with their GitHub account,
        # the choose email screen should not be shown, and that email
        # should be used for the user's signup/login.
account_data_dict = self.get_account_data_dict(email="[email protected]", name=self.name)
email_data = [
dict(email="[email protected]", verified=True, primary=True),
]
result = self.social_auth_test(
account_data_dict,
subdomain="zulip",
email_data=email_data,
expect_choose_email_screen=False,
next="/user_uploads/image",
)
data = load_subdomain_token(result)
self.assertEqual(data["email"], self.example_email("hamlet"))
self.assertEqual(data["full_name"], self.name)
self.assertEqual(data["subdomain"], "zulip")
self.assertEqual(data["redirect_to"], "/user_uploads/image")
self.assertEqual(result.status_code, 302)
parsed_url = urllib.parse.urlparse(result.url)
uri = f"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path}"
self.assertTrue(uri.startswith("http://zulip.testserver/accounts/login/subdomain/"))
def test_github_oauth2_login_only_one_account_exists(self) -> None:
# In a login flow, if only one of the user's verified emails
# is associated with an existing account, the user should be
# just logged in (skipping the "choose email screen"). We
# only want that screen if the user were instead trying to
# register a new account, which they're not.
account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
email_data = [
dict(email=account_data_dict["email"], verified=True),
dict(email="[email protected]", verified=True),
dict(email="[email protected]", verified=True),
]
result = self.social_auth_test(
account_data_dict,
subdomain="zulip",
email_data=email_data,
expect_choose_email_screen=False,
next="/user_uploads/image",
)
data = load_subdomain_token(result)
self.assertEqual(data["email"], account_data_dict["email"])
self.assertEqual(data["full_name"], self.name)
self.assertEqual(data["subdomain"], "zulip")
self.assertEqual(data["redirect_to"], "/user_uploads/image")
self.assertEqual(result.status_code, 302)
parsed_url = urllib.parse.urlparse(result.url)
uri = f"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path}"
self.assertTrue(uri.startswith("http://zulip.testserver/accounts/login/subdomain/"))
def test_github_oauth2_login_multiple_accounts_exist(self) -> None:
# In the login flow, if multiple of the user's verified emails
# are associated with existing accounts, we expect the choose
# email screen to select which account to use.
hamlet = self.example_user("hamlet")
account_data_dict = self.get_account_data_dict(email="[email protected]", name="Hamlet")
email_data = [
dict(email=account_data_dict["email"], verified=True),
dict(email="[email protected]", verified=True, primary=True),
dict(email="[email protected]", verified=True),
dict(email="[email protected]", verified=False),
]
result = self.social_auth_test(
account_data_dict,
subdomain="zulip",
email_data=email_data,
expect_choose_email_screen=True,
next="/user_uploads/image",
)
data = load_subdomain_token(result)
self.assertEqual(data["email"], "[email protected]")
self.assertEqual(data["full_name"], hamlet.full_name)
self.assertEqual(data["subdomain"], "zulip")
self.assertEqual(data["redirect_to"], "/user_uploads/image")
self.assertEqual(result.status_code, 302)
parsed_url = urllib.parse.urlparse(result.url)
uri = f"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path}"
self.assertTrue(uri.startswith("http://zulip.testserver/accounts/login/subdomain/"))
def test_github_oauth2_login_no_account_exists(self) -> None:
# In the login flow, if the user has multiple verified emails,
# none of which are associated with an existing account, the
# choose email screen should be shown (which will lead to a
# "continue to registration" choice).
account_data_dict = self.get_account_data_dict(
email="[email protected]", name="Not Hamlet"
)
email_data = [
dict(email=account_data_dict["email"], verified=True),
dict(email="[email protected]", verified=True),
dict(email="[email protected]", verified=True),
]
result = self.social_auth_test(
account_data_dict,
subdomain="zulip",
email_data=email_data,
expect_choose_email_screen=True,
)
email = account_data_dict["email"]
name = account_data_dict["name"]
subdomain = "zulip"
realm = get_realm("zulip")
self.stage_two_of_registration(
result,
realm,
subdomain,
email,
name,
name,
expect_confirm_registration_page=True,
skip_registration_form=False,
)
def test_github_oauth2_signup_choose_existing_account(self) -> None:
# In the sign up flow, if the user has chosen an email of an
# existing account, the user will be logged in.
account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
email_data = [
dict(email=account_data_dict["email"], verified=True),
dict(email="[email protected]", verified=True),
dict(email="[email protected]", verified=True),
]
result = self.social_auth_test(
account_data_dict,
email_data=email_data,
is_signup=True,
subdomain="zulip",
expect_choose_email_screen=True,
next="/user_uploads/image",
)
data = load_subdomain_token(result)
self.assertEqual(data["email"], account_data_dict["email"])
self.assertEqual(data["full_name"], account_data_dict["name"])
self.assertEqual(data["subdomain"], "zulip")
self.assertEqual(data["redirect_to"], "/user_uploads/image")
self.assertEqual(result.status_code, 302)
parsed_url = urllib.parse.urlparse(result.url)
uri = f"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path}"
self.assertTrue(uri.startswith("http://zulip.testserver/accounts/login/subdomain/"))
def test_github_oauth2_signup_choose_new_email_to_register(self) -> None:
# In the sign up flow, if the user has multiple verified
# emails, we show the "choose email" screen, even if the user
# has another verified email with an existing account,
# allowing the user to register a second account associated
# with the second email.
email = "[email protected]"
name = "Full Name"
subdomain = "zulip"
realm = get_realm("zulip")
account_data_dict = self.get_account_data_dict(email=email, name=name)
email_data = [
dict(email="[email protected]", verified=True),
dict(email=email, verified=True),
dict(email="[email protected]", verified=True),
]
result = self.social_auth_test(
account_data_dict,
email_data=email_data,
expect_choose_email_screen=True,
subdomain=subdomain,
is_signup=True,
)
self.stage_two_of_registration(
result, realm, subdomain, email, name, name, self.BACKEND_CLASS.full_name_validated
)
def test_github_oauth2_email_no_reply_dot_github_dot_com(self) -> None:
# As emails ending with `noreply.github.com` are excluded from
# verified_emails unless an account with that email already exists,
        # choosing such an email should raise an `email not associated` warning.
noreply_email = "[email protected]"
account_data_dict = self.get_account_data_dict(email=noreply_email, name=self.name)
email_data = [
dict(email="[email protected]", verified=True),
dict(email="[email protected]", verified=True, primary=True),
dict(email="[email protected]", verified=True),
dict(email=account_data_dict["email"], verified=True),
]
with self.assertLogs(self.logger_string, level="WARNING") as m:
result = self.social_auth_test(
account_data_dict,
subdomain="zulip",
expect_choose_email_screen=True,
email_data=email_data,
)
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, "/login/")
self.assertEqual(
m.output,
[
self.logger_output(
"Social auth (GitHub) failed because user has no verified"
" emails associated with the account",
"warning",
)
],
)
# Now we create the user account with the noreply email and verify that it's
# possible to sign in to it.
realm = get_realm("zulip")
do_create_user(
noreply_email, "password", realm, account_data_dict["name"], acting_user=None
)
result = self.social_auth_test(
account_data_dict,
subdomain="zulip",
expect_choose_email_screen=True,
expect_noreply_email_allowed=True,
email_data=email_data,
)
data = load_subdomain_token(result)
self.assertEqual(data["email"], account_data_dict["email"])
self.assertEqual(data["full_name"], account_data_dict["name"])
self.assertEqual(data["subdomain"], "zulip")
self.assertEqual(result.status_code, 302)
parsed_url = urllib.parse.urlparse(result.url)
uri = f"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path}"
self.assertTrue(uri.startswith("http://zulip.testserver/accounts/login/subdomain/"))
def test_github_oauth2_email_not_associated(self) -> None:
account_data_dict = self.get_account_data_dict(
email="[email protected]", name=self.name
)
email_data = [
dict(email="[email protected]", verified=True),
dict(email="[email protected]", verified=True, primary=True),
dict(email="[email protected]", verified=True),
]
with self.assertLogs(self.logger_string, level="WARNING") as m:
result = self.social_auth_test(
account_data_dict,
subdomain="zulip",
expect_choose_email_screen=True,
email_data=email_data,
)
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, "/login/")
self.assertEqual(
m.output,
[
self.logger_output(
"Social auth (GitHub) failed because user has no verified"
" emails associated with the account",
"warning",
)
],
)
def test_github_unverified_email_with_existing_account(self) -> None:
        # Check that a user is denied login if they manage to send an
        # unverified email that has an existing account in the
        # organization via the `email` GET parameter.
account_data_dict = self.get_account_data_dict(email="[email protected]", name=self.name)
email_data = [
dict(email="[email protected]", verified=True),
dict(email="[email protected]", verified=False),
dict(email="[email protected]", verified=True, primary=True),
]
with self.assertLogs(self.logger_string, level="WARNING") as m:
result = self.social_auth_test(
account_data_dict,
subdomain="zulip",
expect_choose_email_screen=True,
email_data=email_data,
)
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, "/login/")
self.assertEqual(
m.output,
[
self.logger_output(
"Social auth ({}) failed because user has no verified emails associated with the account".format(
"GitHub"
),
"warning",
)
],
)
class GitLabAuthBackendTest(SocialAuthBase):
__unittest_skip__ = False
BACKEND_CLASS = GitLabAuthBackend
CLIENT_KEY_SETTING = "SOCIAL_AUTH_GITLAB_KEY"
CLIENT_SECRET_SETTING = "SOCIAL_AUTH_GITLAB_SECRET"
LOGIN_URL = "/accounts/login/social/gitlab"
SIGNUP_URL = "/accounts/register/social/gitlab"
AUTHORIZATION_URL = "https://gitlab.com/oauth/authorize"
ACCESS_TOKEN_URL = "https://gitlab.com/oauth/token"
USER_INFO_URL = "https://gitlab.com/api/v4/user"
AUTH_FINISH_URL = "/complete/gitlab/"
CONFIG_ERROR_URL = "/config-error/gitlab"
def test_gitlab_auth_enabled(self) -> None:
with self.settings(AUTHENTICATION_BACKENDS=("zproject.backends.GitLabAuthBackend",)):
self.assertTrue(gitlab_auth_enabled())
def get_account_data_dict(self, email: str, name: str) -> Dict[str, Any]:
return dict(email=email, name=name, email_verified=True)
class GoogleAuthBackendTest(SocialAuthBase):
__unittest_skip__ = False
BACKEND_CLASS = GoogleAuthBackend
CLIENT_KEY_SETTING = "SOCIAL_AUTH_GOOGLE_KEY"
CLIENT_SECRET_SETTING = "SOCIAL_AUTH_GOOGLE_SECRET"
LOGIN_URL = "/accounts/login/social/google"
SIGNUP_URL = "/accounts/register/social/google"
AUTHORIZATION_URL = "https://accounts.google.com/o/oauth2/auth"
ACCESS_TOKEN_URL = "https://accounts.google.com/o/oauth2/token"
USER_INFO_URL = "https://www.googleapis.com/oauth2/v3/userinfo"
AUTH_FINISH_URL = "/complete/google/"
CONFIG_ERROR_URL = "/config-error/google"
def get_account_data_dict(self, email: str, name: str) -> Dict[str, Any]:
return dict(email=email, name=name, email_verified=True)
def test_social_auth_email_not_verified(self) -> None:
account_data_dict = dict(email=self.email, name=self.name)
with self.assertLogs(self.logger_string, level="WARNING") as m:
result = self.social_auth_test(account_data_dict, subdomain="zulip")
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, "/login/")
self.assertEqual(
m.output,
[
self.logger_output(
"Social auth ({}) failed because user has no verified emails".format("Google"),
"warning",
)
],
)
def test_social_auth_mobile_realm_uri(self) -> None:
mobile_flow_otp = "1234abcd" * 8
account_data_dict = self.get_account_data_dict(email=self.email, name="Full Name")
with self.settings(
REALM_MOBILE_REMAP_URIS={"http://zulip.testserver": "http://zulip-mobile.testserver"}
):
result = self.social_auth_test(
account_data_dict,
subdomain="zulip",
expect_choose_email_screen=True,
alternative_start_url="/accounts/login/google/",
mobile_flow_otp=mobile_flow_otp,
)
self.assertEqual(result.status_code, 302)
redirect_url = result["Location"]
parsed_url = urllib.parse.urlparse(redirect_url)
query_params = urllib.parse.parse_qs(parsed_url.query)
self.assertEqual(parsed_url.scheme, "zulip")
self.assertEqual(query_params["realm"], ["http://zulip-mobile.testserver"])
self.assertEqual(query_params["email"], [self.example_email("hamlet")])
encrypted_api_key = query_params["otp_encrypted_api_key"][0]
hamlet_api_keys = get_all_api_keys(self.example_user("hamlet"))
self.assertIn(otp_decrypt_api_key(encrypted_api_key, mobile_flow_otp), hamlet_api_keys)
def test_social_auth_mobile_success_legacy_url(self) -> None:
mobile_flow_otp = "1234abcd" * 8
account_data_dict = self.get_account_data_dict(email=self.email, name="Full Name")
self.assertEqual(len(mail.outbox), 0)
self.user_profile.date_joined = timezone_now() - datetime.timedelta(
seconds=JUST_CREATED_THRESHOLD + 1
)
self.user_profile.save()
with self.settings(SEND_LOGIN_EMAILS=True):
# Verify that the right thing happens with an invalid-format OTP
result = self.social_auth_test(
account_data_dict,
subdomain="zulip",
alternative_start_url="/accounts/login/google/",
mobile_flow_otp="1234",
)
self.assert_json_error(result, "Invalid OTP")
result = self.social_auth_test(
account_data_dict,
subdomain="zulip",
alternative_start_url="/accounts/login/google/",
mobile_flow_otp="invalido" * 8,
)
self.assert_json_error(result, "Invalid OTP")
# Now do it correctly
result = self.social_auth_test(
account_data_dict,
subdomain="zulip",
expect_choose_email_screen=True,
alternative_start_url="/accounts/login/google/",
mobile_flow_otp=mobile_flow_otp,
)
self.assertEqual(result.status_code, 302)
redirect_url = result["Location"]
parsed_url = urllib.parse.urlparse(redirect_url)
query_params = urllib.parse.parse_qs(parsed_url.query)
self.assertEqual(parsed_url.scheme, "zulip")
self.assertEqual(query_params["realm"], ["http://zulip.testserver"])
self.assertEqual(query_params["email"], [self.example_email("hamlet")])
encrypted_api_key = query_params["otp_encrypted_api_key"][0]
hamlet_api_keys = get_all_api_keys(self.example_user("hamlet"))
self.assertIn(otp_decrypt_api_key(encrypted_api_key, mobile_flow_otp), hamlet_api_keys)
self.assertEqual(len(mail.outbox), 1)
self.assertIn("Zulip on Android", mail.outbox[0].body)
def test_google_auth_enabled(self) -> None:
with self.settings(AUTHENTICATION_BACKENDS=("zproject.backends.GoogleAuthBackend",)):
self.assertTrue(google_auth_enabled())
def get_log_into_subdomain(
self,
data: ExternalAuthDataDict,
*,
subdomain: str = "zulip",
force_token: Optional[str] = None,
) -> HttpResponse:
if force_token is None:
token = ExternalAuthResult(data_dict=data).store_data()
else:
token = force_token
url_path = reverse(log_into_subdomain, args=[token])
return self.client_get(url_path, subdomain=subdomain)
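    # A minimal usage sketch of this helper (mirroring the tests below): build an
    # ExternalAuthDataDict for an existing user and hit the subdomain login URL
    # with a freshly stored one-time token.
    #
    #     data: ExternalAuthDataDict = {
    #         "full_name": "Hamlet",
    #         "email": self.example_email("hamlet"),
    #         "subdomain": "zulip",
    #         "is_signup": False,
    #         "redirect_to": "",
    #     }
    #     result = self.get_log_into_subdomain(data)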
def test_redirect_to_next_url_for_log_into_subdomain(self) -> None:
def test_redirect_to_next_url(next: str = "") -> HttpResponse:
data: ExternalAuthDataDict = {
"full_name": "Hamlet",
"email": self.example_email("hamlet"),
"subdomain": "zulip",
"is_signup": False,
"redirect_to": next,
}
user_profile = self.example_user("hamlet")
with mock.patch("zerver.views.auth.authenticate", return_value=user_profile):
with mock.patch("zerver.views.auth.do_login"):
result = self.get_log_into_subdomain(data)
return result
res = test_redirect_to_next_url()
self.assertEqual(res.status_code, 302)
self.assertEqual(res.url, "http://zulip.testserver")
res = test_redirect_to_next_url("/user_uploads/path_to_image")
self.assertEqual(res.status_code, 302)
self.assertEqual(res.url, "http://zulip.testserver/user_uploads/path_to_image")
res = test_redirect_to_next_url("/#narrow/stream/7-test-here")
self.assertEqual(res.status_code, 302)
self.assertEqual(res.url, "http://zulip.testserver/#narrow/stream/7-test-here")
def test_log_into_subdomain_when_token_is_malformed(self) -> None:
data: ExternalAuthDataDict = {
"full_name": "Full Name",
"email": self.example_email("hamlet"),
"subdomain": "zulip",
"is_signup": False,
"redirect_to": "",
}
with self.assertLogs(level="WARNING") as m:
result = self.get_log_into_subdomain(data, force_token="nonsense")
self.assertEqual(
m.output,
["WARNING:root:log_into_subdomain: Malformed token given: {}".format("nonsense")],
)
self.assertEqual(result.status_code, 400)
def test_log_into_subdomain_when_token_not_found(self) -> None:
data: ExternalAuthDataDict = {
"full_name": "Full Name",
"email": self.example_email("hamlet"),
"subdomain": "zulip",
"is_signup": False,
"redirect_to": "",
}
with self.assertLogs(level="WARNING") as m:
token = secrets.token_hex(ExternalAuthResult.LOGIN_TOKEN_LENGTH // 2)
result = self.get_log_into_subdomain(data, force_token=token)
self.assertEqual(result.status_code, 400)
self.assert_in_response("Invalid or expired login session.", result)
self.assertEqual(
m.output, ["WARNING:root:log_into_subdomain: Invalid token given: {}".format(token)]
)
def test_prevent_duplicate_signups(self) -> None:
existing_user = self.example_user("hamlet")
existing_user.delivery_email = "[email protected]"
existing_user.email = "[email protected]"
existing_user.save()
data: ExternalAuthDataDict = {
"full_name": "Full Name",
"email": "[email protected]",
"subdomain": "zulip",
"is_signup": True,
"redirect_to": "",
}
result = self.get_log_into_subdomain(data)
# Should simply get logged into the existing account:
self.assertEqual(result.status_code, 302)
self.assert_logged_in_user_id(existing_user.id)
def test_log_into_subdomain_when_is_signup_is_true_and_new_user(self) -> None:
data: ExternalAuthDataDict = {
"full_name": "New User Name",
"email": "[email protected]",
"subdomain": "zulip",
"is_signup": True,
"redirect_to": "",
}
result = self.get_log_into_subdomain(data)
self.assertEqual(result.status_code, 302)
confirmation = Confirmation.objects.all().first()
confirmation_key = confirmation.confirmation_key
self.assertIn("do_confirm/" + confirmation_key, result.url)
result = self.client_get(result.url)
self.assert_in_response('action="/accounts/register/"', result)
confirmation_data = {
"from_confirmation": "1",
"full_name": data["full_name"],
"key": confirmation_key,
}
result = self.client_post("/accounts/register/", confirmation_data, subdomain="zulip")
self.assert_in_response("We just need you to do one last thing", result)
# Verify that the user is asked for name but not password
self.assert_not_in_success_response(["id_password"], result)
self.assert_in_success_response(["id_full_name"], result)
def test_log_into_subdomain_when_is_signup_is_false_and_new_user(self) -> None:
data: ExternalAuthDataDict = {
"full_name": "New User Name",
"email": "[email protected]",
"subdomain": "zulip",
"is_signup": False,
"redirect_to": "",
}
result = self.get_log_into_subdomain(data)
self.assertEqual(result.status_code, 200)
self.assert_in_response("No account found for", result)
self.assert_in_response("[email protected].", result)
self.assert_in_response('action="http://zulip.testserver/accounts/do_confirm/', result)
url = re.findall(
'action="(http://zulip.testserver/accounts/do_confirm[^"]*)"',
result.content.decode("utf-8"),
)[0]
confirmation = Confirmation.objects.all().first()
confirmation_key = confirmation.confirmation_key
self.assertIn("do_confirm/" + confirmation_key, url)
result = self.client_get(url)
self.assert_in_response('action="/accounts/register/"', result)
confirmation_data = {
"from_confirmation": "1",
"full_name": data["full_name"],
"key": confirmation_key,
}
result = self.client_post("/accounts/register/", confirmation_data, subdomain="zulip")
self.assert_in_response("We just need you to do one last thing", result)
# Verify that the user is asked for name but not password
self.assert_not_in_success_response(["id_password"], result)
self.assert_in_success_response(["id_full_name"], result)
def test_log_into_subdomain_when_using_invite_link(self) -> None:
data: ExternalAuthDataDict = {
"full_name": "New User Name",
"email": "[email protected]",
"subdomain": "zulip",
"is_signup": True,
"redirect_to": "",
}
realm = get_realm("zulip")
realm.invite_required = True
realm.save()
stream_names = ["new_stream_1", "new_stream_2"]
streams = []
for stream_name in set(stream_names):
stream = ensure_stream(realm, stream_name, acting_user=None)
streams.append(stream)
# Without the invite link, we can't create an account due to invite_required
result = self.get_log_into_subdomain(data)
self.assertEqual(result.status_code, 200)
self.assert_in_success_response(["Sign up for Zulip"], result)
# Now confirm an invitation link works
referrer = self.example_user("hamlet")
multiuse_obj = MultiuseInvite.objects.create(realm=realm, referred_by=referrer)
multiuse_obj.streams.set(streams)
create_confirmation_link(multiuse_obj, Confirmation.MULTIUSE_INVITE)
multiuse_confirmation = Confirmation.objects.all().last()
multiuse_object_key = multiuse_confirmation.confirmation_key
data["multiuse_object_key"] = multiuse_object_key
result = self.get_log_into_subdomain(data)
self.assertEqual(result.status_code, 302)
confirmation = Confirmation.objects.all().last()
confirmation_key = confirmation.confirmation_key
self.assertIn("do_confirm/" + confirmation_key, result.url)
result = self.client_get(result.url)
self.assert_in_response('action="/accounts/register/"', result)
data2 = {"from_confirmation": "1", "full_name": data["full_name"], "key": confirmation_key}
result = self.client_post("/accounts/register/", data2, subdomain="zulip")
self.assert_in_response("We just need you to do one last thing", result)
# Verify that the user is asked for name but not password
self.assert_not_in_success_response(["id_password"], result)
self.assert_in_success_response(["id_full_name"], result)
# Click confirm registration button.
result = self.client_post(
"/accounts/register/",
{"full_name": "New User Name", "key": confirmation_key, "terms": True},
)
self.assertEqual(result.status_code, 302)
new_user = get_user_by_delivery_email("[email protected]", realm)
new_streams = self.get_streams(new_user)
self.assertEqual(sorted(new_streams), stream_names)
def test_log_into_subdomain_when_email_is_none(self) -> None:
data: ExternalAuthDataDict = {
"subdomain": "zulip",
"is_signup": False,
"redirect_to": "",
}
with self.assertLogs(level="WARNING") as m:
token = secrets.token_hex(ExternalAuthResult.LOGIN_TOKEN_LENGTH // 2)
result = self.get_log_into_subdomain(data, force_token=token)
self.assertEqual(result.status_code, 400)
self.assertEqual(
m.output, ["WARNING:root:log_into_subdomain: Invalid token given: {}".format(token)]
)
def test_user_cannot_log_into_wrong_subdomain(self) -> None:
data: ExternalAuthDataDict = {
"full_name": "Full Name",
"email": self.example_email("hamlet"),
"subdomain": "zephyr",
}
result = self.get_log_into_subdomain(data)
self.assert_json_error(result, "Invalid subdomain")
class JSONFetchAPIKeyTest(ZulipTestCase):
def test_success(self) -> None:
user = self.example_user("hamlet")
self.login_user(user)
result = self.client_post(
"/json/fetch_api_key", dict(password=initial_password(user.delivery_email))
)
self.assert_json_success(result)
def test_not_loggedin(self) -> None:
user = self.example_user("hamlet")
result = self.client_post(
"/json/fetch_api_key", dict(password=initial_password(user.delivery_email))
)
self.assert_json_error(
result, "Not logged in: API authentication or user session required", 401
)
def test_wrong_password(self) -> None:
user = self.example_user("hamlet")
self.login_user(user)
result = self.client_post("/json/fetch_api_key", dict(password="wrong"))
self.assert_json_error(result, "Your username or password is incorrect.", 400)
def test_invalid_subdomain(self) -> None:
username = "hamlet"
user = self.example_user(username)
self.login_user(user)
with mock.patch("zerver.views.auth.get_realm_from_request", return_value=None):
result = self.client_post(
"/json/fetch_api_key",
dict(username=username, password=initial_password(user.delivery_email)),
)
self.assert_json_error(result, "Invalid subdomain", 400)
class FetchAPIKeyTest(ZulipTestCase):
def setUp(self) -> None:
super().setUp()
self.user_profile = self.example_user("hamlet")
self.email = self.user_profile.delivery_email
def test_success(self) -> None:
result = self.client_post(
"/api/v1/fetch_api_key",
dict(username=self.email, password=initial_password(self.email)),
)
self.assert_json_success(result)
def test_invalid_email(self) -> None:
result = self.client_post(
"/api/v1/fetch_api_key", dict(username="hamlet", password=initial_password(self.email))
)
self.assert_json_error(result, "Enter a valid email address.", 400)
def test_wrong_password(self) -> None:
result = self.client_post(
"/api/v1/fetch_api_key", dict(username=self.email, password="wrong")
)
self.assert_json_error(result, "Your username or password is incorrect.", 403)
def test_invalid_subdomain(self) -> None:
with mock.patch("zerver.views.auth.get_realm_from_request", return_value=None):
result = self.client_post(
"/api/v1/fetch_api_key",
dict(username="hamlet", password=initial_password(self.email)),
)
self.assert_json_error(result, "Invalid subdomain", 400)
def test_password_auth_disabled(self) -> None:
with mock.patch("zproject.backends.password_auth_enabled", return_value=False):
result = self.client_post(
"/api/v1/fetch_api_key",
dict(username=self.email, password=initial_password(self.email)),
)
self.assert_json_error_contains(result, "Password auth is disabled", 403)
@override_settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipLDAPAuthBackend",))
def test_ldap_auth_email_auth_disabled_success(self) -> None:
self.init_default_ldap_database()
with self.settings(LDAP_APPEND_DOMAIN="zulip.com"):
result = self.client_post(
"/api/v1/fetch_api_key",
dict(username=self.example_email("hamlet"), password=self.ldap_password("hamlet")),
)
self.assert_json_success(result)
@override_settings(
AUTHENTICATION_BACKENDS=("zproject.backends.ZulipLDAPAuthBackend",),
AUTH_LDAP_USER_ATTR_MAP={"full_name": "cn", "org_membership": "department"},
)
def test_ldap_auth_email_auth_organization_restriction(self) -> None:
self.init_default_ldap_database()
        # We test two combinations here:
        # The first user has no department attribute set.
        # The second user has one set, but to a different value.
result = self.client_post(
"/api/v1/fetch_api_key",
dict(username=self.example_email("hamlet"), password=self.ldap_password("hamlet")),
)
self.assert_json_error(result, "Your username or password is incorrect.", 403)
self.change_ldap_user_attr("hamlet", "department", "testWrongRealm")
result = self.client_post(
"/api/v1/fetch_api_key",
dict(username=self.example_email("hamlet"), password=self.ldap_password("hamlet")),
)
self.assert_json_error(result, "Your username or password is incorrect.", 403)
self.change_ldap_user_attr("hamlet", "department", "zulip")
result = self.client_post(
"/api/v1/fetch_api_key",
dict(username=self.example_email("hamlet"), password=self.ldap_password("hamlet")),
)
self.assert_json_success(result)
def test_inactive_user(self) -> None:
do_deactivate_user(self.user_profile, acting_user=None)
result = self.client_post(
"/api/v1/fetch_api_key",
dict(username=self.email, password=initial_password(self.email)),
)
self.assert_json_error_contains(result, "Your account has been disabled", 403)
def test_deactivated_realm(self) -> None:
do_deactivate_realm(self.user_profile.realm, acting_user=None)
result = self.client_post(
"/api/v1/fetch_api_key",
dict(username=self.email, password=initial_password(self.email)),
)
self.assert_json_error_contains(result, "This organization has been deactivated", 403)
class DevFetchAPIKeyTest(ZulipTestCase):
def setUp(self) -> None:
super().setUp()
self.user_profile = self.example_user("hamlet")
self.email = self.user_profile.delivery_email
def test_success(self) -> None:
result = self.client_post("/api/v1/dev_fetch_api_key", dict(username=self.email))
self.assert_json_success(result)
data = result.json()
self.assertEqual(data["email"], self.email)
user_api_keys = get_all_api_keys(self.user_profile)
self.assertIn(data["api_key"], user_api_keys)
def test_invalid_email(self) -> None:
email = "hamlet"
result = self.client_post("/api/v1/dev_fetch_api_key", dict(username=email))
self.assert_json_error_contains(result, "Enter a valid email address.", 400)
def test_unregistered_user(self) -> None:
email = "[email protected]"
result = self.client_post("/api/v1/dev_fetch_api_key", dict(username=email))
self.assert_json_error_contains(result, "This user is not registered.", 403)
def test_inactive_user(self) -> None:
do_deactivate_user(self.user_profile, acting_user=None)
result = self.client_post("/api/v1/dev_fetch_api_key", dict(username=self.email))
self.assert_json_error_contains(result, "Your account has been disabled", 403)
def test_deactivated_realm(self) -> None:
do_deactivate_realm(self.user_profile.realm, acting_user=None)
result = self.client_post("/api/v1/dev_fetch_api_key", dict(username=self.email))
self.assert_json_error_contains(result, "This organization has been deactivated", 403)
def test_dev_auth_disabled(self) -> None:
with mock.patch("zerver.views.development.dev_login.dev_auth_enabled", return_value=False):
result = self.client_post("/api/v1/dev_fetch_api_key", dict(username=self.email))
self.assert_json_error_contains(result, "DevAuthBackend not enabled.", 400)
def test_invalid_subdomain(self) -> None:
with mock.patch(
"zerver.views.development.dev_login.get_realm_from_request", return_value=None
):
result = self.client_post(
"/api/v1/dev_fetch_api_key",
dict(username=self.email, password=initial_password(self.email)),
)
self.assert_json_error_contains(result, "Invalid subdomain", 400)
class DevGetEmailsTest(ZulipTestCase):
def test_success(self) -> None:
result = self.client_get("/api/v1/dev_list_users")
self.assert_json_success(result)
self.assert_in_response("direct_admins", result)
self.assert_in_response("direct_users", result)
def test_dev_auth_disabled(self) -> None:
with mock.patch("zerver.views.development.dev_login.dev_auth_enabled", return_value=False):
result = self.client_get("/api/v1/dev_list_users")
self.assert_json_error_contains(result, "DevAuthBackend not enabled.", 400)
with override_settings(PRODUCTION=True):
result = self.client_get("/api/v1/dev_list_users")
self.assert_json_error_contains(result, "Endpoint not available in production.", 400)
class ExternalMethodDictsTests(ZulipTestCase):
def get_configured_saml_backend_idp_names(self) -> List[str]:
        return list(settings.SOCIAL_AUTH_SAML_ENABLED_IDPS.keys())
def test_get_external_method_dicts_correctly_sorted(self) -> None:
with self.settings(
AUTHENTICATION_BACKENDS=(
"zproject.backends.EmailAuthBackend",
"zproject.backends.GitHubAuthBackend",
"zproject.backends.GoogleAuthBackend",
"zproject.backends.ZulipRemoteUserBackend",
"zproject.backends.SAMLAuthBackend",
"zproject.backends.AzureADAuthBackend",
),
):
external_auth_methods = get_external_method_dicts()
            # The first backend in the list should be SAML:
self.assertIn("saml:", external_auth_methods[0]["name"])
self.assertEqual(
[social_backend["name"] for social_backend in external_auth_methods[1:]],
[
social_backend.name
for social_backend in sorted(
[
ZulipRemoteUserBackend,
GitHubAuthBackend,
AzureADAuthBackend,
GoogleAuthBackend,
],
key=lambda x: x.sort_order,
reverse=True,
)
],
)
def test_get_external_method_buttons(self) -> None:
with self.settings(
AUTHENTICATION_BACKENDS=(
"zproject.backends.EmailAuthBackend",
"zproject.backends.GitHubAuthBackend",
"zproject.backends.GoogleAuthBackend",
"zproject.backends.SAMLAuthBackend",
),
):
saml_idp_names = self.get_configured_saml_backend_idp_names()
expected_button_id_strings = [
'id="{}_auth_button_github"',
'id="{}_auth_button_google"',
]
for name in saml_idp_names:
expected_button_id_strings.append(f'id="{{}}_auth_button_saml:{name}"')
result = self.client_get("/login/")
self.assert_in_success_response(
[string.format("login") for string in expected_button_id_strings], result
)
result = self.client_get("/register/")
self.assert_in_success_response(
[string.format("register") for string in expected_button_id_strings], result
)
def test_get_external_method_dicts_multiple_saml_idps(self) -> None:
idps_dict = copy.deepcopy(settings.SOCIAL_AUTH_SAML_ENABLED_IDPS)
        # Create another IdP config by copying the original one and changing some details.
idps_dict["test_idp2"] = copy.deepcopy(idps_dict["test_idp"])
idps_dict["test_idp2"]["url"] = "https://idp2.example.com/idp/profile/SAML2/Redirect/SSO"
idps_dict["test_idp2"]["display_name"] = "Second Test IdP"
idps_dict["test_idp2"]["limit_to_subdomains"] = ["zephyr"]
with self.settings(
SOCIAL_AUTH_SAML_ENABLED_IDPS=idps_dict,
AUTHENTICATION_BACKENDS=(
"zproject.backends.EmailAuthBackend",
"zproject.backends.GitHubAuthBackend",
"zproject.backends.SAMLAuthBackend",
),
):
# Calling get_external_method_dicts without a realm returns all methods configured on the server:
external_auth_methods = get_external_method_dicts()
# 1 IdP enabled for all realms + a dict for GitHub auth
self.assert_length(external_auth_methods, 2)
self.assertEqual(
[external_auth_methods[0]["name"], external_auth_methods[1]["name"]],
["saml:test_idp", "github"],
)
external_auth_methods = get_external_method_dicts(get_realm("zulip"))
# Only test_idp enabled for the zulip realm, + GitHub auth.
self.assert_length(external_auth_methods, 2)
self.assertEqual(
[external_auth_methods[0]["name"], external_auth_methods[1]["name"]],
["saml:test_idp", "github"],
)
external_auth_methods = get_external_method_dicts(get_realm("zephyr"))
            # Both IdPs enabled for the zephyr realm, + GitHub auth.
self.assert_length(external_auth_methods, 3)
self.assertEqual(
{external_auth_methods[0]["name"], external_auth_methods[1]["name"]},
{"saml:test_idp", "saml:test_idp2"},
)
class FetchAuthBackends(ZulipTestCase):
def test_get_server_settings(self) -> None:
def check_result(
result: HttpResponse, extra_fields: Sequence[Tuple[str, Validator[object]]] = []
) -> None:
authentication_methods_list = [
("password", check_bool),
]
for backend_name_with_case in AUTH_BACKEND_NAME_MAP:
authentication_methods_list.append((backend_name_with_case.lower(), check_bool))
external_auth_methods = get_external_method_dicts()
self.assert_json_success(result)
checker = check_dict_only(
[
("authentication_methods", check_dict_only(authentication_methods_list)),
(
"external_authentication_methods",
check_list(
check_dict_only(
[
("display_icon", check_none_or(check_string)),
("display_name", check_string),
("login_url", check_string),
("name", check_string),
("signup_url", check_string),
]
),
length=len(external_auth_methods),
),
),
("email_auth_enabled", check_bool),
("is_incompatible", check_bool),
("require_email_format_usernames", check_bool),
("realm_uri", check_string),
("zulip_version", check_string),
("zulip_feature_level", check_int),
("push_notifications_enabled", check_bool),
("msg", check_string),
("result", check_string),
*extra_fields,
]
)
checker("data", result.json())
result = self.client_get("/api/v1/server_settings", subdomain="", HTTP_USER_AGENT="")
check_result(result)
self.assertEqual(
result.json()["external_authentication_methods"], get_external_method_dicts()
)
result = self.client_get(
"/api/v1/server_settings", subdomain="", HTTP_USER_AGENT="ZulipInvalid"
)
self.assertTrue(result.json()["is_incompatible"])
with self.settings(ROOT_DOMAIN_LANDING_PAGE=False):
result = self.client_get("/api/v1/server_settings", subdomain="", HTTP_USER_AGENT="")
check_result(result)
with self.settings(ROOT_DOMAIN_LANDING_PAGE=False):
result = self.client_get(
"/api/v1/server_settings", subdomain="zulip", HTTP_USER_AGENT=""
)
check_result(
result,
[
("realm_name", check_string),
("realm_description", check_string),
("realm_icon", check_string),
],
)
# Verify invalid subdomain
result = self.client_get("/api/v1/server_settings", subdomain="invalid")
self.assert_json_error_contains(result, "Invalid subdomain", 400)
with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
# With ROOT_DOMAIN_LANDING_PAGE, homepage fails
result = self.client_get("/api/v1/server_settings", subdomain="")
self.assert_json_error_contains(result, "Subdomain required", 400)
class TestTwoFactor(ZulipTestCase):
def test_direct_dev_login_with_2fa(self) -> None:
email = self.example_email("hamlet")
user_profile = self.example_user("hamlet")
with self.settings(TWO_FACTOR_AUTHENTICATION_ENABLED=True):
data = {"direct_email": email}
result = self.client_post("/accounts/login/local/", data)
self.assertEqual(result.status_code, 302)
self.assert_logged_in_user_id(user_profile.id)
# User logs in, but no otp device exists yet.
self.assertNotIn("otp_device_id", self.client.session.keys())
self.create_default_device(user_profile)
data = {"direct_email": email}
result = self.client_post("/accounts/login/local/", data)
self.assertEqual(result.status_code, 302)
self.assert_logged_in_user_id(user_profile.id)
# User logs in when otp device exists.
self.assertIn("otp_device_id", self.client.session.keys())
@mock.patch("two_factor.models.totp")
def test_two_factor_login_with_ldap(self, mock_totp: mock.MagicMock) -> None:
token = 123456
email = self.example_email("hamlet")
password = self.ldap_password("hamlet")
user_profile = self.example_user("hamlet")
user_profile.set_password(password)
user_profile.save()
self.create_default_device(user_profile)
def totp(*args: Any, **kwargs: Any) -> int:
return token
mock_totp.side_effect = totp
# Setup LDAP
self.init_default_ldap_database()
ldap_user_attr_map = {"full_name": "cn"}
with self.settings(
AUTHENTICATION_BACKENDS=("zproject.backends.ZulipLDAPAuthBackend",),
TWO_FACTOR_CALL_GATEWAY="two_factor.gateways.fake.Fake",
TWO_FACTOR_SMS_GATEWAY="two_factor.gateways.fake.Fake",
TWO_FACTOR_AUTHENTICATION_ENABLED=True,
POPULATE_PROFILE_VIA_LDAP=True,
LDAP_APPEND_DOMAIN="zulip.com",
AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
):
first_step_data = {
"username": email,
"password": password,
"two_factor_login_view-current_step": "auth",
}
with self.assertLogs("two_factor.gateways.fake", "INFO") as info_log:
result = self.client_post("/accounts/login/", first_step_data)
self.assertEqual(result.status_code, 200)
self.assertEqual(
info_log.output,
['INFO:two_factor.gateways.fake:Fake SMS to +12125550100: "Your token is: 123456"'],
)
second_step_data = {
"token-otp_token": str(token),
"two_factor_login_view-current_step": "token",
}
result = self.client_post("/accounts/login/", second_step_data)
self.assertEqual(result.status_code, 302)
self.assertEqual(result["Location"], "http://zulip.testserver")
# Going to login page should redirect to `realm.uri` if user is
# already logged in.
result = self.client_get("/accounts/login/")
self.assertEqual(result.status_code, 302)
self.assertEqual(result["Location"], "http://zulip.testserver")
class TestDevAuthBackend(ZulipTestCase):
def test_login_success(self) -> None:
user_profile = self.example_user("hamlet")
email = user_profile.delivery_email
data = {"direct_email": email}
result = self.client_post("/accounts/login/local/", data)
self.assertEqual(result.status_code, 302)
self.assert_logged_in_user_id(user_profile.id)
def test_login_success_with_2fa(self) -> None:
user_profile = self.example_user("hamlet")
self.create_default_device(user_profile)
email = user_profile.delivery_email
data = {"direct_email": email}
with self.settings(TWO_FACTOR_AUTHENTICATION_ENABLED=True):
result = self.client_post("/accounts/login/local/", data)
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, "http://zulip.testserver/")
self.assert_logged_in_user_id(user_profile.id)
self.assertIn("otp_device_id", list(self.client.session.keys()))
def test_redirect_to_next_url(self) -> None:
def do_local_login(formaction: str) -> HttpResponse:
user_email = self.example_email("hamlet")
data = {"direct_email": user_email}
return self.client_post(formaction, data)
res = do_local_login("/accounts/login/local/")
self.assertEqual(res.status_code, 302)
self.assertEqual(res.url, "http://zulip.testserver/")
res = do_local_login("/accounts/login/local/?next=/user_uploads/path_to_image")
self.assertEqual(res.status_code, 302)
self.assertEqual(res.url, "http://zulip.testserver/user_uploads/path_to_image")
# In local email-based authentication we never make the browser send the hash
# to the backend. Rather, we depend on the browser's behavior of persisting
# hash anchors across redirect requests. See this Stack Overflow discussion:
# https://stackoverflow.com/questions/5283395/url-hash-is-persisting-between-redirects
res = do_local_login("/accounts/login/local/?next=#narrow/stream/7-test-here")
self.assertEqual(res.status_code, 302)
self.assertEqual(res.url, "http://zulip.testserver")
def test_login_with_subdomain(self) -> None:
user_profile = self.example_user("hamlet")
email = user_profile.delivery_email
data = {"direct_email": email}
result = self.client_post("/accounts/login/local/", data)
self.assertEqual(result.status_code, 302)
self.assert_logged_in_user_id(user_profile.id)
def test_choose_realm(self) -> None:
result = self.client_post("/devlogin/", subdomain="zulip")
self.assertEqual(result.status_code, 200)
self.assert_in_success_response(["Click on a user to log in to Zulip Dev!"], result)
self.assert_in_success_response(["[email protected]", "[email protected]"], result)
result = self.client_post("/devlogin/", subdomain="")
self.assertEqual(result.status_code, 200)
self.assert_in_success_response(["Click on a user to log in!"], result)
self.assert_in_success_response(["[email protected]", "[email protected]"], result)
self.assert_in_success_response(["[email protected]", "[email protected]"], result)
result = self.client_post("/devlogin/", {"new_realm": "all_realms"}, subdomain="zephyr")
self.assertEqual(result.status_code, 200)
self.assert_in_success_response(["[email protected]", "[email protected]"], result)
self.assert_in_success_response(["Click on a user to log in!"], result)
self.assert_in_success_response(["[email protected]", "[email protected]"], result)
data = {"new_realm": "zephyr"}
result = self.client_post("/devlogin/", data, subdomain="zulip")
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, "http://zephyr.testserver")
result = self.client_get("/devlogin/", subdomain="zephyr")
self.assertEqual(result.status_code, 200)
self.assert_in_success_response(["[email protected]", "[email protected]"], result)
self.assert_in_success_response(["Click on a user to log in to MIT!"], result)
self.assert_not_in_success_response(["[email protected]", "[email protected]"], result)
def test_choose_realm_with_subdomains_enabled(self) -> None:
with mock.patch("zerver.views.auth.is_subdomain_root_or_alias", return_value=False):
with mock.patch(
"zerver.views.auth.get_realm_from_request", return_value=get_realm("zulip")
):
result = self.client_get("http://zulip.testserver/devlogin/")
self.assert_in_success_response(["[email protected]", "[email protected]"], result)
self.assert_not_in_success_response(["[email protected]", "[email protected]"], result)
self.assert_in_success_response(["Click on a user to log in to Zulip Dev!"], result)
with mock.patch(
"zerver.views.auth.get_realm_from_request", return_value=get_realm("zephyr")
):
result = self.client_post(
"http://zulip.testserver/devlogin/", {"new_realm": "zephyr"}
)
self.assertEqual(result["Location"], "http://zephyr.testserver")
result = self.client_get("http://zephyr.testserver/devlogin/")
self.assert_not_in_success_response(["[email protected]", "[email protected]"], result)
self.assert_in_success_response(["[email protected]", "[email protected]"], result)
self.assert_in_success_response(["Click on a user to log in to MIT!"], result)
def test_login_failure(self) -> None:
email = self.example_email("hamlet")
data = {"direct_email": email}
with self.settings(AUTHENTICATION_BACKENDS=("zproject.backends.EmailAuthBackend",)):
response = self.client_post("/accounts/login/local/", data)
self.assert_in_success_response(["Configuration error", "DevAuthBackend"], response)
def test_dev_direct_production_config_error(self) -> None:
result = self.client_get("/config-error/dev")
self.assertEqual(result.status_code, 200)
self.assert_in_success_response(["DevAuthBackend"], result)
def test_login_failure_due_to_nonexistent_user(self) -> None:
email = "[email protected]"
data = {"direct_email": email}
response = self.client_post("/accounts/login/local/", data)
self.assert_in_success_response(["Configuration error", "DevAuthBackend"], response)
class TestZulipRemoteUserBackend(DesktopFlowTestingLib, ZulipTestCase):
def test_start_remote_user_sso(self) -> None:
result = self.client_get(
"/accounts/login/start/sso/", {"param1": "value1", "params": "value2"}
)
self.assertEqual(result.status_code, 302)
url = result.url
parsed_url = urllib.parse.urlparse(url)
self.assertEqual(parsed_url.path, "/accounts/login/sso/")
self.assertEqual(parsed_url.query, "param1=value1&params=value2")
def test_start_remote_user_sso_with_desktop_app(self) -> None:
headers = dict(HTTP_USER_AGENT="ZulipElectron/5.0.0")
result = self.client_get("/accounts/login/start/sso/", {}, **headers)
self.verify_desktop_flow_app_page(result)
def test_login_success(self) -> None:
user_profile = self.example_user("hamlet")
email = user_profile.delivery_email
with self.settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipRemoteUserBackend",)):
result = self.client_get("/accounts/login/sso/", REMOTE_USER=email)
self.assertEqual(result.status_code, 302)
self.assert_logged_in_user_id(user_profile.id)
def test_login_success_with_sso_append_domain(self) -> None:
username = "hamlet"
user_profile = self.example_user("hamlet")
with self.settings(
AUTHENTICATION_BACKENDS=("zproject.backends.ZulipRemoteUserBackend",),
SSO_APPEND_DOMAIN="zulip.com",
):
result = self.client_get("/accounts/login/sso/", REMOTE_USER=username)
self.assertEqual(result.status_code, 302)
self.assert_logged_in_user_id(user_profile.id)
def test_login_case_insensitive(self) -> None:
user_profile = self.example_user("hamlet")
email_upper = user_profile.delivery_email.upper()
with self.settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipRemoteUserBackend",)):
result = self.client_get("/accounts/login/sso/", REMOTE_USER=email_upper)
self.assertEqual(result.status_code, 302)
self.assert_logged_in_user_id(user_profile.id)
def test_login_failure(self) -> None:
email = self.example_email("hamlet")
result = self.client_get("/accounts/login/sso/", REMOTE_USER=email)
self.assert_in_success_response(
["Configuration error", "Authentication via the REMOTE_USER header is"], result
)
self.assert_logged_in_user_id(None)
def test_login_failure_due_to_nonexisting_user(self) -> None:
email = "[email protected]"
with self.settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipRemoteUserBackend",)):
result = self.client_get("/accounts/login/sso/", REMOTE_USER=email)
self.assertEqual(result.status_code, 200)
self.assert_logged_in_user_id(None)
self.assert_in_response("No account found for", result)
def test_login_failure_due_to_invalid_email(self) -> None:
email = "hamlet"
with self.settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipRemoteUserBackend",)):
result = self.client_get("/accounts/login/sso/", REMOTE_USER=email)
self.assert_json_error_contains(result, "Enter a valid email address.", 400)
def test_login_failure_due_to_missing_field(self) -> None:
with self.settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipRemoteUserBackend",)):
result = self.client_get("/accounts/login/sso/")
self.assert_in_success_response(
["Configuration error", "The REMOTE_USER header is not set."], result
)
def test_login_failure_due_to_wrong_subdomain(self) -> None:
email = self.example_email("hamlet")
with self.settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipRemoteUserBackend",)):
with mock.patch("zerver.views.auth.get_subdomain", return_value="acme"):
result = self.client_get(
"http://testserver:9080/accounts/login/sso/", REMOTE_USER=email
)
self.assertEqual(result.status_code, 200)
self.assert_logged_in_user_id(None)
self.assert_in_response("You need an invitation to join this organization.", result)
def test_login_failure_due_to_empty_subdomain(self) -> None:
email = self.example_email("hamlet")
with self.settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipRemoteUserBackend",)):
with mock.patch("zerver.views.auth.get_subdomain", return_value=""):
result = self.client_get(
"http://testserver:9080/accounts/login/sso/", REMOTE_USER=email
)
self.assertEqual(result.status_code, 200)
self.assert_logged_in_user_id(None)
self.assert_in_response("You need an invitation to join this organization.", result)
def test_login_success_under_subdomains(self) -> None:
user_profile = self.example_user("hamlet")
email = user_profile.delivery_email
with mock.patch("zerver.views.auth.get_subdomain", return_value="zulip"):
with self.settings(
AUTHENTICATION_BACKENDS=("zproject.backends.ZulipRemoteUserBackend",)
):
result = self.client_get("/accounts/login/sso/", REMOTE_USER=email)
self.assertEqual(result.status_code, 302)
self.assert_logged_in_user_id(user_profile.id)
@override_settings(SEND_LOGIN_EMAILS=True)
@override_settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipRemoteUserBackend",))
def test_login_mobile_flow_otp_success_email(self) -> None:
user_profile = self.example_user("hamlet")
email = user_profile.delivery_email
user_profile.date_joined = timezone_now() - datetime.timedelta(seconds=61)
user_profile.save()
mobile_flow_otp = "1234abcd" * 8
# Verify that the right thing happens with an invalid-format OTP
result = self.client_get(
"/accounts/login/sso/",
dict(mobile_flow_otp="1234"),
REMOTE_USER=email,
HTTP_USER_AGENT="ZulipAndroid",
)
self.assert_logged_in_user_id(None)
self.assert_json_error_contains(result, "Invalid OTP", 400)
result = self.client_get(
"/accounts/login/sso/",
dict(mobile_flow_otp="invalido" * 8),
REMOTE_USER=email,
HTTP_USER_AGENT="ZulipAndroid",
)
self.assert_logged_in_user_id(None)
self.assert_json_error_contains(result, "Invalid OTP", 400)
result = self.client_get(
"/accounts/login/sso/",
dict(mobile_flow_otp=mobile_flow_otp),
REMOTE_USER=email,
HTTP_USER_AGENT="ZulipAndroid",
)
self.assertEqual(result.status_code, 302)
redirect_url = result["Location"]
parsed_url = urllib.parse.urlparse(redirect_url)
query_params = urllib.parse.parse_qs(parsed_url.query)
self.assertEqual(parsed_url.scheme, "zulip")
self.assertEqual(query_params["realm"], ["http://zulip.testserver"])
self.assertEqual(query_params["email"], [self.example_email("hamlet")])
encrypted_api_key = query_params["otp_encrypted_api_key"][0]
hamlet_api_keys = get_all_api_keys(self.example_user("hamlet"))
self.assertIn(otp_decrypt_api_key(encrypted_api_key, mobile_flow_otp), hamlet_api_keys)
self.assertEqual(len(mail.outbox), 1)
self.assertIn("Zulip on Android", mail.outbox[0].body)
@override_settings(SEND_LOGIN_EMAILS=True)
@override_settings(SSO_APPEND_DOMAIN="zulip.com")
@override_settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipRemoteUserBackend",))
def test_login_mobile_flow_otp_success_username(self) -> None:
user_profile = self.example_user("hamlet")
email = user_profile.delivery_email
remote_user = email_to_username(email)
user_profile.date_joined = timezone_now() - datetime.timedelta(seconds=61)
user_profile.save()
mobile_flow_otp = "1234abcd" * 8
# Verify that the right thing happens with an invalid-format OTP
result = self.client_get(
"/accounts/login/sso/",
dict(mobile_flow_otp="1234"),
REMOTE_USER=remote_user,
HTTP_USER_AGENT="ZulipAndroid",
)
self.assert_logged_in_user_id(None)
self.assert_json_error_contains(result, "Invalid OTP", 400)
result = self.client_get(
"/accounts/login/sso/",
dict(mobile_flow_otp="invalido" * 8),
REMOTE_USER=remote_user,
HTTP_USER_AGENT="ZulipAndroid",
)
self.assert_logged_in_user_id(None)
self.assert_json_error_contains(result, "Invalid OTP", 400)
result = self.client_get(
"/accounts/login/sso/",
dict(mobile_flow_otp=mobile_flow_otp),
REMOTE_USER=remote_user,
HTTP_USER_AGENT="ZulipAndroid",
)
self.assertEqual(result.status_code, 302)
redirect_url = result["Location"]
parsed_url = urllib.parse.urlparse(redirect_url)
query_params = urllib.parse.parse_qs(parsed_url.query)
self.assertEqual(parsed_url.scheme, "zulip")
self.assertEqual(query_params["realm"], ["http://zulip.testserver"])
self.assertEqual(query_params["email"], [self.example_email("hamlet")])
encrypted_api_key = query_params["otp_encrypted_api_key"][0]
hamlet_api_keys = get_all_api_keys(self.example_user("hamlet"))
self.assertIn(otp_decrypt_api_key(encrypted_api_key, mobile_flow_otp), hamlet_api_keys)
self.assertEqual(len(mail.outbox), 1)
self.assertIn("Zulip on Android", mail.outbox[0].body)
@override_settings(SEND_LOGIN_EMAILS=True)
@override_settings(
AUTHENTICATION_BACKENDS=(
"zproject.backends.ZulipRemoteUserBackend",
"zproject.backends.ZulipDummyBackend",
)
)
def test_login_desktop_flow_otp_success_email(self) -> None:
user_profile = self.example_user("hamlet")
email = user_profile.delivery_email
user_profile.date_joined = timezone_now() - datetime.timedelta(seconds=61)
user_profile.save()
desktop_flow_otp = "1234abcd" * 8
# Verify that the right thing happens with an invalid-format OTP
result = self.client_get(
"/accounts/login/sso/", dict(desktop_flow_otp="1234"), REMOTE_USER=email
)
self.assert_logged_in_user_id(None)
self.assert_json_error_contains(result, "Invalid OTP", 400)
result = self.client_get(
"/accounts/login/sso/", dict(desktop_flow_otp="invalido" * 8), REMOTE_USER=email
)
self.assert_logged_in_user_id(None)
self.assert_json_error_contains(result, "Invalid OTP", 400)
result = self.client_get(
"/accounts/login/sso/", dict(desktop_flow_otp=desktop_flow_otp), REMOTE_USER=email
)
self.verify_desktop_flow_end_page(result, email, desktop_flow_otp)
@override_settings(SEND_LOGIN_EMAILS=True)
@override_settings(SSO_APPEND_DOMAIN="zulip.com")
@override_settings(
AUTHENTICATION_BACKENDS=(
"zproject.backends.ZulipRemoteUserBackend",
"zproject.backends.ZulipDummyBackend",
)
)
def test_login_desktop_flow_otp_success_username(self) -> None:
user_profile = self.example_user("hamlet")
email = user_profile.delivery_email
remote_user = email_to_username(email)
user_profile.date_joined = timezone_now() - datetime.timedelta(seconds=61)
user_profile.save()
desktop_flow_otp = "1234abcd" * 8
# Verify that the right thing happens with an invalid-format OTP
result = self.client_get(
"/accounts/login/sso/", dict(desktop_flow_otp="1234"), REMOTE_USER=remote_user
)
self.assert_logged_in_user_id(None)
self.assert_json_error_contains(result, "Invalid OTP", 400)
result = self.client_get(
"/accounts/login/sso/", dict(desktop_flow_otp="invalido" * 8), REMOTE_USER=remote_user
)
self.assert_logged_in_user_id(None)
self.assert_json_error_contains(result, "Invalid OTP", 400)
result = self.client_get(
"/accounts/login/sso/", dict(desktop_flow_otp=desktop_flow_otp), REMOTE_USER=remote_user
)
self.verify_desktop_flow_end_page(result, email, desktop_flow_otp)
def test_redirect_to(self) -> None:
"""This test verifies the behavior of the redirect_to logic in
login_or_register_remote_user."""
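# The three cases exercised below: (1) no "next" parameter falls back to the realm root,
# (2) a relative "next" path is preserved, and (3) an absolute URL on a third-party domain
# is rejected and also falls back to the realm root.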
def test_with_redirect_to_param_set_as_next(next: str = "") -> HttpResponse:
user_profile = self.example_user("hamlet")
email = user_profile.delivery_email
with self.settings(
AUTHENTICATION_BACKENDS=("zproject.backends.ZulipRemoteUserBackend",)
):
result = self.client_get("/accounts/login/sso/", {"next": next}, REMOTE_USER=email)
return result
res = test_with_redirect_to_param_set_as_next()
self.assertEqual("http://zulip.testserver", res.url)
res = test_with_redirect_to_param_set_as_next("/user_uploads/image_path")
self.assertEqual("http://zulip.testserver/user_uploads/image_path", res.url)
# Third-party domains are rejected and just send you to the root domain
res = test_with_redirect_to_param_set_as_next("https://rogue.zulip-like.server/login")
self.assertEqual("http://zulip.testserver", res.url)
class TestJWTLogin(ZulipTestCase):
"""
JWT uses ZulipDummyBackend.
"""
def test_login_success(self) -> None:
payload = {"user": "hamlet", "realm": "zulip.com"}
with self.settings(JWT_AUTH_KEYS={"zulip": {"key": "key", "algorithms": ["HS256"]}}):
email = self.example_email("hamlet")
realm = get_realm("zulip")
key = settings.JWT_AUTH_KEYS["zulip"]["key"]
[algorithm] = settings.JWT_AUTH_KEYS["zulip"]["algorithms"]
web_token = jwt.encode(payload, key, algorithm).decode("utf8")
user_profile = get_user_by_delivery_email(email, realm)
data = {"json_web_token": web_token}
result = self.client_post("/accounts/login/jwt/", data)
self.assertEqual(result.status_code, 302)
self.assert_logged_in_user_id(user_profile.id)
def test_login_failure_when_user_is_missing(self) -> None:
payload = {"realm": "zulip.com"}
with self.settings(JWT_AUTH_KEYS={"zulip": {"key": "key", "algorithms": ["HS256"]}}):
key = settings.JWT_AUTH_KEYS["zulip"]["key"]
[algorithm] = settings.JWT_AUTH_KEYS["zulip"]["algorithms"]
web_token = jwt.encode(payload, key, algorithm).decode("utf8")
data = {"json_web_token": web_token}
result = self.client_post("/accounts/login/jwt/", data)
self.assert_json_error_contains(
result, "No user specified in JSON web token claims", 400
)
def test_login_failure_when_realm_is_missing(self) -> None:
payload = {"user": "hamlet"}
with self.settings(JWT_AUTH_KEYS={"zulip": {"key": "key", "algorithms": ["HS256"]}}):
key = settings.JWT_AUTH_KEYS["zulip"]["key"]
[algorithm] = settings.JWT_AUTH_KEYS["zulip"]["algorithms"]
web_token = jwt.encode(payload, key, algorithm).decode("utf8")
data = {"json_web_token": web_token}
result = self.client_post("/accounts/login/jwt/", data)
self.assert_json_error_contains(
result, "No organization specified in JSON web token claims", 400
)
def test_login_failure_when_key_does_not_exist(self) -> None:
data = {"json_web_token": "not relevant"}
result = self.client_post("/accounts/login/jwt/", data)
self.assert_json_error_contains(result, "Auth key for this subdomain not found.", 400)
def test_login_failure_when_key_is_missing(self) -> None:
with self.settings(JWT_AUTH_KEYS={"zulip": {"key": "key", "algorithms": ["HS256"]}}):
result = self.client_post("/accounts/login/jwt/")
self.assert_json_error_contains(result, "No JSON web token passed in request", 400)
def test_login_failure_when_bad_token_is_passed(self) -> None:
with self.settings(JWT_AUTH_KEYS={"zulip": {"key": "key", "algorithms": ["HS256"]}}):
result = self.client_post("/accounts/login/jwt/")
self.assert_json_error_contains(result, "No JSON web token passed in request", 400)
data = {"json_web_token": "bad token"}
result = self.client_post("/accounts/login/jwt/", data)
self.assert_json_error_contains(result, "Bad JSON web token", 400)
def test_login_failure_when_user_does_not_exist(self) -> None:
payload = {"user": "nonexisting", "realm": "zulip.com"}
with self.settings(JWT_AUTH_KEYS={"zulip": {"key": "key", "algorithms": ["HS256"]}}):
key = settings.JWT_AUTH_KEYS["zulip"]["key"]
[algorithm] = settings.JWT_AUTH_KEYS["zulip"]["algorithms"]
web_token = jwt.encode(payload, key, algorithm).decode("utf8")
data = {"json_web_token": web_token}
result = self.client_post("/accounts/login/jwt/", data)
self.assertEqual(result.status_code, 200) # This should ideally be not 200.
self.assert_logged_in_user_id(None)
def test_login_failure_due_to_wrong_subdomain(self) -> None:
payload = {"user": "hamlet", "realm": "zulip.com"}
with self.settings(JWT_AUTH_KEYS={"acme": {"key": "key", "algorithms": ["HS256"]}}):
with mock.patch("zerver.views.auth.get_subdomain", return_value="acme"):
key = settings.JWT_AUTH_KEYS["acme"]["key"]
[algorithm] = settings.JWT_AUTH_KEYS["acme"]["algorithms"]
web_token = jwt.encode(payload, key, algorithm).decode("utf8")
data = {"json_web_token": web_token}
result = self.client_post("/accounts/login/jwt/", data)
self.assert_json_error_contains(result, "Wrong subdomain", 400)
self.assert_logged_in_user_id(None)
def test_login_failure_due_to_empty_subdomain(self) -> None:
payload = {"user": "hamlet", "realm": "zulip.com"}
with self.settings(JWT_AUTH_KEYS={"": {"key": "key", "algorithms": ["HS256"]}}):
with mock.patch("zerver.views.auth.get_subdomain", return_value=""):
key = settings.JWT_AUTH_KEYS[""]["key"]
[algorithm] = settings.JWT_AUTH_KEYS[""]["algorithms"]
web_token = jwt.encode(payload, key, algorithm).decode("utf8")
data = {"json_web_token": web_token}
result = self.client_post("/accounts/login/jwt/", data)
self.assert_json_error_contains(result, "Wrong subdomain", 400)
self.assert_logged_in_user_id(None)
def test_login_success_under_subdomains(self) -> None:
payload = {"user": "hamlet", "realm": "zulip.com"}
with self.settings(JWT_AUTH_KEYS={"zulip": {"key": "key", "algorithms": ["HS256"]}}):
with mock.patch("zerver.views.auth.get_subdomain", return_value="zulip"):
key = settings.JWT_AUTH_KEYS["zulip"]["key"]
[algorithm] = settings.JWT_AUTH_KEYS["zulip"]["algorithms"]
web_token = jwt.encode(payload, key, algorithm).decode("utf8")
data = {"json_web_token": web_token}
result = self.client_post("/accounts/login/jwt/", data)
self.assertEqual(result.status_code, 302)
user_profile = self.example_user("hamlet")
self.assert_logged_in_user_id(user_profile.id)
class DjangoToLDAPUsernameTests(ZulipTestCase):
def setUp(self) -> None:
self.init_default_ldap_database()
self.backend = ZulipLDAPAuthBackend()
def test_django_to_ldap_username_with_append_domain(self) -> None:
with self.settings(LDAP_APPEND_DOMAIN="zulip.com"):
self.assertEqual(self.backend.django_to_ldap_username("hamlet"), "hamlet")
self.assertEqual(self.backend.django_to_ldap_username("hamlet@zulip.com"), "hamlet")
with self.assertRaisesRegex(
ZulipLDAPExceptionOutsideDomain,
"Email [email protected] does not match LDAP domain zulip.com.",
):
self.backend.django_to_ldap_username("[email protected]")
self.mock_ldap.directory['uid="hamlet@test",ou=users,dc=zulip,dc=com'] = {
"cn": ["King Hamlet"],
"uid": ['"hamlet@test"'],
}
username = self.backend.django_to_ldap_username('"hamlet@test"@zulip.com')
self.assertEqual(username, '"hamlet@test"')
self.mock_ldap.directory['uid="hamlet@test"@zulip,ou=users,dc=zulip,dc=com'] = {
"cn": ["King Hamlet"],
"uid": ['"hamlet@test"@zulip'],
}
username = self.backend.django_to_ldap_username('"hamlet@test"@zulip')
self.assertEqual(username, '"hamlet@test"@zulip')
def test_django_to_ldap_username_with_email_search(self) -> None:
self.assertEqual(
self.backend.django_to_ldap_username("hamlet"), self.ldap_username("hamlet")
)
self.assertEqual(
self.backend.django_to_ldap_username("hamlet@zulip.com"), self.ldap_username("hamlet")
)
# If there are no matches through the email search, raise an exception:
with self.assertRaises(ZulipLDAPExceptionNoMatchingLDAPUser):
self.backend.django_to_ldap_username("[email protected]")
self.assertEqual(
self.backend.django_to_ldap_username("aaron@zulip.com"), self.ldap_username("aaron")
)
with self.assertLogs(level="WARNING") as m:
with self.assertRaises(ZulipLDAPExceptionNoMatchingLDAPUser):
self.backend.django_to_ldap_username("[email protected]")
self.assertEqual(
m.output,
[
"WARNING:root:Multiple users with email {} found in LDAP.".format(
"[email protected]"
)
],
)
# Test the weird case of a user whose uid is an email address while their actual
# "mail" attribute is a different email address:
self.mock_ldap.directory["uid=some_user@organization_a.com,ou=users,dc=zulip,dc=com"] = {
"cn": ["Some User"],
"uid": ["some_user@organization_a.com"],
"mail": ["[email protected]"],
}
self.assertEqual(
self.backend.django_to_ldap_username("[email protected]"),
"some_user@organization_a.com",
)
self.assertEqual(
self.backend.django_to_ldap_username("some_user@organization_a.com"),
"some_user@organization_a.com",
)
# Configure email search for emails in the uid attribute:
with self.settings(
AUTH_LDAP_REVERSE_EMAIL_SEARCH=LDAPSearch(
"ou=users,dc=zulip,dc=com", ldap.SCOPE_ONELEVEL, "(uid=%(email)s)"
)
):
self.assertEqual(
self.backend.django_to_ldap_username("[email protected]"),
"[email protected]",
)
self.mock_ldap.directory['uid="hamlet@test"@zulip.com",ou=users,dc=zulip,dc=com'] = {
"cn": ["King Hamlet"],
"uid": ['"hamlet@test"@zulip.com'],
}
username = self.backend.django_to_ldap_username('"hamlet@test"@zulip.com')
self.assertEqual(username, '"hamlet@test"@zulip.com')
@override_settings(
AUTHENTICATION_BACKENDS=(
"zproject.backends.EmailAuthBackend",
"zproject.backends.ZulipLDAPAuthBackend",
)
)
def test_authenticate_to_ldap_via_email(self) -> None:
"""
With AUTH_LDAP_REVERSE_EMAIL_SEARCH configured, django_to_ldap_username
should be able to translate an email to an LDAP username,
and thus it should be possible to authenticate through user_profile.delivery_email.
"""
realm = get_realm("zulip")
user_profile = self.example_user("hamlet")
password = "testpassword"
user_profile.set_password(password)
user_profile.save()
with self.settings(LDAP_EMAIL_ATTR="mail"):
self.assertEqual(
authenticate(
request=mock.MagicMock(),
username=user_profile.delivery_email,
password=self.ldap_password("hamlet"),
realm=realm,
),
user_profile,
)
@override_settings(LDAP_EMAIL_ATTR="mail", LDAP_DEACTIVATE_NON_MATCHING_USERS=True)
def test_sync_user_from_ldap_with_email_attr(self) -> None:
"""In LDAP configurations with LDAP_EMAIL_ATTR configured and
LDAP_DEACTIVATE_NON_MATCHING_USERS set, a possible failure
mode if django_to_ldap_username isn't configured correctly is
all LDAP users having their accounts deactivated. Before the
introduction of AUTH_LDAP_REVERSE_EMAIL_SEARCH, this would happen
even in valid LDAP configurations using LDAP_EMAIL_ATTR.
This test confirms that such a failure mode doesn't happen with
a valid LDAP configuration.
"""
user_profile = self.example_user("hamlet")
with self.settings():
sync_user_from_ldap(user_profile, mock.Mock())
# Syncing didn't deactivate the user:
self.assertTrue(user_profile.is_active)
class ZulipLDAPTestCase(ZulipTestCase):
def setUp(self) -> None:
super().setUp()
self.init_default_ldap_database()
user_profile = self.example_user("hamlet")
self.setup_subdomain(user_profile)
self.backend = ZulipLDAPAuthBackend()
# Internally `_realm` and `_prereg_user` attributes are automatically set
# by the `authenticate()` method. But for testing the `get_or_build_user()`
# method separately, we need to set them manually.
self.backend._realm = get_realm("zulip")
self.backend._prereg_user = None
def setup_subdomain(self, user_profile: UserProfile) -> None:
realm = user_profile.realm
realm.string_id = "zulip"
realm.save()
class TestLDAP(ZulipLDAPTestCase):
def test_generate_dev_ldap_dir(self) -> None:
ldap_dir = generate_dev_ldap_dir("A", 10)
self.assertEqual(len(ldap_dir), 10)
regex = re.compile(
r"(uid\=)+[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+(\,ou\=users\,dc\=zulip\,dc\=com)"
)
common_attrs = ["cn", "userPassword", "phoneNumber", "birthDate"]
for key, value in ldap_dir.items():
self.assertTrue(regex.match(key))
self.assertCountEqual(
list(value.keys()), [*common_attrs, "uid", "thumbnailPhoto", "userAccountControl"]
)
ldap_dir = generate_dev_ldap_dir("b", 9)
self.assertEqual(len(ldap_dir), 9)
regex = re.compile(r"(uid\=)+[a-zA-Z0-9_.+-]+(\,ou\=users\,dc\=zulip\,dc\=com)")
for key, value in ldap_dir.items():
self.assertTrue(regex.match(key))
self.assertCountEqual(list(value.keys()), [*common_attrs, "uid", "jpegPhoto"])
ldap_dir = generate_dev_ldap_dir("c", 8)
self.assertEqual(len(ldap_dir), 8)
regex = re.compile(r"(uid\=)+[a-zA-Z0-9_.+-]+(\,ou\=users\,dc\=zulip\,dc\=com)")
for key, value in ldap_dir.items():
self.assertTrue(regex.match(key))
self.assertCountEqual(list(value.keys()), [*common_attrs, "uid", "email"])
@override_settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipLDAPAuthBackend",))
def test_dev_ldap_fail_login(self) -> None:
# Tests that login with a substring of the password fails. We had a bug in
# the dev LDAP environment that allowed login via password substrings.
self.mock_ldap.directory = generate_dev_ldap_dir("B", 8)
with self.settings(
AUTH_LDAP_USER_SEARCH=LDAPSearch(
"ou=users,dc=zulip,dc=com", ldap.SCOPE_ONELEVEL, "(uid=%(user)s)"
),
AUTH_LDAP_REVERSE_EMAIL_SEARCH=LDAPSearch(
"ou=users,dc=zulip,dc=com", ldap.SCOPE_ONELEVEL, "(email=%(email)s)"
),
LDAP_APPEND_DOMAIN="zulip.com",
):
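# "dapu" is a proper substring of ldapuser1's dev LDAP password (an assumption based on
# the bug description above); authentication with it must be rejected.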
user_profile = self.backend.authenticate(
request=mock.MagicMock(),
username="ldapuser1",
password="dapu",
realm=get_realm("zulip"),
)
assert user_profile is None
@override_settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipLDAPAuthBackend",))
def test_login_success(self) -> None:
with self.settings(LDAP_APPEND_DOMAIN="zulip.com"):
user_profile = self.backend.authenticate(
request=mock.MagicMock(),
username=self.example_email("hamlet"),
password=self.ldap_password("hamlet"),
realm=get_realm("zulip"),
)
assert user_profile is not None
self.assertEqual(user_profile.delivery_email, self.example_email("hamlet"))
@override_settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipLDAPAuthBackend",))
def test_login_success_with_username(self) -> None:
with self.settings(LDAP_APPEND_DOMAIN="zulip.com"):
user_profile = self.backend.authenticate(
request=mock.MagicMock(),
username="hamlet",
password=self.ldap_password("hamlet"),
realm=get_realm("zulip"),
)
assert user_profile is not None
self.assertEqual(user_profile, self.example_user("hamlet"))
@override_settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipLDAPAuthBackend",))
def test_login_success_with_email_attr(self) -> None:
with self.settings(LDAP_EMAIL_ATTR="mail"):
username = self.ldap_username("aaron")
user_profile = self.backend.authenticate(
request=mock.MagicMock(),
username=username,
password=self.ldap_password(username),
realm=get_realm("zulip"),
)
assert user_profile is not None
self.assertEqual(user_profile, self.example_user("aaron"))
@override_settings(
AUTHENTICATION_BACKENDS=(
"zproject.backends.EmailAuthBackend",
"zproject.backends.ZulipLDAPAuthBackend",
)
)
def test_email_and_ldap_backends_together(self) -> None:
with self.settings(
LDAP_EMAIL_ATTR="mail",
AUTH_LDAP_REVERSE_EMAIL_SEARCH=LDAPSearch(
"ou=users,dc=zulip,dc=com", ldap.SCOPE_ONELEVEL, "(mail=%(email)s)"
),
AUTH_LDAP_USERNAME_ATTR="uid",
):
realm = get_realm("zulip")
self.assertEqual(email_belongs_to_ldap(realm, self.example_email("aaron")), True)
username = self.ldap_username("aaron")
user_profile = ZulipLDAPAuthBackend().authenticate(
request=mock.MagicMock(),
username=username,
password=self.ldap_password(username),
realm=realm,
)
self.assertEqual(user_profile, self.example_user("aaron"))
othello = self.example_user("othello")
password = "testpassword"
othello.set_password(password)
othello.save()
self.assertEqual(email_belongs_to_ldap(realm, othello.delivery_email), False)
user_profile = EmailAuthBackend().authenticate(
request=mock.MagicMock(),
username=othello.delivery_email,
password=password,
realm=realm,
)
self.assertEqual(user_profile, othello)
@override_settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipLDAPAuthBackend",))
def test_login_failure_due_to_wrong_password(self) -> None:
with self.settings(LDAP_APPEND_DOMAIN="zulip.com"):
user = self.backend.authenticate(
request=mock.MagicMock(),
username=self.example_email("hamlet"),
password="wrong",
realm=get_realm("zulip"),
)
self.assertIs(user, None)
@override_settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipLDAPAuthBackend",))
def test_login_failure_due_to_nonexistent_user(self) -> None:
with self.settings(LDAP_APPEND_DOMAIN="zulip.com"), self.assertLogs(
"zulip.ldap", level="DEBUG"
) as log_debug:
user = self.backend.authenticate(
request=mock.MagicMock(),
username="[email protected]",
password="doesnt_matter",
realm=get_realm("zulip"),
)
self.assertEqual(
log_debug.output,
[
"DEBUG:zulip.ldap:ZulipLDAPAuthBackend: No LDAP user matching django_to_ldap_username result: nonexistent. Input username: [email protected]"
],
)
self.assertIs(user, None)
@override_settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipLDAPAuthBackend",))
def test_ldap_permissions(self) -> None:
backend = self.backend
self.assertFalse(backend.has_perm(None, None))
self.assertFalse(backend.has_module_perms(None, None))
self.assertTrue(backend.get_all_permissions(None, None) == set())
self.assertTrue(backend.get_group_permissions(None, None) == set())
@override_settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipLDAPAuthBackend",))
def test_user_email_from_ldapuser_with_append_domain(self) -> None:
backend = self.backend
with self.settings(LDAP_APPEND_DOMAIN="zulip.com"):
username = backend.user_email_from_ldapuser(
"this_argument_is_ignored", _LDAPUser(self.backend, username='"hamlet@test"')
)
self.assertEqual(username, '"hamlet@test"@zulip.com')
@override_settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipLDAPAuthBackend",))
def test_get_or_build_user_when_user_exists(self) -> None:
class _LDAPUser:
attrs = {"fn": ["Full Name"], "sn": ["Short Name"]}
backend = self.backend
email = self.example_email("hamlet")
user_profile, created = backend.get_or_build_user(str(email), _LDAPUser())
self.assertFalse(created)
self.assertEqual(user_profile.delivery_email, email)
@override_settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipLDAPAuthBackend",))
def test_get_or_build_user_when_user_does_not_exist(self) -> None:
class _LDAPUser:
attrs = {"fn": ["Full Name"]}
ldap_user_attr_map = {"full_name": "fn"}
with self.settings(AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map):
backend = self.backend
email = "[email protected]"
user_profile, created = backend.get_or_build_user(email, _LDAPUser())
self.assertTrue(created)
self.assertEqual(user_profile.delivery_email, email)
self.assertEqual(user_profile.full_name, "Full Name")
@override_settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipLDAPAuthBackend",))
def test_get_or_build_user_when_user_has_invalid_name(self) -> None:
class _LDAPUser:
attrs = {"fn": ["<invalid name>"]}
ldap_user_attr_map = {"full_name": "fn"}
with self.settings(AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map):
backend = self.backend
email = "[email protected]"
with self.assertRaisesRegex(Exception, "Invalid characters in name!"):
backend.get_or_build_user(email, _LDAPUser())
@override_settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipLDAPAuthBackend",))
def test_get_or_build_user_when_realm_is_deactivated(self) -> None:
class _LDAPUser:
attrs = {"fn": ["Full Name"]}
ldap_user_attr_map = {"full_name": "fn"}
with self.settings(AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map):
backend = self.backend
email = "[email protected]"
do_deactivate_realm(backend._realm, acting_user=None)
with self.assertRaisesRegex(Exception, "Realm has been deactivated"):
backend.get_or_build_user(email, _LDAPUser())
@override_settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipLDAPAuthBackend",))
def test_get_or_build_user_when_ldap_has_no_email_attr(self) -> None:
class _LDAPUser:
attrs = {"fn": ["Full Name"], "sn": ["Short Name"]}
nonexisting_attr = "email"
with self.settings(LDAP_EMAIL_ATTR=nonexisting_attr):
backend = self.backend
email = "[email protected]"
with self.assertRaisesRegex(
Exception, "LDAP user doesn't have the needed email attribute"
):
backend.get_or_build_user(email, _LDAPUser())
@override_settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipLDAPAuthBackend",))
def test_get_or_build_user_email(self) -> None:
class _LDAPUser:
attrs = {"fn": ["Test User"]}
ldap_user_attr_map = {"full_name": "fn"}
with self.settings(AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map):
realm = self.backend._realm
realm.emails_restricted_to_domains = False
realm.disallow_disposable_email_addresses = True
realm.save()
email = "[email protected]"
with self.assertRaisesRegex(ZulipLDAPException, "Email validation failed."):
self.backend.get_or_build_user(email, _LDAPUser())
realm.emails_restricted_to_domains = True
realm.save(update_fields=["emails_restricted_to_domains"])
email = "[email protected]"
with self.assertRaisesRegex(ZulipLDAPException, "Email validation failed."):
self.backend.get_or_build_user(email, _LDAPUser())
email = "[email protected]"
with self.assertRaisesRegex(
ZulipLDAPException, "This email domain isn't allowed in this organization."
):
self.backend.get_or_build_user(email, _LDAPUser())
@override_settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipLDAPAuthBackend",))
def test_get_or_build_user_when_ldap_has_no_full_name_mapping(self) -> None:
class _LDAPUser:
attrs = {"fn": ["Full Name"], "sn": ["Short Name"]}
with self.settings(AUTH_LDAP_USER_ATTR_MAP={}):
backend = self.backend
email = "[email protected]"
with self.assertRaisesRegex(Exception, "Missing required mapping for user's full name"):
backend.get_or_build_user(email, _LDAPUser())
@override_settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipLDAPAuthBackend",))
def test_login_failure_when_domain_does_not_match(self) -> None:
with self.settings(LDAP_APPEND_DOMAIN="acme.com"), self.assertLogs(
"zulip.ldap", "DEBUG"
) as debug_log:
user_profile = self.backend.authenticate(
request=mock.MagicMock(),
username=self.example_email("hamlet"),
password=self.ldap_password("hamlet"),
realm=get_realm("zulip"),
)
self.assertIs(user_profile, None)
self.assertEqual(
debug_log.output,
[
"DEBUG:zulip.ldap:ZulipLDAPAuthBackend: Email [email protected] does not match LDAP domain acme.com."
],
)
@override_settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipLDAPAuthBackend",))
def test_login_success_with_different_subdomain(self) -> None:
ldap_user_attr_map = {"full_name": "cn"}
do_create_realm(string_id="acme", name="acme")
with self.settings(
LDAP_APPEND_DOMAIN="zulip.com", AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map
):
user_profile = self.backend.authenticate(
request=mock.MagicMock(),
username=self.example_email("hamlet"),
password=self.ldap_password("hamlet"),
realm=get_realm("acme"),
)
self.assertEqual(user_profile.delivery_email, self.example_email("hamlet"))
@override_settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipLDAPAuthBackend",))
def test_login_success_with_valid_subdomain(self) -> None:
with self.settings(LDAP_APPEND_DOMAIN="zulip.com"):
user_profile = self.backend.authenticate(
request=mock.MagicMock(),
username=self.example_email("hamlet"),
password=self.ldap_password("hamlet"),
realm=get_realm("zulip"),
)
assert user_profile is not None
self.assertEqual(user_profile.delivery_email, self.example_email("hamlet"))
@override_settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipLDAPAuthBackend",))
def test_login_failure_due_to_deactivated_user(self) -> None:
user_profile = self.example_user("hamlet")
do_deactivate_user(user_profile, acting_user=None)
with self.settings(LDAP_APPEND_DOMAIN="zulip.com"):
user_profile = self.backend.authenticate(
request=mock.MagicMock(),
username=self.example_email("hamlet"),
password=self.ldap_password("hamlet"),
realm=get_realm("zulip"),
)
self.assertIs(user_profile, None)
@override_settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipLDAPAuthBackend",))
@override_settings(
AUTH_LDAP_USER_ATTR_MAP={
"full_name": "cn",
"avatar": "jpegPhoto",
}
)
def test_login_success_when_user_does_not_exist_with_valid_subdomain(self) -> None:
RealmDomain.objects.create(realm=self.backend._realm, domain="acme.com")
with self.settings(LDAP_APPEND_DOMAIN="acme.com"):
user_profile = self.backend.authenticate(
request=mock.MagicMock(),
username="[email protected]",
password=self.ldap_password("newuser"),
realm=get_realm("zulip"),
)
assert user_profile is not None
self.assertEqual(user_profile.delivery_email, "newuser@acme.com")
self.assertEqual(user_profile.full_name, "New LDAP fullname")
self.assertEqual(user_profile.realm.string_id, "zulip")
# Verify avatar gets created
self.assertEqual(user_profile.avatar_source, UserProfile.AVATAR_FROM_USER)
url = avatar_url(user_profile)
assert url is not None
result = self.client_get(url)
self.assertEqual(result.status_code, 200)
@override_settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipLDAPAuthBackend",))
def test_login_success_when_user_does_not_exist_with_split_full_name_mapping(self) -> None:
with self.settings(
LDAP_APPEND_DOMAIN="zulip.com",
AUTH_LDAP_USER_ATTR_MAP={"first_name": "sn", "last_name": "cn"},
):
user_profile = self.backend.authenticate(
request=mock.MagicMock(),
username="[email protected]",
password=self.ldap_password("newuser_splitname"),
realm=get_realm("zulip"),
)
assert user_profile is not None
self.assertEqual(user_profile.delivery_email, "newuser_splitname@zulip.com")
self.assertEqual(user_profile.full_name, "First Last")
self.assertEqual(user_profile.realm.string_id, "zulip")
class TestZulipLDAPUserPopulator(ZulipLDAPTestCase):
def test_authenticate(self) -> None:
backend = ZulipLDAPUserPopulator()
result = backend.authenticate(
username=self.example_email("hamlet"),
password=self.ldap_password("hamlet"),
realm=get_realm("zulip"),
)
self.assertIs(result, None)
def perform_ldap_sync(self, user_profile: UserProfile) -> None:
with self.settings(LDAP_APPEND_DOMAIN="zulip.com"):
result = sync_user_from_ldap(user_profile, mock.Mock())
self.assertTrue(result)
@mock.patch("zproject.backends.do_deactivate_user")
def test_ldaperror_doesnt_deactivate_user(self, mock_deactivate: mock.MagicMock) -> None:
"""
This is a test for a bug where failure to connect to LDAP in sync_user_from_ldap
(e.g. due to invalid credentials) would cause the user to be deactivated if
LDAP_DEACTIVATE_NON_MATCHING_USERS was True.
Details: https://github.com/zulip/zulip/issues/13130
"""
with self.settings(
LDAP_DEACTIVATE_NON_MATCHING_USERS=True,
LDAP_APPEND_DOMAIN="zulip.com",
AUTH_LDAP_BIND_PASSWORD="wrongpass",
):
with self.assertRaises(ldap.INVALID_CREDENTIALS):
sync_user_from_ldap(self.example_user("hamlet"), mock.Mock())
mock_deactivate.assert_not_called()
# Make sure other types of LDAPError won't cause deactivation either:
with mock.patch.object(_LDAPUser, "_get_or_create_user", side_effect=ldap.LDAPError):
with self.assertRaises(PopulateUserLDAPError):
sync_user_from_ldap(self.example_user("hamlet"), mock.Mock())
mock_deactivate.assert_not_called()
@override_settings(LDAP_EMAIL_ATTR="mail")
def test_populate_user_returns_none(self) -> None:
with mock.patch.object(ZulipLDAPUser, "populate_user", return_value=None):
with self.assertRaises(PopulateUserLDAPError):
sync_user_from_ldap(self.example_user("hamlet"), mock.Mock())
def test_update_full_name(self) -> None:
self.change_ldap_user_attr("hamlet", "cn", "New Name")
self.perform_ldap_sync(self.example_user("hamlet"))
hamlet = self.example_user("hamlet")
self.assertEqual(hamlet.full_name, "New Name")
def test_update_with_hidden_emails(self) -> None:
hamlet = self.example_user("hamlet")
realm = get_realm("zulip")
do_set_realm_property(
realm,
"email_address_visibility",
Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS,
acting_user=None,
)
hamlet.refresh_from_db()
self.change_ldap_user_attr("hamlet", "cn", "New Name")
self.perform_ldap_sync(hamlet)
hamlet.refresh_from_db()
self.assertEqual(hamlet.full_name, "New Name")
def test_update_split_full_name(self) -> None:
self.change_ldap_user_attr("hamlet", "cn", "Name")
self.change_ldap_user_attr("hamlet", "sn", "Full")
with self.settings(AUTH_LDAP_USER_ATTR_MAP={"first_name": "sn", "last_name": "cn"}):
self.perform_ldap_sync(self.example_user("hamlet"))
hamlet = self.example_user("hamlet")
self.assertEqual(hamlet.full_name, "Full Name")
def test_same_full_name(self) -> None:
with mock.patch("zerver.lib.actions.do_change_full_name") as fn:
self.perform_ldap_sync(self.example_user("hamlet"))
fn.assert_not_called()
def test_too_short_name(self) -> None:
self.change_ldap_user_attr("hamlet", "cn", "a")
with self.assertRaises(ZulipLDAPException), self.assertLogs(
"django_auth_ldap", "WARNING"
) as warn_log:
self.perform_ldap_sync(self.example_user("hamlet"))
self.assertEqual(
warn_log.output,
["WARNING:django_auth_ldap:Name too short! while authenticating hamlet"],
)
def test_deactivate_user(self) -> None:
self.change_ldap_user_attr("hamlet", "userAccountControl", "2")
with self.settings(
AUTH_LDAP_USER_ATTR_MAP={"full_name": "cn", "userAccountControl": "userAccountControl"}
), self.assertLogs("zulip.ldap") as info_logs:
self.perform_ldap_sync(self.example_user("hamlet"))
hamlet = self.example_user("hamlet")
self.assertFalse(hamlet.is_active)
self.assertEqual(
info_logs.output,
[
"INFO:zulip.ldap:Deactivating user [email protected] because they are disabled in LDAP."
],
)
@mock.patch("zproject.backends.ZulipLDAPAuthBackendBase.sync_full_name_from_ldap")
def test_dont_sync_disabled_ldap_user(self, fake_sync: mock.MagicMock) -> None:
self.change_ldap_user_attr("hamlet", "userAccountControl", "2")
with self.settings(
AUTH_LDAP_USER_ATTR_MAP={"full_name": "cn", "userAccountControl": "userAccountControl"}
), self.assertLogs("zulip.ldap") as info_logs:
self.perform_ldap_sync(self.example_user("hamlet"))
fake_sync.assert_not_called()
self.assertEqual(
info_logs.output,
[
"INFO:zulip.ldap:Deactivating user [email protected] because they are disabled in LDAP."
],
)
def test_reactivate_user(self) -> None:
do_deactivate_user(self.example_user("hamlet"), acting_user=None)
with self.settings(
AUTH_LDAP_USER_ATTR_MAP={"full_name": "cn", "userAccountControl": "userAccountControl"}
), self.assertLogs("zulip.ldap") as info_logs:
self.perform_ldap_sync(self.example_user("hamlet"))
hamlet = self.example_user("hamlet")
self.assertTrue(hamlet.is_active)
self.assertEqual(
info_logs.output,
[
"INFO:zulip.ldap:Reactivating user [email protected] because they are not disabled in LDAP."
],
)
def test_user_in_multiple_realms(self) -> None:
test_realm = do_create_realm("test", "test", emails_restricted_to_domains=False)
hamlet = self.example_user("hamlet")
email = hamlet.delivery_email
hamlet2 = do_create_user(email, None, test_realm, hamlet.full_name, acting_user=None)
self.change_ldap_user_attr("hamlet", "cn", "Second Hamlet")
expected_call_args = [hamlet2, "Second Hamlet", None]
with self.settings(AUTH_LDAP_USER_ATTR_MAP={"full_name": "cn"}):
with mock.patch("zerver.lib.actions.do_change_full_name") as f:
self.perform_ldap_sync(hamlet2)
f.assert_called_once_with(*expected_call_args)
# Get the updated model and make sure the full name is changed correctly:
hamlet2 = get_user_by_delivery_email(email, test_realm)
self.assertEqual(hamlet2.full_name, "Second Hamlet")
# Now get the original hamlet and make sure his name is still unchanged:
hamlet = self.example_user("hamlet")
self.assertEqual(hamlet.full_name, "King Hamlet")
def test_user_not_found_in_ldap(self) -> None:
with self.settings(
LDAP_DEACTIVATE_NON_MATCHING_USERS=False, LDAP_APPEND_DOMAIN="zulip.com"
):
othello = self.example_user("othello") # othello isn't in our test directory
mock_logger = mock.MagicMock()
result = sync_user_from_ldap(othello, mock_logger)
mock_logger.warning.assert_called_once_with(
"Did not find %s in LDAP.", othello.delivery_email
)
self.assertFalse(result)
do_deactivate_user(othello, acting_user=None)
mock_logger = mock.MagicMock()
result = sync_user_from_ldap(othello, mock_logger)
# In this case the logger shouldn't be used.
self.assertEqual(mock_logger.method_calls, [])
self.assertFalse(result)
def test_update_user_avatar(self) -> None:
# Hamlet has jpegPhoto set in our test directory by default.
with mock.patch("zerver.lib.upload.upload_avatar_image") as fn, self.settings(
AUTH_LDAP_USER_ATTR_MAP={"full_name": "cn", "avatar": "jpegPhoto"}
):
self.perform_ldap_sync(self.example_user("hamlet"))
fn.assert_called_once()
hamlet = self.example_user("hamlet")
self.assertEqual(hamlet.avatar_source, UserProfile.AVATAR_FROM_USER)
# Verify that the next time we do an LDAP sync, we don't
# end up updating this user's avatar again if the LDAP
# data hasn't changed.
self.perform_ldap_sync(self.example_user("hamlet"))
fn.assert_called_once()
# Now verify that if we do change the jpegPhoto image, we
# will upload a new avatar.
self.change_ldap_user_attr(
"hamlet", "jpegPhoto", static_path("images/logo/zulip-icon-512x512.png"), binary=True
)
with mock.patch("zerver.lib.upload.upload_avatar_image") as fn, self.settings(
AUTH_LDAP_USER_ATTR_MAP={"full_name": "cn", "avatar": "jpegPhoto"}
):
self.perform_ldap_sync(self.example_user("hamlet"))
fn.assert_called_once()
hamlet = self.example_user("hamlet")
self.assertEqual(hamlet.avatar_source, UserProfile.AVATAR_FROM_USER)
@use_s3_backend
def test_update_user_avatar_for_s3(self) -> None:
bucket = create_s3_buckets(settings.S3_AVATAR_BUCKET)[0]
with get_test_image_file("img.png") as f:
test_image_data = f.read()
self.change_ldap_user_attr("hamlet", "jpegPhoto", test_image_data)
with self.settings(AUTH_LDAP_USER_ATTR_MAP={"full_name": "cn", "avatar": "jpegPhoto"}):
self.perform_ldap_sync(self.example_user("hamlet"))
hamlet = self.example_user("hamlet")
path_id = user_avatar_path(hamlet)
original_image_path_id = path_id + ".original"
medium_path_id = path_id + "-medium.png"
original_image_key = bucket.Object(original_image_path_id)
medium_image_key = bucket.Object(medium_path_id)
image_data = original_image_key.get()["Body"].read()
self.assertEqual(image_data, test_image_data)
test_medium_image_data = resize_avatar(test_image_data, MEDIUM_AVATAR_SIZE)
medium_image_data = medium_image_key.get()["Body"].read()
self.assertEqual(medium_image_data, test_medium_image_data)
# Try to use invalid data as the image:
self.change_ldap_user_attr("hamlet", "jpegPhoto", b"00" + test_image_data)
with self.settings(AUTH_LDAP_USER_ATTR_MAP={"full_name": "cn", "avatar": "jpegPhoto"}):
with self.assertLogs(level="WARNING") as m:
self.perform_ldap_sync(self.example_user("hamlet"))
self.assertEqual(
m.output,
[
"WARNING:root:Could not parse {} field for user {}".format(
"jpegPhoto", hamlet.id
)
],
)
def test_deactivate_non_matching_users(self) -> None:
with self.settings(LDAP_APPEND_DOMAIN="zulip.com", LDAP_DEACTIVATE_NON_MATCHING_USERS=True):
# othello isn't in our test directory
result = sync_user_from_ldap(self.example_user("othello"), mock.Mock())
self.assertTrue(result)
othello = self.example_user("othello")
self.assertFalse(othello.is_active)
def test_update_custom_profile_field(self) -> None:
with self.settings(
AUTH_LDAP_USER_ATTR_MAP={
"full_name": "cn",
"custom_profile_field__phone_number": "homePhone",
"custom_profile_field__birthday": "birthDate",
}
):
self.perform_ldap_sync(self.example_user("hamlet"))
hamlet = self.example_user("hamlet")
test_data = [
{
"field_name": "Phone number",
"expected_value": "123456789",
},
{
"field_name": "Birthday",
"expected_value": "1900-09-08",
},
]
for test_case in test_data:
field = CustomProfileField.objects.get(realm=hamlet.realm, name=test_case["field_name"])
field_value = CustomProfileFieldValue.objects.get(
user_profile=hamlet, field=field
).value
self.assertEqual(field_value, test_case["expected_value"])
def test_update_non_existent_profile_field(self) -> None:
with self.settings(
AUTH_LDAP_USER_ATTR_MAP={
"full_name": "cn",
"custom_profile_field__non_existent": "homePhone",
}
):
with self.assertRaisesRegex(
ZulipLDAPException, "Custom profile field with name non_existent not found"
), self.assertLogs("django_auth_ldap", "WARNING") as warn_log:
self.perform_ldap_sync(self.example_user("hamlet"))
self.assertEqual(
warn_log.output,
[
"WARNING:django_auth_ldap:Custom profile field with name non_existent not found. while authenticating hamlet"
],
)
def test_update_custom_profile_field_invalid_data(self) -> None:
self.change_ldap_user_attr("hamlet", "birthDate", "9999")
with self.settings(
AUTH_LDAP_USER_ATTR_MAP={
"full_name": "cn",
"custom_profile_field__birthday": "birthDate",
}
):
with self.assertRaisesRegex(
ZulipLDAPException, "Invalid data for birthday field"
), self.assertLogs("django_auth_ldap", "WARNING") as warn_log:
self.perform_ldap_sync(self.example_user("hamlet"))
self.assertEqual(
warn_log.output,
[
"WARNING:django_auth_ldap:Invalid data for birthday field: Birthday is not a date while authenticating hamlet"
],
)
def test_update_custom_profile_field_no_mapping(self) -> None:
hamlet = self.example_user("hamlet")
no_op_field = CustomProfileField.objects.get(realm=hamlet.realm, name="Phone number")
expected_value = CustomProfileFieldValue.objects.get(
user_profile=hamlet, field=no_op_field
).value
with self.settings(
AUTH_LDAP_USER_ATTR_MAP={
"full_name": "cn",
"custom_profile_field__birthday": "birthDate",
}
):
self.perform_ldap_sync(self.example_user("hamlet"))
actual_value = CustomProfileFieldValue.objects.get(
user_profile=hamlet, field=no_op_field
).value
self.assertEqual(actual_value, expected_value)
def test_update_custom_profile_field_no_update(self) -> None:
hamlet = self.example_user("hamlet")
phone_number_field = CustomProfileField.objects.get(realm=hamlet.realm, name="Phone number")
birthday_field = CustomProfileField.objects.get(realm=hamlet.realm, name="Birthday")
phone_number_field_value = CustomProfileFieldValue.objects.get(
user_profile=hamlet, field=phone_number_field
)
phone_number_field_value.value = "123456789"
phone_number_field_value.save(update_fields=["value"])
expected_call_args = [
hamlet,
[
{
"id": birthday_field.id,
"value": "1900-09-08",
},
],
]
with self.settings(
AUTH_LDAP_USER_ATTR_MAP={
"full_name": "cn",
"custom_profile_field__birthday": "birthDate",
"custom_profile_field__phone_number": "homePhone",
}
):
with mock.patch("zproject.backends.do_update_user_custom_profile_data_if_changed") as f:
self.perform_ldap_sync(self.example_user("hamlet"))
f.assert_called_once_with(*expected_call_args)
def test_update_custom_profile_field_not_present_in_ldap(self) -> None:
hamlet = self.example_user("hamlet")
no_op_field = CustomProfileField.objects.get(realm=hamlet.realm, name="Birthday")
expected_value = CustomProfileFieldValue.objects.get(
user_profile=hamlet, field=no_op_field
).value
with self.settings(
AUTH_LDAP_USER_ATTR_MAP={
"full_name": "cn",
"custom_profile_field__birthday": "nonExistantAttr",
}
), self.assertLogs("django_auth_ldap", "WARNING") as warn_log:
self.perform_ldap_sync(self.example_user("hamlet"))
actual_value = CustomProfileFieldValue.objects.get(
user_profile=hamlet, field=no_op_field
).value
self.assertEqual(actual_value, expected_value)
self.assertEqual(
warn_log.output,
[
"WARNING:django_auth_ldap:uid=hamlet,ou=users,dc=zulip,dc=com does not have a value for the attribute nonExistantAttr"
],
)
class TestQueryLDAP(ZulipLDAPTestCase):
@override_settings(AUTHENTICATION_BACKENDS=("zproject.backends.EmailAuthBackend",))
def test_ldap_not_configured(self) -> None:
values = query_ldap(self.example_email("hamlet"))
self.assertEqual(values, ["LDAP backend not configured on this server."])
@override_settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipLDAPAuthBackend",))
def test_user_not_present(self) -> None:
# othello doesn't have an entry in our test directory
values = query_ldap(self.example_email("othello"))
self.assert_length(values, 1)
self.assertIn("No such user found", values[0])
@override_settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipLDAPAuthBackend",))
def test_normal_query(self) -> None:
with self.settings(
AUTH_LDAP_USER_ATTR_MAP={
"full_name": "cn",
"avatar": "jpegPhoto",
"custom_profile_field__birthday": "birthDate",
"custom_profile_field__phone_number": "nonExistentAttr",
}
):
values = query_ldap(self.example_email("hamlet"))
self.assertEqual(len(values), 4)
self.assertIn("full_name: King Hamlet", values)
self.assertIn("avatar: (An avatar image file)", values)
self.assertIn("custom_profile_field__birthday: 1900-09-08", values)
self.assertIn("custom_profile_field__phone_number: LDAP field not present", values)
@override_settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipLDAPAuthBackend",))
def test_query_email_attr(self) -> None:
with self.settings(AUTH_LDAP_USER_ATTR_MAP={"full_name": "cn"}, LDAP_EMAIL_ATTR="mail"):
# This will look up the user by email in our test dictionary,
# should successfully find hamlet's LDAP entry.
values = query_ldap(self.example_email("hamlet"))
self.assertEqual(len(values), 2)
self.assertIn("full_name: King Hamlet", values)
self.assertIn("email: [email protected]", values)
class TestZulipAuthMixin(ZulipTestCase):
def test_get_user(self) -> None:
backend = ZulipAuthMixin()
result = backend.get_user(11111)
self.assertIs(result, None)
class TestPasswordAuthEnabled(ZulipTestCase):
def test_password_auth_enabled_for_ldap(self) -> None:
with self.settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipLDAPAuthBackend",)):
realm = Realm.objects.get(string_id="zulip")
self.assertTrue(password_auth_enabled(realm))
class TestRequireEmailFormatUsernames(ZulipTestCase):
def test_require_email_format_usernames_for_ldap_with_append_domain(self) -> None:
with self.settings(
AUTHENTICATION_BACKENDS=("zproject.backends.ZulipLDAPAuthBackend",),
LDAP_APPEND_DOMAIN="zulip.com",
):
realm = Realm.objects.get(string_id="zulip")
self.assertFalse(require_email_format_usernames(realm))
def test_require_email_format_usernames_for_ldap_with_email_attr(self) -> None:
with self.settings(
AUTHENTICATION_BACKENDS=("zproject.backends.ZulipLDAPAuthBackend",),
LDAP_EMAIL_ATTR="email",
):
realm = Realm.objects.get(string_id="zulip")
self.assertFalse(require_email_format_usernames(realm))
def test_require_email_format_usernames_for_email_only(self) -> None:
with self.settings(AUTHENTICATION_BACKENDS=("zproject.backends.EmailAuthBackend",)):
realm = Realm.objects.get(string_id="zulip")
self.assertTrue(require_email_format_usernames(realm))
def test_require_email_format_usernames_for_email_and_ldap_with_email_attr(self) -> None:
with self.settings(
AUTHENTICATION_BACKENDS=(
"zproject.backends.EmailAuthBackend",
"zproject.backends.ZulipLDAPAuthBackend",
),
LDAP_EMAIL_ATTR="email",
):
realm = Realm.objects.get(string_id="zulip")
self.assertFalse(require_email_format_usernames(realm))
def test_require_email_format_usernames_for_email_and_ldap_with_append_email(self) -> None:
with self.settings(
AUTHENTICATION_BACKENDS=(
"zproject.backends.EmailAuthBackend",
"zproject.backends.ZulipLDAPAuthBackend",
),
LDAP_APPEND_DOMAIN="zulip.com",
):
realm = Realm.objects.get(string_id="zulip")
self.assertFalse(require_email_format_usernames(realm))
class TestMaybeSendToRegistration(ZulipTestCase):
def test_sso_only_when_preregistration_user_does_not_exist(self) -> None:
rf = RequestFactory()
request = rf.get("/")
request.session = {}
request.user = None
# Creating a mock Django form in order to keep the test simple.
        # This form will be returned by the create_homepage_form function
# and will always be valid so that the code that we want to test
# actually runs.
class Form:
def is_valid(self) -> bool:
return True
with mock.patch("zerver.views.auth.HomepageForm", return_value=Form()):
self.assertEqual(PreregistrationUser.objects.all().count(), 0)
result = maybe_send_to_registration(
request, self.example_email("hamlet"), is_signup=True
)
self.assertEqual(result.status_code, 302)
confirmation = Confirmation.objects.all().first()
confirmation_key = confirmation.confirmation_key
self.assertIn("do_confirm/" + confirmation_key, result.url)
self.assertEqual(PreregistrationUser.objects.all().count(), 1)
result = self.client_get(result.url)
self.assert_in_response('action="/accounts/register/"', result)
self.assert_in_response(f'value="{confirmation_key}" name="key"', result)
def test_sso_only_when_preregistration_user_exists(self) -> None:
rf = RequestFactory()
request = rf.get("/")
request.session = {}
request.user = None
# Creating a mock Django form in order to keep the test simple.
        # This form will be returned by the create_homepage_form function
# and will always be valid so that the code that we want to test
# actually runs.
class Form:
def is_valid(self) -> bool:
return True
email = self.example_email("hamlet")
user = PreregistrationUser(email=email)
user.save()
with mock.patch("zerver.views.auth.HomepageForm", return_value=Form()):
self.assertEqual(PreregistrationUser.objects.all().count(), 1)
result = maybe_send_to_registration(request, email, is_signup=True)
self.assertEqual(result.status_code, 302)
confirmation = Confirmation.objects.all().first()
confirmation_key = confirmation.confirmation_key
self.assertIn("do_confirm/" + confirmation_key, result.url)
self.assertEqual(PreregistrationUser.objects.all().count(), 1)
class TestAdminSetBackends(ZulipTestCase):
def test_change_enabled_backends(self) -> None:
# Log in as admin
self.login("iago")
result = self.client_patch(
"/json/realm",
{"authentication_methods": orjson.dumps({"Email": False, "Dev": True}).decode()},
)
self.assert_json_error(result, "Must be an organization owner")
self.login("desdemona")
result = self.client_patch(
"/json/realm",
{"authentication_methods": orjson.dumps({"Email": False, "Dev": True}).decode()},
)
self.assert_json_success(result)
realm = get_realm("zulip")
self.assertFalse(password_auth_enabled(realm))
self.assertTrue(dev_auth_enabled(realm))
def test_disable_all_backends(self) -> None:
# Log in as admin
self.login("desdemona")
result = self.client_patch(
"/json/realm",
{"authentication_methods": orjson.dumps({"Email": False, "Dev": False}).decode()},
)
self.assert_json_error(result, "At least one authentication method must be enabled.")
realm = get_realm("zulip")
self.assertTrue(password_auth_enabled(realm))
self.assertTrue(dev_auth_enabled(realm))
def test_supported_backends_only_updated(self) -> None:
# Log in as admin
self.login("desdemona")
# Set some supported and unsupported backends
result = self.client_patch(
"/json/realm",
{
"authentication_methods": orjson.dumps(
{"Email": False, "Dev": True, "GitHub": False}
).decode()
},
)
self.assert_json_success(result)
realm = get_realm("zulip")
# Check that unsupported backend is not enabled
self.assertFalse(github_auth_enabled(realm))
self.assertTrue(dev_auth_enabled(realm))
self.assertFalse(password_auth_enabled(realm))
class EmailValidatorTestCase(ZulipTestCase):
def test_valid_email(self) -> None:
validate_login_email(self.example_email("hamlet"))
def test_invalid_email(self) -> None:
with self.assertRaises(JsonableError):
validate_login_email("hamlet")
def test_validate_email(self) -> None:
inviter = self.example_user("hamlet")
cordelia = self.example_user("cordelia")
realm = inviter.realm
do_set_realm_property(realm, "emails_restricted_to_domains", True, acting_user=None)
inviter.realm.refresh_from_db()
error = validate_email_is_valid(
"[email protected]",
get_realm_email_validator(realm),
)
self.assertIn("containing + are not allowed", error)
cordelia_email = cordelia.delivery_email
errors = get_existing_user_errors(realm, {cordelia_email})
error, is_deactivated = errors[cordelia_email]
self.assertEqual(False, is_deactivated)
self.assertEqual(error, "Already has an account.")
change_user_is_active(cordelia, False)
errors = get_existing_user_errors(realm, {cordelia_email})
error, is_deactivated = errors[cordelia_email]
self.assertEqual(True, is_deactivated)
self.assertEqual(error, "Account has been deactivated.")
errors = get_existing_user_errors(realm, {"[email protected]"})
self.assertEqual(errors, {})
class LDAPBackendTest(ZulipTestCase):
@override_settings(AUTHENTICATION_BACKENDS=("zproject.backends.ZulipLDAPAuthBackend",))
def test_non_existing_realm(self) -> None:
self.init_default_ldap_database()
user = self.example_user("hamlet")
data = dict(
username=user.delivery_email,
password=initial_password(user.delivery_email),
)
error_type = ZulipLDAPAuthBackend.REALM_IS_NONE_ERROR
error = ZulipLDAPConfigurationError("Realm is None", error_type)
with mock.patch(
"zproject.backends.ZulipLDAPAuthBackend.get_or_build_user", side_effect=error
), mock.patch("django_auth_ldap.backend._LDAPUser._authenticate_user_dn"), self.assertLogs(
"django_auth_ldap", "WARNING"
) as warn_log:
response = self.client_post("/login/", data)
self.assert_in_success_response(
["Configuration error", "You are trying to log in using LDAP without creating an"],
response,
)
self.assertEqual(
warn_log.output,
["WARNING:django_auth_ldap:('Realm is None', 1) while authenticating hamlet"],
)
|
py | b401d85dc56ca66bb6e1cb31af567a839d33b60b | """ Compute racing line using Bayesian Optimization (BayesOpt).
This script compares EI, noisyEI and random strategies for sampling.
"""
__author__ = 'Achin Jain'
__email__ = '[email protected]'
import time
import numpy as np
import torch
from botorch.models import SingleTaskGP
from gpytorch.mlls import ExactMarginalLogLikelihood
from botorch.optim import optimize_acqf
from botorch import fit_gpytorch_model
from botorch.acquisition.monte_carlo import qExpectedImprovement, qNoisyExpectedImprovement
from botorch.sampling.samplers import SobolQMCNormalSampler
from bayes_race.tracks import MAP2
from bayes_race.params import F110
from bayes_race.raceline import randomTrajectory
from bayes_race.raceline import calcMinimumTime
from matplotlib import pyplot as plt
#####################################################################
# set device in torch
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device = torch.device("cpu")
dtype = torch.float
#####################################################################
# simulation settings
SEED = np.random.randint(1000)
torch.manual_seed(SEED)
np.random.seed(SEED)
BATCH_SIZE = 1 # useful for parallelization, DON'T change
N_TRIALS = 3 # number of times bayesopt is run
N_BATCH = 100 # new observations after initialization
MC_SAMPLES = 64 # monte carlo samples
N_INITIAL_SAMPLES = 10 # samples to initialize GP
PLOT_RESULTS = False # whether to plot results
SAVE_RESULTS = True # whether to save results
N_WAYPOINTS = 100 # resampled waypoints
SCALE = 0.90 # shrinking factor for track width
LASTIDX = 1 # fixed node at the end DO NOT CHANGE
#####################################################################
# track specific data
params = F110()
track_name = 'MAP2'
track = MAP2()
NODES = [5, 15, 25, 35, 45, 55, 65, 75, 85, 95, 105, 115, 5]
track_width = track.track_width*SCALE
theta = track.theta_track[NODES]
N_DIMS = len(NODES)
n_waypoints = N_DIMS
rand_traj = randomTrajectory(track=track, n_waypoints=n_waypoints)
bounds = torch.tensor([[-track_width/2] * N_DIMS, [track_width/2] * N_DIMS], device=device, dtype=dtype)
def evaluate_y(x_eval, mean_y=None, std_y=None):
""" evaluate true output for given x (distance of nodes from center line)
TODO: parallelize evaluations
"""
if type(x_eval) is torch.Tensor:
is_tensor = True
x_eval = x_eval.cpu().numpy()
else:
is_tensor = False
if len(x_eval.shape)==1:
x_eval = x_eval.reshape(1,-1)
n_eval = x_eval.shape[0]
y_eval = np.zeros(n_eval)
for ids in range(n_eval):
wx, wy = rand_traj.calculate_xy(
width=x_eval[ids],
last_index=NODES[LASTIDX],
theta=theta,
)
x, y = rand_traj.fit_cubic_splines(
wx=wx,
wy=wy,
n_samples=N_WAYPOINTS,
)
y_eval[ids] = -calcMinimumTime(x, y, **params) # we want to max negative lap times
if mean_y and std_y:
y_eval = normalize(y_eval, mean_y, std_y)
if is_tensor:
return torch.tensor(y_eval, device=device, dtype=dtype).unsqueeze(-1)
else:
return y_eval.ravel()
def generate_initial_data(n_samples=10):
""" generate training data
"""
train_x = np.zeros([n_samples, n_waypoints])
train_y_ = np.zeros([n_samples, 1])
for ids in range(n_samples):
width_random = rand_traj.sample_nodes(scale=SCALE)
t_random = evaluate_y(width_random)
train_x[ids,:] = width_random
train_y_[ids,:] = t_random
mean_y, std_y = train_y_.mean(), train_y_.std()
train_y = normalize(train_y_, mean_y, std_y)
train_x = torch.tensor(train_x, device=device, dtype=dtype)
train_y = torch.tensor(train_y, device=device, dtype=dtype)
best_y = train_y.max().item()
return train_x, train_y, best_y, mean_y, std_y
def normalize(y_eval, mean_y, std_y):
""" normalize outputs for GP
"""
return (y_eval - mean_y) / std_y
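# Note: the GP is fit on these normalized, negated lap times; readable lap times are
# recovered later via the `denormalize` lambda defined in optimize(), which inverts
# this scaling and flips the sign back.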
#####################################################################
# modeling and optimization functions called in closed-loop
def initialize_model(train_x, train_y, state_dict=None):
"""initialize GP model with/without initial states
"""
model = SingleTaskGP(train_x, train_y).to(train_x)
mll = ExactMarginalLogLikelihood(model.likelihood, model)
# load state dict if it is passed
if state_dict is not None:
model.load_state_dict(state_dict)
return mll, model
def optimize_acqf_and_get_observation(acq_func, mean_y=None, std_y=None):
"""optimize acquisition function and evaluate new candidates
"""
# optimize
candidates, _ = optimize_acqf(
acq_function=acq_func,
bounds=bounds,
q=BATCH_SIZE,
num_restarts=10,
        raw_samples=512, # used for initialization heuristic
)
# observe new values
new_x = candidates.detach()
new_y = evaluate_y(new_x, mean_y=mean_y, std_y=std_y)
return new_x, new_y
def sample_random_observations(mean_y, std_y):
"""sample a random trajectory
"""
rand_x = torch.tensor(rand_traj.sample_nodes(scale=SCALE).reshape(1,-1), device=device, dtype=dtype)
rand_y = evaluate_y(rand_x, mean_y=mean_y, std_y=std_y)
return rand_x, rand_y
#####################################################################
# main simulation loop
# define the qEI and qNEI acquisition modules using a QMC sampler
qmc_sampler = SobolQMCNormalSampler(num_samples=MC_SAMPLES)
def optimize():
verbose = True
best_observed_all_ei, best_observed_all_nei, best_random_all = [], [], []
train_x_all_ei, train_x_all_nei, train_x_all_random = [], [], []
train_y_all_ei, train_y_all_nei, train_y_all_random = [], [], []
# statistics over multiple trials
for trial in range(1, N_TRIALS + 1):
print('\nTrial {} of {}'.format(trial, N_TRIALS))
best_observed_ei, best_observed_nei = [], []
best_random = []
# generate initial training data and initialize model
print('\nGenerating {} random samples'.format(N_INITIAL_SAMPLES))
train_x_ei, train_y_ei, best_y_ei, mean_y, std_y = generate_initial_data(n_samples=N_INITIAL_SAMPLES)
denormalize = lambda x: -(x*std_y + mean_y)
mll_ei, model_ei = initialize_model(train_x_ei, train_y_ei)
train_x_nei, train_y_nei, best_y_nei = train_x_ei, train_y_ei, best_y_ei
mll_nei, model_nei = initialize_model(train_x_nei, train_y_nei)
train_x_random, train_y_random, best_y_random = train_x_ei, train_y_ei, best_y_ei
best_observed_ei.append(denormalize(best_y_ei))
best_observed_nei.append(denormalize(best_y_nei))
best_random.append(denormalize(best_y_random))
# run N_BATCH rounds of BayesOpt after the initial random batch
for iteration in range(1, N_BATCH + 1):
print('\nBatch {} of {}\n'.format(iteration, N_BATCH))
t0 = time.time()
# fit the models
fit_gpytorch_model(mll_ei)
fit_gpytorch_model(mll_nei)
# update acquisition functions
qEI = qExpectedImprovement(
model=model_ei,
best_f=train_y_ei.max(),
sampler=qmc_sampler,
)
qNEI = qNoisyExpectedImprovement(
model=model_nei,
X_baseline=train_x_nei,
sampler=qmc_sampler,
)
# optimize acquisition function and evaluate new sample
new_x_ei, new_y_ei = optimize_acqf_and_get_observation(qEI, mean_y=mean_y, std_y=std_y)
print('EI: time to traverse is {:.4f}s'.format(-(new_y_ei.numpy().ravel()[0]*std_y+mean_y)))
new_x_nei, new_y_nei = optimize_acqf_and_get_observation(qNEI, mean_y=mean_y, std_y=std_y)
print('NEI: time to traverse is {:.4f}s'.format(-(new_y_nei.numpy().ravel()[0]*std_y+mean_y)))
new_x_random, new_y_random = sample_random_observations(mean_y=mean_y, std_y=std_y)
print('Random: time to traverse is {:.4f}s'.format(-(new_y_random.numpy().ravel()[0]*std_y+mean_y)))
# update training points
train_x_ei = torch.cat([train_x_ei, new_x_ei])
train_y_ei = torch.cat([train_y_ei, new_y_ei])
train_x_nei = torch.cat([train_x_nei, new_x_nei])
train_y_nei = torch.cat([train_y_nei, new_y_nei])
train_x_random = torch.cat([train_x_random, new_x_random])
train_y_random = torch.cat([train_y_random, new_y_random])
# update progress
best_value_ei = denormalize(train_y_ei.max().item())
best_value_nei = denormalize(train_y_nei.max().item())
best_value_random = denormalize(train_y_random.max().item())
best_observed_ei.append(best_value_ei)
best_observed_nei.append(best_value_nei)
best_random.append(best_value_random)
# reinitialize the models so they are ready for fitting on next iteration
# use the current state dict to speed up fitting
mll_ei, model_ei = initialize_model(
train_x_ei,
train_y_ei,
model_ei.state_dict(),
)
mll_nei, model_nei = initialize_model(
train_x_nei,
train_y_nei,
model_nei.state_dict(),
)
t1 = time.time()
if verbose:
print(
'best lap time (random, qEI, qNEI) = {:.2f}, {:.2f}, {:.2f}, time to compute = {:.2f}s'.format(
best_value_random,
best_value_ei,
best_value_nei,
t1-t0
)
)
else:
print(".")
best_observed_all_ei.append(best_observed_ei)
best_observed_all_nei.append(best_observed_nei)
best_random_all.append(best_random)
train_x_all_ei.append(train_x_ei.cpu().numpy())
train_x_all_nei.append(train_x_nei.cpu().numpy())
train_x_all_random.append(train_x_random.cpu().numpy())
train_y_all_ei.append(denormalize(train_y_ei.cpu().numpy()))
train_y_all_nei.append(denormalize(train_y_nei.cpu().numpy()))
train_y_all_random.append(denormalize(train_y_random.cpu().numpy()))
iters = np.arange(N_BATCH + 1) * BATCH_SIZE
y_ei = np.asarray(best_observed_all_ei)
y_nei = np.asarray(best_observed_all_nei)
y_rnd = np.asarray(best_random_all)
savestr = time.strftime('%Y%m%d%H%M%S')
#####################################################################
# save results
if SAVE_RESULTS:
np.savez(
'results/{}_raceline_data-{}.npz'.format(track_name, savestr),
y_ei=y_ei,
y_nei=y_nei,
y_rnd=y_rnd,
iters=iters,
train_x_all_ei=np.asarray(train_x_all_ei),
train_x_all_nei=np.asarray(train_x_all_nei),
train_x_all_random=np.asarray(train_x_all_random),
train_y_all_ei=np.asarray(train_y_all_ei),
train_y_all_nei=np.asarray(train_y_all_nei),
train_y_all_random=np.asarray(train_y_all_random),
SEED=SEED,
)
#####################################################################
# plot results
if PLOT_RESULTS:
def ci(y):
return 1.96 * y.std(axis=0) / np.sqrt(N_TRIALS)
plt.figure()
plt.gca().set_prop_cycle(None)
plt.plot(iters, y_rnd.mean(axis=0), linewidth=1.5)
plt.plot(iters, y_ei.mean(axis=0), linewidth=1.5)
plt.plot(iters, y_nei.mean(axis=0), linewidth=1.5)
plt.gca().set_prop_cycle(None)
plt.fill_between(iters, y_rnd.mean(axis=0)-ci(y_rnd), y_rnd.mean(axis=0)+ci(y_rnd), label='random', alpha=0.2)
plt.fill_between(iters, y_ei.mean(axis=0)-ci(y_ei), y_ei.mean(axis=0)+ci(y_ei), label='qEI', alpha=0.2)
plt.fill_between(iters, y_nei.mean(axis=0)-ci(y_nei), y_nei.mean(axis=0)+ci(y_nei), label='qNEI', alpha=0.2)
plt.xlabel('number of observations (beyond initial points)')
plt.ylabel('best lap times')
plt.grid(True)
plt.legend(loc=0)
plt.savefig('results/{}_laptimes-{}.png'.format(track_name, savestr), dpi=600)
plt.show()
if __name__ == '__main__':
optimize()
|
py | b401d85f4658fef04245529aa70a7a96a6e0efb0 | import numpy
import json
import math
import random
from client import *
num_weights = 11
sol_per_pop = 10
num_parents_mating = 2
total_api_calls = 1000
train_data_weight = 0.4
p = 0.8
pop_size = (sol_per_pop,num_weights)
try:
with open('./output.txt','r') as prev:
old_generation = json.load(prev)
old_generation = json.loads(old_generation)
data = list(old_generation)
new_population = numpy.array(data)
except:
initial_inputs = []
try:
with open('./overfit.txt','r') as overfit:
tmp = json.load(overfit)
for i in tmp:
if i != '':
initial_inputs.append(float(i))
new_population = []
new_population.append(initial_inputs)
rng = 0.1
const_addition = min(initial_inputs)
for i in range(sol_per_pop-1):
ls = []
for vec in initial_inputs:
num = numpy.random.uniform(-rng,rng)
num = vec*(1.01+num)
if(not num):
num += const_addition
ls.append(num)
new_population.append(ls)
new_population = numpy.array(new_population)
except:
new_population = numpy.random.uniform(low=-10.0, high=10.0, size=pop_size)
def fitness_function(fitness):
for e in fitness:
        e[0] = train_data_weight*e[0] + (1-train_data_weight)*e[1]
# print("Fitness = ",fitness)
return fitness
def cal_pop_fitness(pop):
fitness = []
i = 1
for p in pop:
fitness.append(get_errors(SECRET_KEY, list(p)))
return fitness
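# Fitness-proportionate (roulette-wheel) selection: each candidate's weight is
# (total_error / candidate_error) ** 0.85, so a lower combined error gets a larger
# slice of the wheel and a higher chance of being drawn as a parent.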
def select_parents(pop, fitness):
total = 0
for e in fitness:
total = total + e[0]
percent = []
for e in fitness:
percent.append(math.pow(total/e[0],0.85))
print("Values fitness : ",list(percent))
total = 0
for e in percent:
total = total + e
roulette = [0]
val = 0
for e in percent:
val = val + e
roulette.append(val/total)
l = []
for e in percent:
l.append((e/total)*100)
print("fitness % : ",list(l))
parents = []
for p in pop:
num = numpy.random.uniform(0,1)
id = 1
while id < len(roulette) and (roulette[id] - num) < 1e-20:
id = id + 1
val = pop[id-1]
parents.append(val)
parents = numpy.array(parents)
return parents
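# Multi-point crossover: for every group of `num_parents_mating` parents, random
# cut points are drawn and each offspring copies its first segment from its own
# parent and the remaining segments from the other parents in the group.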
def crossover(parents, num_parents_mating,fitness):
offspring = numpy.empty(parents.shape)
n = offspring.shape[0]
i = 0
while i < n:
num = random.sample(range(1,offspring.shape[1]),num_parents_mating-1)
num.sort()
num.append(offspring.shape[1])
for idx in range(i, i+num_parents_mating):
offspring[idx][0:num[0]] = parents[idx][0:num[0]]
for k in range(0,len(num)-1):
offspring[idx][num[k]:num[k+1]] = parents[i+(idx+k+1)%num_parents_mating][num[k]:num[k+1]]
i = i + num_parents_mating
return offspring
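# Mutation: each gene is perturbed with roughly 30% probability by multiplying it
# by a random factor between 0.7 and 1.3.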
def mutation(offspring_crossover):
for idx in range(offspring_crossover.shape[0]):
for j in range(offspring_crossover.shape[1]):
random_value = numpy.random.uniform(-1.0, 1.0, 1)
if(random_value > -0.3 and random_value < 0.3 ):
mut = numpy.random.uniform(-0.3,0.3)
s = numpy.random.choice([-1,1])
offspring_crossover[idx, j] = offspring_crossover[idx, j]*(1+s*mut)
return offspring_crossover
num_generations = total_api_calls//sol_per_pop
for generation in range(num_generations):
print("Generation : ", generation)
print('Initial population: ',list(new_population),end='\n')
fitness = cal_pop_fitness(new_population)
var_fitness = [[fitness[x][y] for y in range(len(fitness[0]))] for x in range(len(fitness))]
for f in var_fitness:
f[0] = "{:e}".format(f[0])
f[1] = "{:e}".format(f[1])
print('Errors: ',var_fitness,end='\n\n')
fitness = fitness_function(fitness)
parents = select_parents(new_population, fitness)
print("Selection : ",list(parents))
offspring_crossover = crossover(parents,num_parents_mating,fitness)
print("Crossover : ",list(offspring_crossover))
offspring_mutation = mutation(offspring_crossover)
print("Mutation : ",list(offspring_mutation))
new_population = offspring_mutation
answer = json.dumps(new_population.tolist())
with open('./output.txt','w+') as write_file:
json.dump(answer, write_file)
print('************************************')
# end here
|
py | b401d8e9e1564ccd4156edf56bea017dbfa482f6 | """Main app/routing file for Twitoff"""
from os import getenv
from flask import Flask, render_template, request
from .models import DB, User
from .twitter import add_or_update_user, update_all_users
from .predict import predict_user
#creates application
def create_app():
"""Creating and configuring an instance of the Flask application"""
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = getenv("DATABASE_URL")
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
DB.init_app(app)
@app.route('/')
def root():
return render_template('base.html', title='home', users=User.query.all())
    @app.route('/compare', methods=['POST'])
    def compare():
        user0, user1 = sorted([request.values['user1'], request.values['user2']])
        if user0 == user1:
            message = 'Can not compare users to themselves!'
        else:
            prediction = predict_user(user0, user1, request.values['tweet_text'])
            message = '{} is more likely to be said by {} than {}'.format(
                request.values['tweet_text'], user1 if prediction else user0,
                user0 if prediction else user1
            )
        # Return the comparison message directly; a dedicated template could be
        # rendered here instead if one exists in the project.
        return message
@app.route('/user', methods=['POST'])
@app.route('/user/<name>', methods=['GET'])
def user(name=None, message=''):
name = name or request.values['user_name']
try:
if request.method == 'POST':
add_or_update_user(name)
message = 'User {} was successfully added!'.format(name)
tweets = User.query.filter(User.name == name).one().tweets
except Exception as e:
message = 'Error adding {}: {}'.format(name, e)
tweets = []
return render_template('user.html', title=name, tweets=tweets, message=message)
@app.route('/update')
def update():
reset()
update_all_users()
return render_template('base.html', title="home", users=User.query.all())
@app.route('/reset')
def reset():
DB.drop_all()
DB.create_all()
return render_template('base.html', users=User.query.all(), title='All Tweets updated!')
return app
# def insert_example_users():
# add_or_update_user('elonmusk')
# add_or_update_user('nasa') |
py | b401d91ef949cb69123eb09e261efdd6ce19ba78 | import pytest
import bme680
def test_setup_not_present(smbus_notpresent):
"""Mock the adbsence of a BME680 and test initialisation."""
with pytest.raises(RuntimeError):
sensor = bme680.BME680() # noqa F841
def test_setup_mock_present(smbus):
"""Mock the presence of a BME680 and test initialisation."""
sensor = bme680.BME680() # noqa F841
|
py | b401dbfd0435858ef07e891bf24df29b87146634 | """
In mitmproxy, protocols are implemented as a set of layers, which are composed
on top each other. The first layer is usually the proxy mode, e.g. transparent
proxy or normal HTTP proxy. Next, various protocol layers are stacked on top of
each other - imagine WebSocket on top of an HTTP Upgrade request. An actual
mitmproxy connection may look as follows (outermost layer first):
Transparent HTTP proxy, no TLS:
- TransparentProxy
- Http1Layer
- HttpLayer
Regular proxy, CONNECT request with WebSocket over SSL:
- ReverseProxy
- Http1Layer
- HttpLayer
- TLSLayer
- WebSocketLayer (or TCPLayer)
Every layer acts as a read-only context for its inner layers (see
:py:class:`Layer`). To communicate with an outer layer, a layer can use
functions provided in the context. The next layer is always determined by a
call to :py:meth:`.next_layer() <mitmproxy.proxy.RootContext.next_layer>`,
which is provided by the root context.
Another subtle design goal of this architecture is that upstream connections
should be established as late as possible; this makes server replay without any
outgoing connections possible.
"""
from .base import Layer, ServerConnectionMixin
from .http import UpstreamConnectLayer
from .http import HttpLayer
from .http1 import Http1Layer
from .http2 import Http2Layer
from .websocket import WebSocketLayer
from .rawtcp import RawTCPLayer
from .tls import TlsLayer
from .tunnel import TunnelLayer
__all__ = [
"Layer", "ServerConnectionMixin",
"TlsLayer",
"UpstreamConnectLayer",
"HttpLayer",
"Http1Layer",
"Http2Layer",
"WebSocketLayer",
"RawTCPLayer",
"TunnelLayer",
]
|
py | b401dcf3a7c33d4b7c3c66bd73d83724e25129b8 | # Using Python requests and the Google Maps Geocoding API.
#
# References:
#
# * http://docs.python-requests.org/en/latest/
# * https://developers.google.com/maps/
import re
import unicodedata
import urlsigner
import requests
GOOGLE_MAPS_API_URL = 'https://maps.googleapis.com/maps/api/geocode/json'
clientId = 'put your client ID here'
key = 'put your key here'
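# For reference, an abridged geocoding response has roughly this shape (only the
# fields read by the helpers below are shown; the real payload contains more):
#
#   {
#     "status": "OK",
#     "results": [{
#       "formatted_address": "...",
#       "address_components": [
#         {"long_name": "...", "short_name": "...", "types": ["route", ...]}
#       ],
#       "geometry": {"location": {"lat": 0.0, "lng": 0.0}}
#     }]
#   }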
def convert_to_abbreviation(street_address):
street_address = re.sub('road', 'rd', street_address)
street_address = re.sub('street', 'st', street_address)
street_address = re.sub('boulevard', 'blvd', street_address)
street_address = re.sub('court', 'ct', street_address)
street_address = re.sub('terrace', 'terr', street_address)
street_address = re.sub('circle', 'cir', street_address)
street_address = re.sub('highway', 'hwy', street_address)
street_address = re.sub('parkway', 'pkwy', street_address)
street_address = re.sub('ridge', 'rdg', street_address)
street_address = re.sub('drive', 'dr', street_address)
street_address = re.sub('lane', 'ln', street_address)
street_address = re.sub('north', 'n', street_address)
street_address = re.sub('south', 's', street_address)
street_address = re.sub('east', 'e', street_address)
street_address = re.sub('west', 'w', street_address)
return street_address
def check_street_match(address_component, input_address_component):
address_types = {
'street_address': 0,
'route': 1,
'intersection': 2
}
found_address_component = None
if address_component['types'][0] in address_types:
found_address_component = address_component['short_name'].lower()
if found_address_component is None:
return False
elif unicodedata.normalize('NFKD', found_address_component).encode('ascii', 'ignore') == input_address_component:
return True
else:
return False
def check_city_match(address_component, input_address_component):
address_types = {
'locality': 0,
'administrative_area_level_3': 1
}
found_address_component = None
if address_component['types'][0] in address_types:
found_address_component = address_component['short_name'].lower()
if found_address_component is None:
return None
elif unicodedata.normalize('NFKD', found_address_component).encode('ascii', 'ignore') == input_address_component:
return True
else:
return False
def check_zip_code_match(address_component, input_address_component):
address_types = {
'postal_code': 0,
'postal_code_prefix': 1
}
found_address_component = None
if address_component['types'][0] in address_types:
found_address_component = address_component['long_name'].lower()
if found_address_component is None:
return None
elif unicodedata.normalize('NFKD', found_address_component).encode('ascii', 'ignore') == input_address_component:
return True
else:
return False
def check_state_match(address_component, input_address_component):
address_types = {
'administrative_area_level_1': 0
}
found_address_component = None
if address_component['types'][0] in address_types:
found_address_component = address_component['short_name'].lower()
if found_address_component is None:
return None
elif unicodedata.normalize('NFKD', found_address_component).encode('ascii', 'ignore') == input_address_component:
return True
else:
return False
def check_country_match(address_component, input_address_component):
address_types = {
'country': 0
}
found_address_component = None
if address_component['types'][0] in address_types:
found_address_component = address_component['short_name'].lower()
if found_address_component is None:
return None
elif unicodedata.normalize('NFKD', found_address_component).encode('ascii', 'ignore') == input_address_component:
return True
else:
return False
def address_validation(street_address_input, city_input, zip_code_input, state_input, country_input):
street_address_input = convert_to_abbreviation(street_address_input)
street_address_input = ' '.join([word for word in street_address_input.split() if not word.isdigit()])
address = [street_address_input, city_input, zip_code_input, state_input, country_input]
url = GOOGLE_MAPS_API_URL + '?'
url += 'address=' + ','.join(address).replace(' ', '+').lower()
url += '&client=' + clientId
signed_url = urlsigner.sign_url(url, key)
# Do the request and get the response data
req = requests.post(signed_url)
res = req.json()
# "OK" indicates that no errors occurred; the address was successfully parsed and at least one geocode was returned.
if res['status'].upper() != 'OK':
        print(res['status'])
        return {}
is_street_matched = None
is_city_matched = None
is_zip_code_matched = None
is_state_matched = None
is_country_matched = None
geodata = dict()
for address_component in res['results'][0]['address_components']:
if is_street_matched is None:
is_street_matched = check_street_match(address_component, street_address_input)
if is_city_matched is None:
is_city_matched = check_city_match(address_component, city_input)
if is_zip_code_matched is None:
is_zip_code_matched = check_zip_code_match(address_component, zip_code_input)
if is_state_matched is None:
is_state_matched = check_state_match(address_component, state_input)
if is_country_matched is None:
is_country_matched = check_country_match(address_component, country_input)
if is_street_matched is not None and is_city_matched is not None and is_zip_code_matched is not None and \
is_state_matched is not None and is_country_matched is not None:
geodata['lat'] = res['results'][0]['geometry']['location']['lat']
geodata['lng'] = res['results'][0]['geometry']['location']['lng']
geodata['formatted_address'] = res['results'][0]['formatted_address']
break
results = dict()
if len(geodata) > 0:
geodata['street'] = is_street_matched
geodata['city'] = is_city_matched
geodata['zip_code'] = is_zip_code_matched
geodata['state'] = is_state_matched
geodata['country'] = is_country_matched
return geodata
if __name__ == "__main__":
print ('Enter an address')
    geodata = address_validation(raw_input("Street Address:"), raw_input("City:"), raw_input("Zip Code:"), raw_input("State/Province:"), raw_input("Country:"))
if len(geodata) > 0:
print ('Found a valid address: {formatted_address}'.format(**geodata))
print('Matched street = {street}, Matched city = {city}, Matched zip code = {zip_code}, '
'Matched state = {state}, Matched country = {country}, '
'(lat, lng) = ({lat}, {lng})'.format(**geodata))
else:
print ('Unknown address') |
py | b401dcf93e17fac95ad413f4047b320290f6d47b | import ctypes
import math
import boost_histogram as bh
import numpy as np
import pytest
from hist import NamedHist, axis
# ToDo: specify what error is raised
def test_named_init():
"""
Test named init -- whether NamedHist can be properly initialized.
"""
# basic
h = NamedHist(
axis.Regular(10, 0, 1, name="x"), axis.Regular(10, 0, 1, name="y")
).fill(x=[0.35, 0.35, 0.45], y=[0.35, 0.35, 0.45])
for idx in range(10):
if idx == 3:
assert h[idx, idx] == 2
assert h[{"x": idx, "y": idx}] == 2
elif idx == 4:
assert h[idx, idx] == 1
else:
assert h[idx, idx] == 0
with pytest.raises(Exception):
h[{0: idx, 1: idx}]
# with named axes
assert NamedHist(
axis.Regular(50, -3, 3, name="x"), axis.Regular(50, -3, 3, name="y")
).fill(x=np.random.randn(10), y=np.random.randn(10))
assert NamedHist(axis.Boolean(name="x"), axis.Boolean(name="y")).fill(
y=[True, False, True], x=[True, False, True]
)
assert NamedHist(
axis.Variable(range(-3, 3), name="x"), axis.Variable(range(-3, 3), name="y")
).fill(x=np.random.randn(10), y=np.random.randn(10))
assert NamedHist(axis.Integer(-3, 3, name="x"), axis.Integer(-3, 3, name="y")).fill(
x=np.random.randn(10), y=np.random.randn(10)
)
assert NamedHist(
axis.IntCategory(range(-3, 3), name="x"),
axis.IntCategory(range(-3, 3), name="y"),
).fill(x=np.random.randn(10), y=np.random.randn(10))
assert NamedHist(
axis.StrCategory(["F", "T"], name="x"), axis.StrCategory("FT", name="y")
).fill(y=["T", "F", "T"], x=["T", "F", "T"])
# cannot access via index
h = NamedHist(axis.Regular(10, 0, 1, name="x")).fill(x=[0.35, 0.35, 0.45])
for idx in range(10):
with pytest.raises(Exception):
h[{0: idx}]
# with no-named axes
with pytest.raises(Exception):
NamedHist(axis.Regular(50, -3, 3), axis.Regular(50, -3, 3)).fill(
x=np.random.randn(10), y=np.random.randn(10)
)
with pytest.raises(Exception):
NamedHist(axis.Boolean(), axis.Boolean()).fill(
y=[True, False, True], x=[True, False, True]
)
with pytest.raises(Exception):
NamedHist(axis.Variable(range(-3, 3)), axis.Variable(range(-3, 3))).fill(
x=np.random.randn(10), y=np.random.randn(10)
)
with pytest.raises(Exception):
NamedHist(axis.Integer(-3, 3), axis.Integer(-3, 3)).fill(
x=np.random.randn(10), y=np.random.randn(10)
)
with pytest.raises(Exception):
NamedHist(
axis.IntCategory(range(-3, 3)),
axis.IntCategory(range(-3, 3)),
).fill(x=np.random.randn(10), y=np.random.randn(10))
with pytest.raises(Exception):
NamedHist(axis.StrCategory(["F", "T"]), axis.StrCategory("FT")).fill(
y=["T", "F", "T"], x=["T", "F", "T"]
)
# with duplicated names
with pytest.raises(Exception):
NamedHist(axis.Regular(50, -3, 3, name="x"), axis.Regular(50, -3, 3, name="x"))
with pytest.raises(Exception):
NamedHist(axis.Boolean(name="y"), axis.Boolean(name="y"))
with pytest.raises(Exception):
NamedHist(
axis.Variable(range(-3, 3), name="x"), axis.Variable(range(-3, 3), name="x")
)
with pytest.raises(Exception):
NamedHist(axis.Integer(-3, 3, name="x"), axis.Integer(-3, 3, name="x"))
with pytest.raises(Exception):
NamedHist(
axis.IntCategory(range(-3, 3), name="x"),
axis.IntCategory(range(-3, 3), name="x"),
)
with pytest.raises(Exception):
NamedHist(
axis.StrCategory("TF", name="y"), axis.StrCategory(["T", "F"], name="y")
)
def test_named_fill():
"""
Test named fill -- whether NamedHist can be properly filled.
"""
# Regular
h = NamedHist(
axis.Regular(10, 0, 1, name="x"),
axis.Regular(10, 0, 1, name="y"),
axis.Regular(2, 0, 2, name="z"),
).fill(
x=[0.35, 0.35, 0.35, 0.45, 0.55, 0.55, 0.55],
y=[0.35, 0.35, 0.45, 0.45, 0.45, 0.45, 0.45],
z=[0, 0, 1, 1, 1, 1, 1],
)
z_one_only = h[{"z": bh.loc(1)}]
for idx_x in range(10):
for idx_y in range(10):
if idx_x == 3 and idx_y == 4 or idx_x == 4 and idx_y == 4:
assert (
z_one_only[idx_x, idx_y]
== z_one_only[{"x": idx_x, "y": idx_y}]
== 1
)
elif idx_x == 5 and idx_y == 4:
assert (
z_one_only[idx_x, idx_y]
== z_one_only[{"x": idx_x, "y": idx_y}]
== 3
)
else:
assert (
z_one_only[idx_x, idx_y]
== z_one_only[{"x": idx_x, "y": idx_y}]
== 0
)
# Boolean
h = NamedHist(
axis.Boolean(name="x"),
axis.Boolean(name="y"),
axis.Boolean(name="z"),
).fill(
x=[True, True, True, True, True, False, True],
y=[False, True, True, False, False, True, False],
z=[False, False, True, True, True, True, True],
)
z_one_only = h[{"z": bh.loc(True)}]
assert z_one_only[False, False] == z_one_only[{"x": False, "y": False}] == 0
assert z_one_only[False, True] == z_one_only[{"x": False, "y": True}] == 1
assert z_one_only[True, False] == z_one_only[{"x": True, "y": False}] == 3
assert z_one_only[True, True] == z_one_only[{"x": True, "y": True}] == 1
# Variable
h = NamedHist(
axis.Variable(range(11), name="x"),
axis.Variable(range(11), name="y"),
axis.Variable(range(3), name="z"),
).fill(
x=[3.5, 3.5, 3.5, 4.5, 5.5, 5.5, 5.5],
y=[3.5, 3.5, 4.5, 4.5, 4.5, 4.5, 4.5],
z=[0, 0, 1, 1, 1, 1, 1],
)
z_one_only = h[{"z": bh.loc(1)}]
for idx_x in range(10):
for idx_y in range(10):
if idx_x == 3 and idx_y == 4 or idx_x == 4 and idx_y == 4:
assert (
z_one_only[idx_x, idx_y]
== z_one_only[{"x": idx_x, "y": idx_y}]
== 1
)
elif idx_x == 5 and idx_y == 4:
assert (
z_one_only[idx_x, idx_y]
== z_one_only[{"x": idx_x, "y": idx_y}]
== 3
)
else:
assert (
z_one_only[idx_x, idx_y]
== z_one_only[{"x": idx_x, "y": idx_y}]
== 0
)
# Integer
h = NamedHist(
axis.Integer(0, 10, name="x"),
axis.Integer(0, 10, name="y"),
axis.Integer(0, 2, name="z"),
).fill(
x=[3.5, 3.5, 3.5, 4.5, 5.5, 5.5, 5.5],
y=[3.5, 3.5, 4.5, 4.5, 4.5, 4.5, 4.5],
z=[0, 0, 1, 1, 1, 1, 1],
)
z_one_only = h[{"z": bh.loc(1)}]
for idx_x in range(10):
for idx_y in range(10):
if idx_x == 3 and idx_y == 4 or idx_x == 4 and idx_y == 4:
assert (
z_one_only[idx_x, idx_y]
== z_one_only[{"x": idx_x, "y": idx_y}]
== 1
)
elif idx_x == 5 and idx_y == 4:
assert (
z_one_only[idx_x, idx_y]
== z_one_only[{"x": idx_x, "y": idx_y}]
== 3
)
else:
assert (
z_one_only[idx_x, idx_y]
== z_one_only[{"x": idx_x, "y": idx_y}]
== 0
)
# IntCategory
h = NamedHist(
axis.IntCategory(range(10), name="x"),
axis.IntCategory(range(10), name="y"),
axis.IntCategory(range(2), name="z"),
).fill(
x=[3.5, 3.5, 3.5, 4.5, 5.5, 5.5, 5.5],
y=[3.5, 3.5, 4.5, 4.5, 4.5, 4.5, 4.5],
z=[0, 0, 1, 1, 1, 1, 1],
)
z_one_only = h[{"z": bh.loc(1)}]
for idx_x in range(10):
for idx_y in range(10):
if idx_x == 3 and idx_y == 4 or idx_x == 4 and idx_y == 4:
assert (
z_one_only[idx_x, idx_y]
== z_one_only[{"x": idx_x, "y": idx_y}]
== 1
)
elif idx_x == 5 and idx_y == 4:
assert (
z_one_only[idx_x, idx_y]
== z_one_only[{"x": idx_x, "y": idx_y}]
== 3
)
else:
assert (
z_one_only[idx_x, idx_y]
== z_one_only[{"x": idx_x, "y": idx_y}]
== 0
)
# StrCategory
h = NamedHist(
axis.StrCategory("FT", name="x"),
axis.StrCategory(list("FT"), name="y"),
axis.StrCategory(["F", "T"], name="z"),
).fill(
x=["T", "T", "T", "T", "T", "F", "T"],
y=["F", "T", "T", "F", "F", "T", "F"],
z=["F", "F", "T", "T", "T", "T", "T"],
)
z_one_only = h[{"z": bh.loc("T")}]
assert z_one_only[bh.loc("F"), bh.loc("F")] == 0
assert z_one_only[bh.loc("F"), bh.loc("T")] == 1
assert z_one_only[bh.loc("T"), bh.loc("F")] == 3
assert z_one_only[bh.loc("T"), bh.loc("T")] == 1
# without names
with pytest.raises(Exception):
NamedHist(
axis.Regular(50, -3, 3, name="x"), axis.Regular(50, -3, 3, name="y")
).fill(np.random.randn(10), np.random.randn(10))
with pytest.raises(Exception):
NamedHist(axis.Boolean(name="x"), axis.Boolean(name="y")).fill(
[True, False, True], [True, False, True]
)
with pytest.raises(Exception):
NamedHist(
axis.Variable(range(-3, 3), name="x"), axis.Variable(range(-3, 3), name="y")
).fill(np.random.randn(10), np.random.randn(10))
with pytest.raises(Exception):
NamedHist(axis.Integer(-3, 3, name="x"), axis.Integer(-3, 3, name="y")).fill(
np.random.randn(10), np.random.randn(10)
)
with pytest.raises(Exception):
NamedHist(
axis.IntCategory(range(-3, 3), name="x"),
axis.IntCategory(range(-3, 3), name="y"),
).fill(np.random.randn(10), np.random.randn(10))
with pytest.raises(Exception):
NamedHist(
axis.StrCategory(["F", "T"], name="x"), axis.StrCategory("FT", name="y")
).fill(["T", "F", "T"], ["T", "F", "T"])
# wrong names
with pytest.raises(Exception):
NamedHist(
axis.Regular(50, -3, 3, name="x"), axis.Regular(50, -3, 3, name="y")
).fill(x=np.random.randn(10), z=np.random.randn(10))
with pytest.raises(Exception):
NamedHist(axis.Boolean(name="x"), axis.Boolean(name="y")).fill(
y=[True, False, True], z=[True, False, True]
)
with pytest.raises(Exception):
NamedHist(
axis.Variable(range(-3, 3), name="x"), axis.Variable(range(-3, 3), name="y")
).fill(z=np.random.randn(10), x=np.random.randn(10))
with pytest.raises(Exception):
NamedHist(axis.Integer(-3, 3, name="x"), axis.Integer(-3, 3, name="y")).fill(
x=np.random.randn(10), z=np.random.randn(10)
)
with pytest.raises(Exception):
NamedHist(
axis.IntCategory(range(-3, 3), name="x"),
axis.IntCategory(range(-3, 3), name="y"),
).fill(y=np.random.randn(10), z=np.random.randn(10))
with pytest.raises(Exception):
NamedHist(
axis.StrCategory(["F", "T"], name="x"), axis.StrCategory("FT", name="y")
).fill(z=["T", "F", "T"], x=["T", "F", "T"])
h = NamedHist(
axis.Regular(
50, -4, 4, name="X", label="s [units]", underflow=False, overflow=False
)
).fill(X=np.random.normal(size=10))
def test_named_access():
"""
Test named access -- whether NamedHist bins can be accessed.
"""
h = NamedHist(axis.Regular(10, -5, 5, name="X", label="x [units]")).fill(
X=np.random.normal(size=1000)
)
assert h[6] == h[bh.loc(1)] == h[1j] == h[0j + 1] == h[-3j + 4] == h[bh.loc(1, 0)]
h[6] = h[bh.loc(1)] = h[1j] = h[0j + 1] = h[-3j + 4] = h[bh.loc(1, 0)] = 0
h = NamedHist(
axis.Regular(50, -5, 5, name="Norm", label="normal distribution"),
axis.Regular(50, -5, 5, name="Unif", label="uniform distribution"),
axis.StrCategory(["hi", "hello"], name="Greet"),
axis.Boolean(name="Yes"),
axis.Integer(0, 1000, name="Int"),
).fill(
Norm=np.random.normal(size=1000),
Unif=np.random.uniform(size=1000),
Greet=["hi"] * 800 + ["hello"] * 200,
Yes=[True] * 600 + [False] * 400,
Int=np.ones(1000),
)
assert h[0j, -0j + 2, "hi", True, 1]
# mis-match dimension
with pytest.raises(Exception):
h[0j, -0j + 2, "hi", True]
class TestNamedStorageProxy:
"""
Test named storage proxy suite -- whether NamedHist storage proxy \
works properly.
"""
def test_double(self):
h = (
NamedHist.new.Reg(10, 0, 1, name="x")
.Reg(10, 0, 1, name="y")
.Double()
.fill(x=[0.5, 0.5], y=[0.2, 0.6])
)
assert h[0.5j, 0.2j] == 1
assert h[bh.loc(0.5), bh.loc(0.6)] == 1
assert isinstance(h[0.5j, 0.5j], float)
# add storage to existing storage
with pytest.raises(Exception):
h.Double()
def test_int64(self):
h = NamedHist.new.Reg(10, 0, 1, name="x").Int64().fill(x=[0.5, 0.5])
assert h[0.5j] == 2
assert isinstance(h[0.5j], int)
# add storage to existing storage
with pytest.raises(Exception):
h.Int64()
def test_atomic_int64(self):
h = NamedHist.new.Reg(10, 0, 1, name="x").AtomicInt64().fill(x=[0.5, 0.5])
assert h[0.5j] == 2
assert isinstance(h[0.5j], int)
# add storage to existing storage
with pytest.raises(Exception):
h.AtomicInt64()
def test_weight(self):
h = NamedHist.new.Reg(10, 0, 1, name="x").Weight().fill(x=[0.5, 0.5])
assert h[0.5j].variance == 2
assert h[0.5j].value == 2
# add storage to existing storage
with pytest.raises(Exception):
h.Weight()
def test_mean(self):
h = (
NamedHist.new.Reg(10, 0, 1, name="x")
.Mean()
.fill(x=[0.5, 0.5], weight=[1, 1], sample=[1, 1])
)
assert h[0.5j].count == 2
assert h[0.5j].value == 1
assert h[0.5j].variance == 0
# add storage to existing storage
with pytest.raises(Exception):
h.Mean()
def test_weighted_mean(self):
h = (
NamedHist.new.Reg(10, 0, 1, name="x")
.WeightedMean()
.fill(x=[0.5, 0.5], weight=[1, 1], sample=[1, 1])
)
assert h[0.5j].sum_of_weights == 2
assert h[0.5j].sum_of_weights_squared == 2
assert h[0.5j].value == 1
assert h[0.5j].variance == 0
# add storage to existing storage
with pytest.raises(Exception):
h.WeightedMean()
def test_unlimited(self):
h = NamedHist.new.Reg(10, 0, 1, name="x").Unlimited().fill(x=[0.5, 0.5])
assert h[0.5j] == 2
# add storage to existing storage
with pytest.raises(Exception):
h.Unlimited()
def test_named_project():
"""
Test named project -- whether NamedHist can be projected properly.
"""
h = NamedHist(
axis.Regular(
50, -5, 5, name="A", label="a [units]", underflow=False, overflow=False
),
axis.Boolean(name="B", label="b [units]"),
axis.Variable(range(11), name="C", label="c [units]"),
axis.Integer(0, 10, name="D", label="d [units]"),
axis.IntCategory(range(10), name="E", label="e [units]"),
axis.StrCategory("FT", name="F", label="f [units]"),
)
# via names
assert h.project()
assert h.project("A", "B")
assert h.project("A", "B", "C", "D", "E", "F")
h = NamedHist(
axis.Regular(
50, -5, 5, name="A", label="a [units]", underflow=False, overflow=False
),
axis.Boolean(name="B", label="b [units]"),
axis.Variable(range(11), name="C", label="c [units]"),
axis.Integer(0, 10, name="D", label="d [units]"),
axis.IntCategory(range(10), name="E", label="e [units]"),
axis.StrCategory("FT", name="F", label="f [units]"),
)
# via indices
with pytest.raises(Exception):
h.project(0, 1)
with pytest.raises(Exception):
h.project(0, 1, 2, 3, 4, 5)
# duplicated
with pytest.raises(Exception):
h.project(0, 0)
with pytest.raises(Exception):
h.project("A", "A")
# wrong/mixed types
with pytest.raises(Exception):
h.project(2, "A")
with pytest.raises(Exception):
h.project(True, "A")
# cannot found
with pytest.raises(Exception):
h.project(-1, 9)
with pytest.raises(Exception):
h.project("G", "H")
def test_named_index_access():
"""
Test named index access -- whether NamedHist can be accessed by index.
"""
h = NamedHist(
axis.Regular(10, -5, 5, name="Ones"),
axis.Regular(10, -5, 5, name="Twos"),
axis.StrCategory(["hi", "hello"], name="Greet"),
axis.Boolean(name="Yes"),
axis.Integer(0, 10, name="Int"),
).fill(
Ones=np.ones(10),
Twos=np.ones(10) * 2,
Greet=["hi"] * 8 + ["hello"] * 2,
Yes=[True] * 6 + [False] * 4,
Int=np.ones(10),
)
assert h[1j, 2j, "hi", True, 1] == 6
assert (
h[
{
"Ones": 6,
"Twos": 7,
"Greet": bh.loc("hi"),
"Yes": bh.loc(True),
"Int": bh.loc(1),
}
]
== 6
)
assert h[0j + 1, -2j + 4, "hi", True, 1] == 6
assert (
h[
{
"Ones": bh.loc(1, 0),
"Twos": bh.loc(3, -1),
"Greet": "hi",
"Yes": True,
"Int": 1,
}
]
== 6
)
with pytest.raises(Exception):
h[0 : bh.loc(1, 0), 1 : bh.loc(3, -1), 2:"hi", 3:True, 4:1]
with pytest.raises(Exception):
h[0 : bh.loc(1, 0), 1 : bh.loc(3, -1), "Greet":"hi", 3:True, 4:1]
assert h[0:10:2j, 0:5:5j, "hello", False, 5]
assert len(h[::2j, 0:5, :, :, :].axes[1]) == 5
assert len(h[:, 0:5, :, :, :].axes[1]) == 5
# wrong loc shortcut
with pytest.raises(Exception):
h[0.5, 1 / 2, "hi", True, 1]
with pytest.raises(Exception):
h[0.5 + 1j, 1 / 2 + 1j, "hi", True, 1]
# wrong rebin shortcut
with pytest.raises(Exception):
h[0:10:0.2j, 0:5:0.5j, "hello", False, 5]
with pytest.raises(Exception):
h[0 : 10 : 1 + 2j, 0 : 5 : 1 + 5j, "hello", False, 5]
with pytest.raises(Exception):
h[0:10:20j, 0:5:10j, "hello", False, 5]
def test_named_transform_proxy():
"""
Test named transform proxy -- whether NamedHist transform proxy works properly.
"""
h0 = NamedHist.new.Sqrt(3, 4, 25, name="x").Sqrt(4, 25, 81, name="y").Double()
h0.fill(x=[5, 10, 17, 17], y=[26, 37, 50, 65])
assert h0[0, 0] == 1
assert h0[1, 1] == 1
assert h0[2, 2] == 1
assert h0[2, 3] == 1
# based on existing axis
with pytest.raises(Exception):
NamedHist.new.Regular(3, 4, 25, name="x").Sqrt()
# wrong value
with pytest.raises(Exception):
NamedHist.new.Sqrt(3, -4, 25, name="x")
h1 = (
NamedHist.new.Log(4, 1, 10_000, name="x")
.Log(3, 1 / 1_000, 1, name="y")
.Double()
)
h1.fill(x=[2, 11, 101, 1_001], y=[1 / 999, 1 / 99, 1 / 9, 1 / 9])
assert h1[0, 0] == 1
assert h1[1, 1] == 1
assert h1[2, 2] == 1
assert h1[3, 2] == 1
# wrong arguments
with pytest.raises(TypeError):
NamedHist.new.Reg(4, 1, 10_000, name="x").Log()
# wrong value
with pytest.raises(ValueError):
NamedHist.new.Log(3, -1, 10_000, name="x")
h2 = (
NamedHist.new.Pow(24, 1, 5, power=2, name="x")
.Pow(124, 1, 5, power=3, name="y")
.Double()
)
h2.fill(x=[1, 2, 3, 4], y=[1, 2, 3, 4])
assert h2[0, 0] == 1
assert h2[3, 7] == 1
assert h2[8, 26] == 1
assert h2[15, 63] == 1
# based on existing axis
with pytest.raises(TypeError):
NamedHist.new.Reg(24, 1, 5, name="x").Pow(2)
# wrong value
with pytest.raises(ValueError):
NamedHist.new.Pow(24, -1, 5, power=1 / 2, name="x")
# lack args
with pytest.raises(TypeError):
NamedHist.new.Pow(24, 1, 5, name="x")
ftype = ctypes.CFUNCTYPE(ctypes.c_double, ctypes.c_double)
h3 = (
NamedHist.new.Func(
4, 1, 5, forward=ftype(math.log), inverse=ftype(math.exp), name="x"
)
.Func(4, 1, 5, forward=ftype(np.log), inverse=ftype(np.exp), name="y")
.Double()
)
h3.fill(x=[1, 2, 3, 4], y=[1, 2, 3, 4])
assert h3[0, 0] == 1
assert h3[1, 1] == 1
assert h3[2, 2] == 1
assert h3[3, 3] == 1
# based on existing axis
with pytest.raises(TypeError):
NamedHist.new.Reg(24, 1, 5, name="x").Func(ftype(math.log), ftype(math.exp))
# Uncatchable warning
# assert NamedHist.new.Func(
# 4, -1, 5, name="x", forward=ftype(math.log), inverse=ftype(math.log)
# )
with pytest.raises(ValueError):
with pytest.warns(RuntimeWarning):
NamedHist.new.Func(
4, -1, 5, name="x", forward=ftype(np.log), inverse=ftype(np.log)
)
# lack args
with pytest.raises(TypeError):
NamedHist.new.Func(4, 1, 5, name="x")
def test_named_hist_proxy():
"""
Test named hist proxy -- whether NamedHist hist proxy works properly.
"""
h = NamedHist.new.Reg(10, 0, 1, name="x").Double().fill(x=[0.5, 0.5])
assert h[0.5j] == 2
assert type(h) is NamedHist
with pytest.raises(AttributeError):
NamedHist().new
h = (
NamedHist.new.Reg(10, 0, 1, name="x")
.Reg(10, 0, 1, name="y")
.Double()
.fill(x=[0.5, 0.5], y=[0.2, 0.6])
)
assert h[0.5j, 0.2j] == 1
assert h[bh.loc(0.5), bh.loc(0.6)] == 1
h = NamedHist.new.Bool(name="x").Double().fill(x=[True, True])
assert h[bh.loc(True)] == 2
h = (
NamedHist.new.Bool(name="x")
.Bool(name="y")
.Double()
.fill(x=[True, True], y=[True, False])
)
assert h[True, True] == 1
assert h[True, False] == 1
h = NamedHist.new.Var(range(10), name="x").Double().fill(x=[5, 5])
assert h[5j] == 2
h = (
NamedHist.new.Var(range(10), name="x")
.Var(range(10), name="y")
.Double()
.fill(x=[5, 5], y=[2, 6])
)
assert h[5j, 2j] == 1
assert h[bh.loc(5), bh.loc(6)] == 1
h = NamedHist.new.Int(0, 10, name="x").Double().fill(x=[5, 5])
assert h[5j] == 2
h = (
NamedHist.new.Int(0, 10, name="x")
.Int(0, 10, name="y")
.Double()
.fill(x=[5, 5], y=[2, 6])
)
assert h[5j, 2j] == 1
assert h[bh.loc(5), bh.loc(6)] == 1
h = NamedHist.new.IntCat(range(10), name="x").Double().fill(x=[5, 5])
assert h[5j] == 2
h = (
NamedHist.new.IntCat(range(10), name="x")
.IntCat(range(10), name="y")
.Double()
.fill(x=[5, 5], y=[2, 6])
)
assert h[5j, 2j] == 1
assert h[bh.loc(5), bh.loc(6)] == 1
h = NamedHist.new.StrCat("TF", name="x").Double().fill(x=["T", "T"])
assert h["T"] == 2
h = (
NamedHist.new.StrCat("TF", name="x")
.StrCat("TF", name="y")
.Double()
.fill(x=["T", "T"], y=["T", "F"])
)
assert h["T", "T"] == 1
assert h["T", "F"] == 1
def test_named_density():
"""
Test named density -- whether NamedHist density work properly.
"""
for data in range(10, 20, 10):
h = NamedHist(axis.Regular(10, -3, 3, name="x")).fill(x=np.random.randn(data))
assert pytest.approx(sum(h.density()), 2) == pytest.approx(10 / 6, 2)
def test_named_axestuple():
"""
Test named axes tuple -- whether NamedHist axes tuple work properly.
"""
h = NamedHist(
axis.Regular(20, 0, 12, name="A"),
axis.Regular(10, 1, 3, name="B", label="Beta"),
axis.Regular(15, 3, 5, name="C"),
axis.Regular(5, 3, 2, name="D", label="Axis 3"),
)
assert h.axes.name == ("A", "B", "C", "D")
assert h.axes.label == ("A", "Beta", "C", "Axis 3")
assert h.axes[0].size == 20
assert h.axes["A"].size == 20
assert h.axes[1].size == 10
assert h.axes["B"].size == 10
assert h.axes[2].size == 15
assert h.axes[:2].size == (20, 10)
assert h.axes["A":"B"].size == (20,)
assert h.axes[:"B"].size == (20,)
assert h.axes["B":].size == (10, 15, 5)
|
py | b401dd68c95ece7a2d41182990318f7e46c22c8f | #!/usr/bin/env python
import json
from oic.utils.client_management import CDB
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-l', dest='list', action='store_true')
parser.add_argument('-a', dest='add')
parser.add_argument('-d', dest='delete')
parser.add_argument(dest="config")
args = parser.parse_args()
# Client data base
cdb = CDB(args.config)
if args.list:
for key, val in cdb.items():
print('{}:{}'.format(key, val['redirect_uris']))
if args.add:
fp = open(args.add)
spec = json.load(fp)
cli_info = cdb.create(**spec)
print(cli_info)
if args.delete:
del cdb[args.delete]
|
py | b401dd6933fa38bfa06ceb9c78900f633bfd762e | # -*- coding: utf-8 -*-
import codecs
import numpy as np
from nltk.corpus import wordnet as wn
from tqdm import tqdm
from numpy import linalg as LA
import argparse
import json
import operator
from collections import defaultdict
from de_lemmatizer.lemmatizer import LOOKUP
import random
from demorphy import Analyzer
from random import shuffle
import ast
import itertools
random.seed(10)
parser = argparse.ArgumentParser()
parser.add_argument("--input", help="input file")
parser.add_argument("--output", help="output file")
parser.add_argument("--lang", default='de', help="output file")
parser.add_argument("--win", default=4, help="window size (for each size)")
parser.add_argument("--lemmatize", help="type of lemmatization")
parser.add_argument("--plus", action='store_true', help="add manual tags")
parser.add_argument("--rand", action='store_true', help="choose one of the options at random")
parser.add_argument("--hybrid", action='store_true', help="use hybrid dictionary")
parser.add_argument("--first", action='store_true', help="use first possible lemma")
parser.add_argument("--different", action='store_true', help="use different lemma for each instance")
def print_pairs(lang, vocab, to_male_dict=None, to_female_dict=None, to_lemma_dict=None, to_neut_dict=None, to_options_dict=None):
"""
    print (word, context) pairs into a file, in the format required for training word2vecf
    supports the different types of lemmatization of the context word, as discussed in the paper
"""
print ('printing output ...')
j = 0
with codecs.open(args.input, 'r', 'utf-8') as f, codecs.open(args.output, 'w', 'utf-8') as f_out:
for l in tqdm(f):
l = l.strip().lower().split()
for i,w in enumerate(l):
if w in vocab:
start = max(0, i - args.win)
end = i + args.win + 1
for c in l[start:end]:
if w != c and c in vocab:
if lang == 'it':
if c in to_male_dict and to_male_dict[c] == None or c in to_female_dict and to_female_dict[c] == None:
continue
if args.lemmatize == 'basic':
if args.different:
if c in to_options_dict:
opts = to_options_dict[c]
shuffle(opts)
f_out.write(w + ' ' + opts[0] + '\n')
else:
f_out.write(w + ' ' + c + '\n')
else:
f_out.write(w + ' ' + to_lemma_dict.get(c, c) + '\n')
elif args.lemmatize == 'to_masc':
f_out.write(w + ' ' + to_male_dict.get(c, c) + '\n')
elif args.lemmatize == 'to_fem':
f_out.write(w + ' ' + to_female_dict.get(c, c) + '\n')
elif not args.lemmatize:
f_out.write(w + ' ' + c + '\n')
if lang == 'de':
if args.lemmatize == 'basic':
if args.different:
if c in to_options_dict:
opts = to_options_dict[c]
shuffle(opts)
f_out.write(w + ' ' + opts[0] + '\n')
else:
f_out.write(w + ' ' + c + '\n')
else:
f_out.write(w + ' ' + to_lemma_dict.get(c, c) + '\n')
elif args.lemmatize == 'to_masc':
f_out.write(w + ' ' + to_male_dict.get(c, c) + '\n')
elif args.lemmatize == 'to_fem':
if c in to_female_dict and to_female_dict[c] == None:
continue
f_out.write(w + ' ' + to_female_dict.get(c, c) + '\n')
elif args.lemmatize == 'to_neut':
if c in to_neut_dict and to_neut_dict[c] == None:
continue
f_out.write(w + ' ' + to_neut_dict.get(c, c) + '\n')
elif not args.lemmatize:
f_out.write(w + ' ' + c + '\n')
if lang == 'en':
f_out.write(w + ' ' + c + '\n')
print ('done')
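# Small debugging helper: dumps every public attribute of a demorphy analysis object.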
def extract_features(anlyss):
atts = [a for a in dir(anlyss) if not a.startswith('__')]
for att in atts:
print('att', att)
print(getattr(anlyss, att))
def extract_lemmas(lang):
"""
Returns dictionaries of the lemmas and words in language 'lang' (with the respective features)
"""
if lang == 'it':
words = defaultdict(list)
lemmas = defaultdict(list)
with open('../data/lemmatizer_unique.txt', 'r', encoding='latin-1') as f:
for l in f:
l = l.strip().split('\t')
if len(l) == 3:
atts = l[2].split(':')
if len(atts) > 1:
features = set(atts[1].split('+'))
else:
features = None
pos = set(atts[0].split('-'))
words[l[0]].append((l[1], pos, features))
lemmas[l[1]].append((l[0], pos, features))
if lang =='de':
analyzer = Analyzer(char_subs_allowed=True)
words = defaultdict(list)
lemmas = defaultdict(list)
for w in vocab:
try:
s = analyzer.analyze(w)
except:
continue
else:
if len(s) == 0:
continue
for anlyss in s:
features = ast.literal_eval(str(anlyss))
words[w].append((features['LEMMA'], features))
lemmas[features['LEMMA']].append((w, features))
return words, lemmas
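# Builds the vocabulary: only tokens occurring more than 50 times in the corpus are kept,
# together with a simple word-to-index map.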
def create_vocab():
i = 0
freq = defaultdict(int)
with open(args.input, 'r', encoding="utf-8") as f:
for l in tqdm(f):
for w in l.strip().lower().split():
freq[w] += 1
vocab = {}
for w in freq:
if freq[w] > 50:
vocab[w] = freq[w]
w2i = {w: i for i, w in enumerate(vocab)}
return vocab, w2i
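# When several re-inflected candidates exist, the in-vocabulary one whose corpus frequency
# is closest to the source word's is preferred (or a random candidate with --rand).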
def return_oppos_gender(word, words, lemmas, to_gender, from_gender, lang):
"""
    Returns the corresponding form of the word in the target gender (with the same other features)
"""
options = []
if lang == 'it':
for (lemma, pos, feat) in words[word]:
if feat and from_gender in feat:
#this is from-gender according to feat
for (w_new, pos_new, feat_new) in lemmas[lemma]:
if pos == pos_new and feat.union({to_gender}).difference({from_gender}) == feat_new:
options.append(w_new)
if len(pos) > 1 and from_gender.upper() in pos:
#this is from-gender according to pos
for (w_new, pos_new, feat_new) in lemmas[lemma]:
if feat == feat_new and pos.union({to_gender.upper()}).difference({from_gender.upper()}) == pos_new:
options.append(w_new)
if word in manual_mapping_gender:
options = manual_mapping_gender[word]
if lang == 'de':
for lemma, features in words[word]:
if 'GENDER' in features and features['GENDER'] in from_gender:
for w_new, features_new in lemmas[lemma]:
if 'GENDER' in features_new and features_new['GENDER'] == to_gender \
and len(features_new) == len(features):
valid = []
for attr in ['CATEGORY', 'NUMERUS', 'PERSON', 'PTB_TAG', 'TENSE']:
if attr in features:
if attr not in features_new:
valid.append(False)
elif features[attr] != features_new[attr]:
valid.append(False)
if False not in valid:
options.append(w_new)
if len(options) == 0:
return None
if len(options) == 1:
return options[0]
if word in options:
return word
options_common = list(set([opt for opt in options if opt in vocab]))
if len(options_common) == 0 and word in vocab:
return options[0]
options = options_common
# If need to chose at random - do so
if args.rand:
shuffle(options)
return options[0]
# else: choose according to frequency
freqs = {}
for opt in options:
freqs[vocab[opt]] = opt
# return the option with the closest freq to word
return freqs[min(freqs, key=lambda x:abs(x-vocab[word]))]
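# Nouns are never treated as gendered here; only non-noun analyses carrying the source
# gender make a word eligible for re-gendering.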
def is_gendered(word, words, from_gender, lang):
"""
Checks if a given word is gendered or not
"""
if lang == 'it':
for (lemma, pos, feat) in words[word]:
if 'NOUN' in pos:
return False
if feat and from_gender in feat:
return True
if len(pos) > 1 and from_gender.upper() in pos:
return True
if lang =='de':
for lemma, features in words[word]:
if features['CATEGORY'] == 'NN':
return False
if 'GENDER' in features and features['GENDER'] in from_gender:
return True
return False
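# Lists every gender demorphy assigns to a (non-noun) word; used by create_hybrid_dict
# to decide between lemmatizing and re-inflecting.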
def extract_all_genders(word, words):
genders = []
for lemma, features in words[word]:
if features['CATEGORY'] == 'NN':
return []
if 'GENDER' in features and features['GENDER'] not in genders and features['GENDER'] in ['fem', 'masc', 'neut']:
genders.append(features['GENDER'])
return list(set(genders))
#############################################################################################################
#### functions for creating the different kinds of dictionaries, for the different lemmatization methods ####
#############################################################################################################
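# Maps every gendered in-vocabulary word to its re-inflected form in the target gender
# (None when no counterpart could be found).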
def create_gender_dict(words, lemmas, to_gender, lang):
gender_dict = {}
if lang == 'it':
if to_gender == 'f':
from_gender = 'm'
elif to_gender == 'm':
from_gender = 'f'
else:
raise ValueError('gender is not valid')
if lang == 'de':
if to_gender not in ['fem', 'masc', 'neut']:
raise ValueError('gender is not valid')
from_gender = ['masc', 'fem', 'neut']
from_gender.remove(to_gender)
for w in tqdm(words):
if w in vocab and is_gendered(w, words, from_gender, lang):
value = return_oppos_gender(w, words, lemmas, to_gender, from_gender, lang)
if value:
gender_dict[w] = value
else:
gender_dict[w] = None
return gender_dict
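# Hybrid dictionary: words analysed with more than one gender are mapped to their lemma
# when the target gender is among them, and to the plain re-gendered form otherwise.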
def create_hybrid_dict(words, to_gender):
gender_dict = {}
if to_gender not in ['fem', 'masc', 'neut']:
raise ValueError('gender is not valid')
for w in tqdm(words):
if w in vocab:
genders = extract_all_genders(w, words)
if len(genders) == 0:
continue
if len(genders) == 1 and to_gender in genders:
continue
if len(genders) > 1:
if to_gender in genders:
#take lemma
gender_dict[w] = to_lemma_dict[w]
else:
if to_gender == 'masc':
to_gender_dict = to_male_dict
elif to_gender == 'fem':
to_gender_dict = to_female_dict
elif to_gender == 'neut':
to_gender_dict = to_neut_dict
gender_dict[w] = to_gender_dict[w]
return gender_dict
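# Maps each word to a single lemma: the alphabetically first candidate with --first,
# otherwise a randomly chosen one; manual Italian overrides apply with --plus.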
def create_lemma_dict(words, lang):
lemma_dict = {}
for w in tqdm(words):
if w in vocab:
options = []
if lang == 'it':
for (lemma, pos, feat) in words[w]:
options.append(lemma)
if lang == 'de':
for lemma, features in words[w]:
options.append(lemma)
if len(options) > 0:
if args.first:
options = sorted(options)
else:
shuffle(options)
value = options[0]
lemma_dict[w] = value
else:
lemma_dict[w] = None
if lang =='it' and args.plus:
if w in manual_mapping_lemma:
lemma_dict[w] = manual_mapping_lemma[w]
return lemma_dict
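# Keeps the full set of candidate lemmas per word so that --different can sample a
# (possibly) different lemma for every occurrence in print_pairs.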
def create_options_dict(words, lang):
lemma_dict = {}
count = 0
for w in tqdm(words):
if w in vocab:
options = []
if lang == 'it':
for (lemma, pos, feat) in words[w]:
options.append(lemma)
if lang == 'de':
for lemma, features in words[w]:
options.append(lemma)
if len(set(options)) > 0:
lemma_dict[w] = list(set(options))
if lang =='it' and args.plus:
if w in manual_mapping_lemma:
lemma_dict[w] = [manual_mapping_lemma[w]]
return lemma_dict
if __name__ == "__main__":
args = parser.parse_args()
with open('../data/mappings/manual_mapping_gender.json', 'r') as datafile:
manual_mapping_gender = json.load(datafile)
with open('../data/mappings/manual_mapping_lemma.json', 'r') as datafile:
manual_mapping_lemma = json.load(datafile)
vocab, w2i = create_vocab()
if args.lang == 'it':
words, lemmas = extract_lemmas(args.lang)
to_male_dict = create_gender_dict(words, lemmas, 'm', args.lang)
to_female_dict = create_gender_dict(words, lemmas, 'f', args.lang)
to_lemma_dict = create_lemma_dict(words, args.lang)
to_options_dict = create_options_dict(words, args.lang)
print_pairs(args.lang, vocab, to_male_dict, to_female_dict, to_lemma_dict, to_options_dict=to_options_dict)
if args.lang == 'de':
words, lemmas = extract_lemmas(args.lang)
to_male_dict = create_gender_dict(words, lemmas, 'masc', args.lang)
to_female_dict = create_gender_dict(words, lemmas, 'fem', args.lang)
to_neut_dict = create_gender_dict(words, lemmas, 'neut', args.lang)
to_lemma_dict = create_lemma_dict(words, args.lang)
to_options_dict = create_options_dict(words, args.lang)
to_male_dict_hybrid = create_hybrid_dict(words, 'masc')
to_female_dict_hybrid = create_hybrid_dict(words, 'fem')
to_neut_dict_hybrid = create_hybrid_dict(words, 'neut')
if args.hybrid:
print_pairs(args.lang, vocab, to_male_dict_hybrid, to_female_dict_hybrid, to_lemma_dict, to_neut_dict_hybrid)
else:
print_pairs(args.lang, vocab, to_male_dict, to_female_dict, to_lemma_dict, to_neut_dict, to_options_dict=to_options_dict)
if args.lang == 'en':
print_pairs(args.lang, vocab)
|
py | b401dd8a2c745f9b6ced1a764f5f7ccb8eaa065f | from django import forms
from .models import Order
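# CartForm carries the product id as a hidden field alongside the requested quantity;
# CheckoutForm exposes every Order field except the internal 'paid' flag.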
class CartForm(forms.Form):
quantity = forms.IntegerField(initial='1')
product_id = forms.IntegerField(widget=forms.HiddenInput)
def __init__(self, request, *args, **kwargs):
self.request = request
super(CartForm, self).__init__(*args, **kwargs)
class CheckoutForm(forms.ModelForm):
class Meta:
model = Order
exclude = ('paid',)
widgets = {
            'address': forms.Textarea(attrs={'rows': 5, 'cols': 8}),
}
|
py | b401de0eab8d7d1cadd1119b21d9f1dc0302276d | _base_ = [
'_base_/models/fpn_r50.py', '_base_/datasets/ade20k.py',
'_base_/default_runtime.py', '_base_/schedules/schedule_80k.py'
]
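# Semantic FPN on ADE20K with a Twins PCPVT-Small backbone: the four stage outputs
# (64/128/320/512 channels) feed a 256-channel FPN neck, and the decode head is set
# to the 150 ADE20K classes.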
model = dict(
type='EncoderDecoder',
pretrained='pretrained/pcpvt_small.pth',
backbone=dict(
type='pcpvt_small_v0',
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[64, 128, 320, 512],
out_channels=256,
num_outs=4),
decode_head=dict(num_classes=150),
)
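# AdamW (lr 1e-4, weight decay 1e-4) overrides the optimizer from the base schedule;
# gradient clipping is disabled.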
optimizer = dict(type='AdamW', lr=0.0001, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
|