ext | sha | content
---|---|---
py | 1a52d03ee53c7628d7ee0b595e01ab2578f97812 | from .base_options import BaseOptions
class TrainOptions(BaseOptions):
def initialize(self, parser):
parser = BaseOptions.initialize(self, parser)
parser.add_argument('--display_freq', type=int, default=400, help='frequency of showing training results on screen')
parser.add_argument('--display_ncols', type=int, default=4, help='if positive, display all images in a single visdom web panel with a certain number of images per row.')
parser.add_argument('--display_id', type=int, default=1, help='window id of the web display')
parser.add_argument('--display_server', type=str, default="http://localhost", help='visdom server of the web display')
parser.add_argument('--display_env', type=str, default='main', help='visdom display environment name (default is "main")')
parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display')
parser.add_argument('--update_html_freq', type=int, default=1000, help='frequency of saving training results to html')
parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console')
parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results')
parser.add_argument('--save_epoch_freq', type=int, default=5, help='frequency of saving checkpoints at the end of epochs')
parser.add_argument('--save_by_iter', action='store_true', help='whether to save the model by iteration')
parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
parser.add_argument('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>, ...')
parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
parser.add_argument('--niter', type=int, default=100, help='# of iter at starting learning rate')
parser.add_argument('--niter_decay', type=int, default=100, help='# of iter to linearly decay learning rate to zero')
parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')
parser.add_argument('--g_lr', type=float, default=1e-5, help='generator initial learning rate for adam')
parser.add_argument('--d_lr', type=float, default=2e-4, help='discriminator initial learning rate for adam')
parser.add_argument('--no_lsgan', action='store_true', help='do *not* use least squares GAN; if set, use vanilla GAN')
parser.add_argument('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images')
parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
parser.add_argument('--lr_policy', type=str, default='lambda', help='learning rate policy: lambda|step|plateau|cosine')
parser.add_argument('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations')
parser.add_argument('--g_weight_decay', type=float, default=1e-3, help='weight_decay on generator')
parser.add_argument('--g_grad_clip', type=float, default=1.0, help='gradient clipping on generator')
parser.add_argument('--d_grad_clip', type=float, default=1.0, help='gradient clipping on discriminator')
self.isTrain = True
return parser
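# Minimal usage sketch (an assumption for illustration: BaseOptions is presumed
# to provide a parse() helper that builds the parser via initialize() and
# returns the parsed namespace; that helper is not shown in this file):
#
#   opt = TrainOptions().parse()            # hypothetical entry point
#   print(opt.g_lr, opt.d_lr, opt.niter)    # -> 1e-05 0.0002 100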
|
py | 1a52d05cec3d2cf1c6ad7ba2af59d0fea0cd1fe0 | import os
import shutil
import tempfile
import textwrap
import salt.config
import salt.loader
import salt.modules.cmdmod as cmdmod
import salt.modules.config as configmod
import salt.modules.file as filemod
import salt.utils.data
import salt.utils.files
import salt.utils.platform
import salt.utils.stringutils
from salt.exceptions import CommandExecutionError, SaltInvocationError
from salt.ext import six
from salt.utils.jinja import SaltCacheLoader
from tests.support.helpers import with_tempfile
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import DEFAULT, MagicMock, Mock, mock_open, patch
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import TestCase, skipIf
try:
import pytest
except ImportError:
pytest = None
if salt.utils.platform.is_windows():
import salt.modules.win_file as win_file
import salt.utils.win_dacl as win_dacl
SED_CONTENT = """test
some
content
/var/lib/foo/app/test
here
"""
class DummyStat:
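# Canned os.stat() result consumed by test_stats below; st_mode 33188 equals
# 0o100644, i.e. a regular file with rw-r--r-- permissions.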
st_mode = 33188
st_ino = 115331251
st_dev = 44
st_nlink = 1
st_uid = 99200001
st_gid = 99200001
st_size = 41743
st_atime = 1552661253
st_mtime = 1552661253
st_ctime = 1552661253
class FileReplaceTestCase(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
return {
filemod: {
"__salt__": {
"config.manage_mode": configmod.manage_mode,
"cmd.run": cmdmod.run,
"cmd.run_all": cmdmod.run_all,
},
"__opts__": {
"test": False,
"file_roots": {"base": "tmp"},
"pillar_roots": {"base": "tmp"},
"cachedir": "tmp",
"grains": {},
},
"__grains__": {"kernel": "Linux"},
"__utils__": {
"files.is_text": MagicMock(return_value=True),
"stringutils.get_diff": salt.utils.stringutils.get_diff,
},
}
}
MULTILINE_STRING = textwrap.dedent(
"""\
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nam rhoncus
enim ac bibendum vulputate. Etiam nibh velit, placerat ac auctor in,
lacinia a turpis. Nulla elit elit, ornare in sodales eu, aliquam sit
amet nisl.
Fusce ac vehicula lectus. Vivamus justo nunc, pulvinar in ornare nec,
sollicitudin id sem. Pellentesque sed ipsum dapibus, dapibus elit id,
malesuada nisi.
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec
venenatis tellus eget massa facilisis, in auctor ante aliquet. Sed nec
cursus metus. Curabitur massa urna, vehicula id porttitor sed, lobortis
quis leo.
"""
)
def setUp(self):
self.tfile = tempfile.NamedTemporaryFile(delete=False, mode="w+")
self.tfile.write(self.MULTILINE_STRING)
self.tfile.close()
def tearDown(self):
os.remove(self.tfile.name)
del self.tfile
def test_replace(self):
filemod.replace(self.tfile.name, r"Etiam", "Salticus", backup=False)
with salt.utils.files.fopen(self.tfile.name, "r") as fp:
self.assertIn("Salticus", salt.utils.stringutils.to_unicode(fp.read()))
def test_replace_append_if_not_found(self):
"""
Check that file.replace append_if_not_found works
"""
args = {
"pattern": "#*baz=(?P<value>.*)",
"repl": "baz=\\g<value>",
"append_if_not_found": True,
}
base = os.linesep.join(["foo=1", "bar=2"])
# File ending with a newline, no match
with tempfile.NamedTemporaryFile("w+b", delete=False) as tfile:
tfile.write(salt.utils.stringutils.to_bytes(base + os.linesep))
tfile.flush()
filemod.replace(tfile.name, **args)
expected = os.linesep.join([base, "baz=\\g<value>"]) + os.linesep
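# Note: since nothing in the file matched the pattern, the repl string is
# appended literally; there is no match for \g<value> to expand against, hence
# the raw backreference in the expected value above.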
with salt.utils.files.fopen(tfile.name) as tfile2:
self.assertEqual(salt.utils.stringutils.to_unicode(tfile2.read()), expected)
os.remove(tfile.name)
# File not ending with a newline, no match
with tempfile.NamedTemporaryFile("w+b", delete=False) as tfile:
tfile.write(salt.utils.stringutils.to_bytes(base))
tfile.flush()
filemod.replace(tfile.name, **args)
with salt.utils.files.fopen(tfile.name) as tfile2:
self.assertEqual(salt.utils.stringutils.to_unicode(tfile2.read()), expected)
os.remove(tfile.name)
# A newline should not be added in empty files
with tempfile.NamedTemporaryFile("w+b", delete=False) as tfile:
pass
filemod.replace(tfile.name, **args)
expected = args["repl"] + os.linesep
with salt.utils.files.fopen(tfile.name) as tfile2:
self.assertEqual(salt.utils.stringutils.to_unicode(tfile2.read()), expected)
os.remove(tfile.name)
# Using not_found_content, rather than repl
with tempfile.NamedTemporaryFile("w+b", delete=False) as tfile:
tfile.write(salt.utils.stringutils.to_bytes(base))
tfile.flush()
args["not_found_content"] = "baz=3"
expected = os.linesep.join([base, "baz=3"]) + os.linesep
filemod.replace(tfile.name, **args)
with salt.utils.files.fopen(tfile.name) as tfile2:
self.assertEqual(salt.utils.stringutils.to_unicode(tfile2.read()), expected)
os.remove(tfile.name)
# not appending if matches
with tempfile.NamedTemporaryFile("w+b", delete=False) as tfile:
base = os.linesep.join(["foo=1", "baz=42", "bar=2"])
tfile.write(salt.utils.stringutils.to_bytes(base))
tfile.flush()
expected = base
filemod.replace(tfile.name, **args)
with salt.utils.files.fopen(tfile.name) as tfile2:
self.assertEqual(salt.utils.stringutils.to_unicode(tfile2.read()), expected)
def test_backup(self):
fext = ".bak"
bak_file = "{}{}".format(self.tfile.name, fext)
filemod.replace(self.tfile.name, r"Etiam", "Salticus", backup=fext)
self.assertTrue(os.path.exists(bak_file))
os.unlink(bak_file)
def test_nobackup(self):
fext = ".bak"
bak_file = "{}{}".format(self.tfile.name, fext)
filemod.replace(self.tfile.name, r"Etiam", "Salticus", backup=False)
self.assertFalse(os.path.exists(bak_file))
def test_dry_run(self):
before_ctime = os.stat(self.tfile.name).st_mtime
filemod.replace(self.tfile.name, r"Etiam", "Salticus", dry_run=True)
after_ctime = os.stat(self.tfile.name).st_mtime
self.assertEqual(before_ctime, after_ctime)
def test_show_changes(self):
ret = filemod.replace(self.tfile.name, r"Etiam", "Salticus", show_changes=True)
self.assertTrue(ret.startswith("---")) # looks like a diff
def test_noshow_changes(self):
ret = filemod.replace(self.tfile.name, r"Etiam", "Salticus", show_changes=False)
self.assertIsInstance(ret, bool)
def test_re_str_flags(self):
# upper- & lower-case
filemod.replace(
self.tfile.name, r"Etiam", "Salticus", flags=["MULTILINE", "ignorecase"]
)
def test_re_int_flags(self):
filemod.replace(self.tfile.name, r"Etiam", "Salticus", flags=10)
def test_numeric_repl(self):
"""
This test covers cases where the replacement string is numeric, and the
CLI parser yamlifies it into a numeric type. If not converted back to a
string type in file.replace, a TypeError occurs when the replacement is
attempted. See https://github.com/saltstack/salt/issues/9097 for more
information.
"""
filemod.replace(self.tfile.name, r"Etiam", 123)
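# (Illustrative only, not part of the original test) the coercion described in
# the docstring happens when a CLI invocation such as
#   salt '*' file.replace /tmp/foo pattern=Etiam repl=123
# is YAML-parsed, turning repl into an int before it reaches file.replace.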
def test_search_only_return_true(self):
ret = filemod.replace(self.tfile.name, r"Etiam", "Salticus", search_only=True)
self.assertIsInstance(ret, bool)
self.assertEqual(ret, True)
def test_search_only_return_false(self):
ret = filemod.replace(self.tfile.name, r"Etian", "Salticus", search_only=True)
self.assertIsInstance(ret, bool)
self.assertEqual(ret, False)
class FileCommentLineTestCase(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
return {
filemod: {
"__salt__": {
"config.manage_mode": configmod.manage_mode,
"cmd.run": cmdmod.run,
"cmd.run_all": cmdmod.run_all,
},
"__opts__": {
"test": False,
"file_roots": {"base": "tmp"},
"pillar_roots": {"base": "tmp"},
"cachedir": "tmp",
"grains": {},
},
"__grains__": {"kernel": "Linux"},
"__utils__": {
"files.is_text": MagicMock(return_value=True),
"stringutils.get_diff": salt.utils.stringutils.get_diff,
},
}
}
MULTILINE_STRING = textwrap.dedent(
"""\
Lorem
ipsum
#dolor
"""
)
MULTILINE_STRING = os.linesep.join(MULTILINE_STRING.splitlines())
def setUp(self):
self.tfile = tempfile.NamedTemporaryFile(delete=False, mode="w+")
self.tfile.write(self.MULTILINE_STRING)
self.tfile.close()
def tearDown(self):
os.remove(self.tfile.name)
del self.tfile
def test_comment_line(self):
filemod.comment_line(self.tfile.name, "^ipsum")
with salt.utils.files.fopen(self.tfile.name, "r") as fp:
filecontent = fp.read()
self.assertIn("#ipsum", filecontent)
def test_comment(self):
filemod.comment(self.tfile.name, "^ipsum")
with salt.utils.files.fopen(self.tfile.name, "r") as fp:
filecontent = fp.read()
self.assertIn("#ipsum", filecontent)
def test_comment_different_character(self):
filemod.comment_line(self.tfile.name, "^ipsum", "//")
with salt.utils.files.fopen(self.tfile.name, "r") as fp:
filecontent = fp.read()
self.assertIn("//ipsum", filecontent)
def test_comment_not_found(self):
filemod.comment_line(self.tfile.name, "^sit")
with salt.utils.files.fopen(self.tfile.name, "r") as fp:
filecontent = fp.read()
self.assertNotIn("#sit", filecontent)
self.assertNotIn("sit", filecontent)
def test_uncomment(self):
filemod.uncomment(self.tfile.name, "dolor")
with salt.utils.files.fopen(self.tfile.name, "r") as fp:
filecontent = fp.read()
self.assertIn("dolor", filecontent)
self.assertNotIn("#dolor", filecontent)
class FileBlockReplaceTestCase(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
if salt.utils.platform.is_windows():
grains = {"kernel": "Windows"}
else:
grains = {"kernel": "Linux"}
opts = {
"test": False,
"file_roots": {"base": "tmp"},
"pillar_roots": {"base": "tmp"},
"cachedir": "tmp",
"grains": grains,
}
ret = {
filemod: {
"__salt__": {
"config.manage_mode": MagicMock(),
"cmd.run": cmdmod.run,
"cmd.run_all": cmdmod.run_all,
},
"__opts__": opts,
"__grains__": grains,
"__utils__": {
"files.is_binary": MagicMock(return_value=False),
"files.get_encoding": MagicMock(return_value="utf-8"),
"stringutils.get_diff": salt.utils.stringutils.get_diff,
},
}
}
if salt.utils.platform.is_windows():
ret.update(
{
win_dacl: {"__opts__": opts},
win_file: {"__utils__": {"dacl.check_perms": win_dacl.check_perms}},
}
)
return ret
MULTILINE_STRING = textwrap.dedent(
"""\
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nam rhoncus
enim ac bibendum vulputate. Etiam nibh velit, placerat ac auctor in,
lacinia a turpis. Nulla elit elit, ornare in sodales eu, aliquam sit
amet nisl.
Fusce ac vehicula lectus. Vivamus justo nunc, pulvinar in ornare nec,
sollicitudin id sem. Pellentesque sed ipsum dapibus, dapibus elit id,
malesuada nisi.
first part of start line // START BLOCK : part of start line not removed
to be removed
first part of end line // END BLOCK : part of end line not removed
#-- START BLOCK UNFINISHED
#-- START BLOCK 1
old content part 1
old content part 2
#-- END BLOCK 1
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec
venenatis tellus eget massa facilisis, in auctor ante aliquet. Sed nec
cursus metus. Curabitur massa urna, vehicula id porttitor sed, lobortis
quis leo.
"""
)
MULTILINE_STRING = os.linesep.join(MULTILINE_STRING.splitlines())
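# The fixture above deliberately mixes three marker layouts exercised below: an
# inline "// START BLOCK ... // END BLOCK" pair embedded in partial lines, an
# unmatched "#-- START BLOCK UNFINISHED" marker, and a complete
# "#-- START BLOCK 1" / "#-- END BLOCK 1" block wrapping the old content.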
def setUp(self):
self.tfile = tempfile.NamedTemporaryFile(
delete=False, prefix="blockrepltmp", mode="w+b"
)
self.tfile.write(salt.utils.stringutils.to_bytes(self.MULTILINE_STRING))
self.tfile.close()
def tearDown(self):
os.remove(self.tfile.name)
del self.tfile
def test_replace_multiline(self):
new_multiline_content = os.linesep.join(
[
"Who's that then?",
"Well, how'd you become king, then?",
"We found them. I'm not a witch.",
"We shall say 'Ni' again to you, if you do not appease us.",
]
)
if salt.utils.platform.is_windows():
check_perms_patch = win_file.check_perms
else:
check_perms_patch = filemod.check_perms
with patch.object(filemod, "check_perms", check_perms_patch):
filemod.blockreplace(
self.tfile.name,
marker_start="#-- START BLOCK 1",
marker_end="#-- END BLOCK 1",
content=new_multiline_content,
backup=False,
append_newline=None,
)
with salt.utils.files.fopen(self.tfile.name, "rb") as fp:
filecontent = fp.read()
self.assertIn(
salt.utils.stringutils.to_bytes(
os.linesep.join(
["#-- START BLOCK 1", new_multiline_content, "#-- END BLOCK 1"]
)
),
filecontent,
)
self.assertNotIn(b"old content part 1", filecontent)
self.assertNotIn(b"old content part 2", filecontent)
def test_replace_append(self):
new_content = "Well, I didn't vote for you."
self.assertRaises(
CommandExecutionError,
filemod.blockreplace,
self.tfile.name,
marker_start="#-- START BLOCK 2",
marker_end="#-- END BLOCK 2",
content=new_content,
append_if_not_found=False,
backup=False,
)
with salt.utils.files.fopen(self.tfile.name, "r") as fp:
self.assertNotIn(
"#-- START BLOCK 2" + "\n" + new_content + "#-- END BLOCK 2",
salt.utils.stringutils.to_unicode(fp.read()),
)
if salt.utils.platform.is_windows():
check_perms_patch = win_file.check_perms
else:
check_perms_patch = filemod.check_perms
with patch.object(filemod, "check_perms", check_perms_patch):
filemod.blockreplace(
self.tfile.name,
marker_start="#-- START BLOCK 2",
marker_end="#-- END BLOCK 2",
content=new_content,
backup=False,
append_if_not_found=True,
)
with salt.utils.files.fopen(self.tfile.name, "rb") as fp:
self.assertIn(
salt.utils.stringutils.to_bytes(
os.linesep.join(
["#-- START BLOCK 2", "{}#-- END BLOCK 2".format(new_content)]
)
),
fp.read(),
)
def test_replace_insert_after(self):
new_content = "Well, I didn't vote for you."
self.assertRaises(
CommandExecutionError,
filemod.blockreplace,
self.tfile.name,
marker_start="#-- START BLOCK 2",
marker_end="#-- END BLOCK 2",
content=new_content,
insert_after_match="not in the text",
backup=False,
)
with salt.utils.files.fopen(self.tfile.name, "r") as fp:
self.assertNotIn(
"#-- START BLOCK 2" + "\n" + new_content + "#-- END BLOCK 2",
salt.utils.stringutils.to_unicode(fp.read()),
)
if salt.utils.platform.is_windows():
check_perms_patch = win_file.check_perms
else:
check_perms_patch = filemod.check_perms
with patch.object(filemod, "check_perms", check_perms_patch):
filemod.blockreplace(
self.tfile.name,
marker_start="#-- START BLOCK 2",
marker_end="#-- END BLOCK 2",
content=new_content,
backup=False,
insert_after_match="malesuada",
)
with salt.utils.files.fopen(self.tfile.name, "rb") as fp:
self.assertIn(
salt.utils.stringutils.to_bytes(
os.linesep.join(
["#-- START BLOCK 2", "{}#-- END BLOCK 2".format(new_content)]
)
),
fp.read(),
)
def test_replace_append_newline_at_eof(self):
"""
Check that file.blockreplace works consistently on files with and
without newlines at end of file.
"""
base = "bar"
args = {
"marker_start": "#start",
"marker_end": "#stop",
"content": "baz",
"append_if_not_found": True,
}
block = os.linesep.join(["#start", "baz#stop"]) + os.linesep
# File ending with a newline
with tempfile.NamedTemporaryFile(mode="w+b", delete=False) as tfile:
tfile.write(salt.utils.stringutils.to_bytes(base + os.linesep))
tfile.flush()
if salt.utils.platform.is_windows():
check_perms_patch = win_file.check_perms
else:
check_perms_patch = filemod.check_perms
with patch.object(filemod, "check_perms", check_perms_patch):
filemod.blockreplace(tfile.name, **args)
expected = os.linesep.join([base, block])
with salt.utils.files.fopen(tfile.name) as tfile2:
self.assertEqual(salt.utils.stringutils.to_unicode(tfile2.read()), expected)
os.remove(tfile.name)
# File not ending with a newline
with tempfile.NamedTemporaryFile(mode="w+b", delete=False) as tfile:
tfile.write(salt.utils.stringutils.to_bytes(base))
tfile.flush()
if salt.utils.platform.is_windows():
check_perms_patch = win_file.check_perms
else:
check_perms_patch = filemod.check_perms
with patch.object(filemod, "check_perms", check_perms_patch):
filemod.blockreplace(tfile.name, **args)
with salt.utils.files.fopen(tfile.name) as tfile2:
self.assertEqual(salt.utils.stringutils.to_unicode(tfile2.read()), expected)
os.remove(tfile.name)
# A newline should not be added in empty files
with tempfile.NamedTemporaryFile(mode="w+b", delete=False) as tfile:
pass
if salt.utils.platform.is_windows():
check_perms_patch = win_file.check_perms
else:
check_perms_patch = filemod.check_perms
with patch.object(filemod, "check_perms", check_perms_patch):
filemod.blockreplace(tfile.name, **args)
with salt.utils.files.fopen(tfile.name) as tfile2:
self.assertEqual(salt.utils.stringutils.to_unicode(tfile2.read()), block)
os.remove(tfile.name)
def test_replace_prepend(self):
new_content = "Well, I didn't vote for you."
self.assertRaises(
CommandExecutionError,
filemod.blockreplace,
self.tfile.name,
marker_start="#-- START BLOCK 2",
marker_end="#-- END BLOCK 2",
content=new_content,
prepend_if_not_found=False,
backup=False,
)
with salt.utils.files.fopen(self.tfile.name, "rb") as fp:
self.assertNotIn(
salt.utils.stringutils.to_bytes(
os.linesep.join(
["#-- START BLOCK 2", "{}#-- END BLOCK 2".format(new_content)]
)
),
fp.read(),
)
if salt.utils.platform.is_windows():
check_perms_patch = win_file.check_perms
else:
check_perms_patch = filemod.check_perms
with patch.object(filemod, "check_perms", check_perms_patch):
filemod.blockreplace(
self.tfile.name,
marker_start="#-- START BLOCK 2",
marker_end="#-- END BLOCK 2",
content=new_content,
backup=False,
prepend_if_not_found=True,
)
with salt.utils.files.fopen(self.tfile.name, "rb") as fp:
self.assertTrue(
fp.read().startswith(
salt.utils.stringutils.to_bytes(
os.linesep.join(
[
"#-- START BLOCK 2",
"{}#-- END BLOCK 2".format(new_content),
]
)
)
)
)
def test_replace_insert_before(self):
new_content = "Well, I didn't vote for you."
self.assertRaises(
CommandExecutionError,
filemod.blockreplace,
self.tfile.name,
marker_start="#-- START BLOCK 2",
marker_end="#-- END BLOCK 2",
content=new_content,
insert_before_match="not in the text",
backup=False,
)
with salt.utils.files.fopen(self.tfile.name, "r") as fp:
self.assertNotIn(
"#-- START BLOCK 2" + "\n" + new_content + "#-- END BLOCK 2",
salt.utils.stringutils.to_unicode(fp.read()),
)
if salt.utils.platform.is_windows():
check_perms_patch = win_file.check_perms
else:
check_perms_patch = filemod.check_perms
with patch.object(filemod, "check_perms", check_perms_patch):
filemod.blockreplace(
self.tfile.name,
marker_start="#-- START BLOCK 2",
marker_end="#-- END BLOCK 2",
content=new_content,
backup=False,
insert_before_match="malesuada",
)
with salt.utils.files.fopen(self.tfile.name, "rb") as fp:
self.assertIn(
salt.utils.stringutils.to_bytes(
os.linesep.join(
["#-- START BLOCK 2", "{}#-- END BLOCK 2".format(new_content)]
)
),
fp.read(),
)
def test_replace_partial_marked_lines(self):
if salt.utils.platform.is_windows():
check_perms_patch = win_file.check_perms
else:
check_perms_patch = filemod.check_perms
with patch.object(filemod, "check_perms", check_perms_patch):
filemod.blockreplace(
self.tfile.name,
marker_start="// START BLOCK",
marker_end="// END BLOCK",
content="new content 1",
backup=False,
)
with salt.utils.files.fopen(self.tfile.name, "r") as fp:
filecontent = salt.utils.stringutils.to_unicode(fp.read())
self.assertIn("new content 1", filecontent)
self.assertNotIn("to be removed", filecontent)
self.assertIn("first part of start line", filecontent)
self.assertNotIn("first part of end line", filecontent)
self.assertIn("part of start line not removed", filecontent)
self.assertIn("part of end line not removed", filecontent)
def test_backup(self):
fext = ".bak"
bak_file = "{}{}".format(self.tfile.name, fext)
if salt.utils.platform.is_windows():
check_perms_patch = win_file.check_perms
else:
check_perms_patch = filemod.check_perms
with patch.object(filemod, "check_perms", check_perms_patch):
filemod.blockreplace(
self.tfile.name,
marker_start="// START BLOCK",
marker_end="// END BLOCK",
content="new content 2",
backup=fext,
)
self.assertTrue(os.path.exists(bak_file))
os.unlink(bak_file)
self.assertFalse(os.path.exists(bak_file))
fext = ".bak"
bak_file = "{}{}".format(self.tfile.name, fext)
if salt.utils.platform.is_windows():
check_perms_patch = win_file.check_perms
else:
check_perms_patch = filemod.check_perms
with patch.object(filemod, "check_perms", check_perms_patch):
filemod.blockreplace(
self.tfile.name,
marker_start="// START BLOCK",
marker_end="// END BLOCK",
content="new content 3",
backup=False,
)
self.assertFalse(os.path.exists(bak_file))
def test_no_modifications(self):
if salt.utils.platform.is_windows():
check_perms_patch = win_file.check_perms
else:
check_perms_patch = filemod.check_perms
with patch.object(filemod, "check_perms", check_perms_patch):
filemod.blockreplace(
self.tfile.name,
marker_start="#-- START BLOCK 1",
marker_end="#-- END BLOCK 1",
content="new content 4",
backup=False,
append_newline=None,
)
before_ctime = os.stat(self.tfile.name).st_mtime
if salt.utils.platform.is_windows():
check_perms_patch = win_file.check_perms
else:
check_perms_patch = filemod.check_perms
with patch.object(filemod, "check_perms", check_perms_patch):
filemod.blockreplace(
self.tfile.name,
marker_start="#-- START BLOCK 1",
marker_end="#-- END BLOCK 1",
content="new content 4",
backup=False,
append_newline=None,
)
after_ctime = os.stat(self.tfile.name).st_mtime
self.assertEqual(before_ctime, after_ctime)
def test_dry_run(self):
before_ctime = os.stat(self.tfile.name).st_mtime
filemod.blockreplace(
self.tfile.name,
marker_start="// START BLOCK",
marker_end="// END BLOCK",
content="new content 5",
dry_run=True,
)
after_ctime = os.stat(self.tfile.name).st_mtime
self.assertEqual(before_ctime, after_ctime)
def test_show_changes(self):
if salt.utils.platform.is_windows():
check_perms_patch = win_file.check_perms
else:
check_perms_patch = filemod.check_perms
with patch.object(filemod, "check_perms", check_perms_patch):
ret = filemod.blockreplace(
self.tfile.name,
marker_start="// START BLOCK",
marker_end="// END BLOCK",
content="new content 6",
backup=False,
show_changes=True,
)
self.assertTrue(ret.startswith("---")) # looks like a diff
ret = filemod.blockreplace(
self.tfile.name,
marker_start="// START BLOCK",
marker_end="// END BLOCK",
content="new content 7",
backup=False,
show_changes=False,
)
self.assertIsInstance(ret, bool)
def test_unfinished_block_exception(self):
self.assertRaises(
CommandExecutionError,
filemod.blockreplace,
self.tfile.name,
marker_start="#-- START BLOCK UNFINISHED",
marker_end="#-- END BLOCK UNFINISHED",
content="foobar",
backup=False,
)
@skipIf(salt.utils.platform.is_windows(), "Skip on windows")
class FileGrepTestCase(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
return {
filemod: {
"__salt__": {
"config.manage_mode": configmod.manage_mode,
"cmd.run": cmdmod.run,
"cmd.run_all": cmdmod.run_all,
},
"__opts__": {
"test": False,
"file_roots": {"base": "tmp"},
"pillar_roots": {"base": "tmp"},
"cachedir": "tmp",
"grains": {},
},
"__grains__": {"kernel": "Linux"},
"__utils__": {
"files.is_text": MagicMock(return_value=True),
"stringutils.get_diff": salt.utils.stringutils.get_diff,
},
}
}
MULTILINE_STRING = textwrap.dedent(
"""\
Lorem ipsum dolor sit amet, consectetur
adipiscing elit. Nam rhoncus enim ac
bibendum vulputate.
"""
)
MULTILINE_STRING = os.linesep.join(MULTILINE_STRING.splitlines())
def setUp(self):
self.tfile = tempfile.NamedTemporaryFile(delete=False, mode="w+")
self.tfile.write(self.MULTILINE_STRING)
self.tfile.close()
def tearDown(self):
os.remove(self.tfile.name)
del self.tfile
def test_grep_query_exists(self):
result = filemod.grep(self.tfile.name, "Lorem ipsum")
self.assertTrue(result, None)
self.assertTrue(result["retcode"] == 0)
self.assertTrue(result["stdout"] == "Lorem ipsum dolor sit amet, consectetur")
self.assertTrue(result["stderr"] == "")
def test_grep_query_not_exists(self):
result = filemod.grep(self.tfile.name, "Lorem Lorem")
self.assertTrue(result["retcode"] == 1)
self.assertTrue(result["stdout"] == "")
self.assertTrue(result["stderr"] == "")
def test_grep_query_exists_with_opt(self):
result = filemod.grep(self.tfile.name, "Lorem ipsum", "-i")
self.assertTrue(result, None)
self.assertTrue(result["retcode"] == 0)
self.assertTrue(result["stdout"] == "Lorem ipsum dolor sit amet, consectetur")
self.assertTrue(result["stderr"] == "")
def test_grep_query_not_exists_opt(self):
result = filemod.grep(self.tfile.name, "Lorem Lorem", "-v")
self.assertTrue(result["retcode"] == 0)
self.assertTrue(result["stdout"] == FileGrepTestCase.MULTILINE_STRING)
self.assertTrue(result["stderr"] == "")
def test_grep_query_too_many_opts(self):
with self.assertRaisesRegex(
SaltInvocationError, "^Passing multiple command line arg"
) as cm:
result = filemod.grep(self.tfile.name, "Lorem Lorem", "-i -b2")
def test_grep_query_exists_wildcard(self):
_file = "{}*".format(self.tfile.name)
result = filemod.grep(_file, "Lorem ipsum")
self.assertTrue(result, None)
self.assertTrue(result["retcode"] == 0)
self.assertTrue(result["stdout"] == "Lorem ipsum dolor sit amet, consectetur")
self.assertTrue(result["stderr"] == "")
def test_grep_file_not_exists_wildcard(self):
_file = "{}-junk*".format(self.tfile.name)
result = filemod.grep(_file, "Lorem ipsum")
self.assertTrue(result, None)
self.assertFalse(result["retcode"] == 0)
self.assertFalse(result["stdout"] == "Lorem ipsum dolor sit amet, consectetur")
_expected_stderr = "grep: {}-junk*: No such file or directory".format(
self.tfile.name
)
self.assertTrue(result["stderr"] == _expected_stderr)
class FileModuleTestCase(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
return {
filemod: {
"__salt__": {
"config.manage_mode": configmod.manage_mode,
"cmd.run": cmdmod.run,
"cmd.run_all": cmdmod.run_all,
},
"__opts__": {
"test": False,
"file_roots": {"base": "tmp"},
"pillar_roots": {"base": "tmp"},
"cachedir": "tmp",
"grains": {},
},
"__grains__": {"kernel": "Linux"},
"__utils__": {"stringutils.get_diff": salt.utils.stringutils.get_diff},
}
}
def test_check_file_meta_binary_contents(self):
"""
Ensure that using the check_file_meta function does not raise a
UnicodeDecodeError when used with binary contents (issue #57184).
"""
contents = b"\xf4\x91"
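# Two bytes that cannot be decoded as UTF-8 on their own, reproducing the
# UnicodeDecodeError scenario from issue #57184.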
filemod.check_file_meta(
"test",
"test",
"salt://test",
{},
"root",
"root",
"755",
None,
"base",
contents=contents,
)
@skipIf(salt.utils.platform.is_windows(), "lsattr is not available on Windows")
def test_check_file_meta_no_lsattr(self):
"""
Ensure that we skip attribute comparison if lsattr(1) is not found
"""
source = "salt:///README.md"
name = "/home/git/proj/a/README.md"
source_sum = {}
stats_result = {
"size": 22,
"group": "wheel",
"uid": 0,
"type": "file",
"mode": "0600",
"gid": 0,
"target": name,
"user": "root",
"mtime": 1508356390,
"atime": 1508356390,
"inode": 447,
"ctime": 1508356390,
}
with patch("salt.modules.file.stats") as m_stats:
m_stats.return_value = stats_result
with patch("salt.utils.path.which") as m_which:
m_which.return_value = None
result = filemod.check_file_meta(
name, name, source, source_sum, "root", "root", "755", None, "base"
)
self.assertTrue(result, None)
@skipIf(
salt.utils.platform.is_windows() or salt.utils.platform.is_aix(),
"lsattr is not available on Windows and AIX",
)
def test_cmp_attrs_extents_flag(self):
"""
Test that the cmp_attr function handles the extents flag correctly.
This test specifically tests for a bug described in #57189.
"""
# If the e attribute is not present and shall not be set, it should be
# neither in the added nor in the removed set.
with patch("salt.modules.file.lsattr") as m_lsattr:
m_lsattr.return_value = {"file": ""}
changes = filemod._cmp_attrs("file", "")
self.assertIsNone(changes.added)
self.assertIsNone(changes.removed)
# If the e attribute is present and shall also be set, it should be
# neither in the added nor in the removed set.
with patch("salt.modules.file.lsattr") as m_lsattr:
m_lsattr.return_value = {"file": "e"}
changes = filemod._cmp_attrs("file", "e")
self.assertIsNone(changes.added)
self.assertIsNone(changes.removed)
# If the e attribute is present and shall not be set, it should be
# neither in the added nor in the removed set. One would assume that it
# should be in the removed set, but the e attribute can never be reset,
# so it is correct that both sets are empty.
with patch("salt.modules.file.lsattr") as m_lsattr:
m_lsattr.return_value = {"file": "e"}
changes = filemod._cmp_attrs("file", "")
self.assertIsNone(changes.added)
self.assertIsNone(changes.removed)
# If the e attribute is not present and shall be set, it should be in
# the added, but not in the removed set.
with patch("salt.modules.file.lsattr") as m_lsattr:
m_lsattr.return_value = {"file": ""}
changes = filemod._cmp_attrs("file", "e")
self.assertEqual("e", changes.added)
self.assertIsNone(changes.removed)
@skipIf(salt.utils.platform.is_windows(), "SED is not available on Windows")
def test_sed_limit_escaped(self):
with tempfile.NamedTemporaryFile(mode="w+") as tfile:
tfile.write(SED_CONTENT)
tfile.seek(0, 0)
path = tfile.name
before = "/var/lib/foo"
after = ""
limit = "^{}".format(before)
filemod.sed(path, before, after, limit=limit)
with salt.utils.files.fopen(path, "r") as newfile:
self.assertEqual(
SED_CONTENT.replace(before, ""),
salt.utils.stringutils.to_unicode(newfile.read()),
)
def test_append_newline_at_eof(self):
"""
Check that file.append works consistently on files with and without
newlines at end of file.
"""
# File ending with a newline
with tempfile.NamedTemporaryFile(mode="wb", delete=False) as tfile:
tfile.write(salt.utils.stringutils.to_bytes("foo" + os.linesep))
tfile.flush()
filemod.append(tfile.name, "bar")
expected = os.linesep.join(["foo", "bar", ""])
with salt.utils.files.fopen(tfile.name) as tfile2:
new_file = salt.utils.stringutils.to_unicode(tfile2.read())
self.assertEqual(new_file, expected)
os.remove(tfile.name)
# File not ending with a newline
with tempfile.NamedTemporaryFile(mode="wb", delete=False) as tfile:
tfile.write(salt.utils.stringutils.to_bytes("foo"))
tfile.flush()
filemod.append(tfile.name, "bar")
with salt.utils.files.fopen(tfile.name) as tfile2:
self.assertEqual(salt.utils.stringutils.to_unicode(tfile2.read()), expected)
# A newline should be added in empty files
with tempfile.NamedTemporaryFile(mode="wb", delete=False) as tfile:
filemod.append(tfile.name, salt.utils.stringutils.to_str("bar"))
with salt.utils.files.fopen(tfile.name) as tfile2:
self.assertEqual(
salt.utils.stringutils.to_unicode(tfile2.read()), "bar" + os.linesep
)
os.remove(tfile.name)
def test_extract_hash(self):
"""
Check various hash file formats.
"""
# With file name
with tempfile.NamedTemporaryFile(mode="w+b", delete=False) as tfile:
tfile.write(
salt.utils.stringutils.to_bytes(
"rc.conf ef6e82e4006dee563d98ada2a2a80a27\n"
"ead48423703509d37c4a90e6a0d53e143b6fc268 example.tar.gz\n"
"fe05bcdcdc4928012781a5f1a2a77cbb5398e106 ./subdir/example.tar.gz\n"
"ad782ecdac770fc6eb9a62e44f90873fb97fb26b *foo.tar.bz2\n"
)
)
tfile.flush()
result = filemod.extract_hash(tfile.name, "", "/rc.conf")
self.assertEqual(
result, {"hsum": "ef6e82e4006dee563d98ada2a2a80a27", "hash_type": "md5"}
)
result = filemod.extract_hash(tfile.name, "", "/example.tar.gz")
self.assertEqual(
result,
{"hsum": "ead48423703509d37c4a90e6a0d53e143b6fc268", "hash_type": "sha1"},
)
# All the checksums in this test file are sha1 sums. We run this
# loop three times. The first pass tests auto-detection of hash
# type by length of the hash. The second tests matching a specific
# type. The third tests a failed attempt to match a specific type,
# since sha256 was requested but sha1 is what is in the file.
for hash_type in ("", "sha1", "sha256"):
# Test the source_hash_name argument. Even though there are
# matches in the source_hash file for both the file_name and
# source params, they should be ignored in favor of the
# source_hash_name.
file_name = "/example.tar.gz"
source = "https://mydomain.tld/foo.tar.bz2?key1=val1&key2=val2"
source_hash_name = "./subdir/example.tar.gz"
result = filemod.extract_hash(
tfile.name, hash_type, file_name, source, source_hash_name
)
expected = (
{
"hsum": "fe05bcdcdc4928012781a5f1a2a77cbb5398e106",
"hash_type": "sha1",
}
if hash_type != "sha256"
else None
)
self.assertEqual(result, expected)
# Test both a file_name and source but no source_hash_name.
# Even though there are matches for both file_name and
# source_hash_name, file_name should be preferred.
file_name = "/example.tar.gz"
source = "https://mydomain.tld/foo.tar.bz2?key1=val1&key2=val2"
source_hash_name = None
result = filemod.extract_hash(
tfile.name, hash_type, file_name, source, source_hash_name
)
expected = (
{
"hsum": "ead48423703509d37c4a90e6a0d53e143b6fc268",
"hash_type": "sha1",
}
if hash_type != "sha256"
else None
)
self.assertEqual(result, expected)
# Test both a file_name and source but no source_hash_name.
# Since there is no match for the file_name, the source is
# matched.
file_name = "/somefile.tar.gz"
source = "https://mydomain.tld/foo.tar.bz2?key1=val1&key2=val2"
source_hash_name = None
result = filemod.extract_hash(
tfile.name, hash_type, file_name, source, source_hash_name
)
expected = (
{
"hsum": "ad782ecdac770fc6eb9a62e44f90873fb97fb26b",
"hash_type": "sha1",
}
if hash_type != "sha256"
else None
)
self.assertEqual(result, expected)
os.remove(tfile.name)
# Hash only, no file name (Maven repo checksum format)
# Since there is no name match, the first checksum in the file will
# always be returned, never the second.
with tempfile.NamedTemporaryFile(mode="w+b", delete=False) as tfile:
tfile.write(
salt.utils.stringutils.to_bytes(
"ead48423703509d37c4a90e6a0d53e143b6fc268\n"
"ad782ecdac770fc6eb9a62e44f90873fb97fb26b\n"
)
)
tfile.flush()
for hash_type in ("", "sha1", "sha256"):
result = filemod.extract_hash(tfile.name, hash_type, "/testfile")
expected = (
{
"hsum": "ead48423703509d37c4a90e6a0d53e143b6fc268",
"hash_type": "sha1",
}
if hash_type != "sha256"
else None
)
self.assertEqual(result, expected)
os.remove(tfile.name)
def test_user_to_uid_int(self):
"""
Tests if user is passed as an integer
"""
user = 5034
ret = filemod.user_to_uid(user)
self.assertEqual(ret, user)
def test_group_to_gid_int(self):
"""
Tests if group is passed as an integer
"""
group = 5034
ret = filemod.group_to_gid(group)
self.assertEqual(ret, group)
def test_patch(self):
with patch("os.path.isdir", return_value=False) as mock_isdir, patch(
"salt.utils.path.which", return_value="/bin/patch"
) as mock_which:
cmd_mock = MagicMock(return_value="test_retval")
with patch.dict(filemod.__salt__, {"cmd.run_all": cmd_mock}):
ret = filemod.patch("/path/to/file", "/path/to/patch")
cmd = [
"/bin/patch",
"--forward",
"--reject-file=-",
"-i",
"/path/to/patch",
"/path/to/file",
]
cmd_mock.assert_called_once_with(cmd, python_shell=False)
self.assertEqual("test_retval", ret)
def test_patch_dry_run(self):
with patch("os.path.isdir", return_value=False) as mock_isdir, patch(
"salt.utils.path.which", return_value="/bin/patch"
) as mock_which:
cmd_mock = MagicMock(return_value="test_retval")
with patch.dict(filemod.__salt__, {"cmd.run_all": cmd_mock}):
ret = filemod.patch("/path/to/file", "/path/to/patch", dry_run=True)
cmd = [
"/bin/patch",
"--dry-run",
"--forward",
"--reject-file=-",
"-i",
"/path/to/patch",
"/path/to/file",
]
cmd_mock.assert_called_once_with(cmd, python_shell=False)
self.assertEqual("test_retval", ret)
def test_patch_dir(self):
with patch("os.path.isdir", return_value=True) as mock_isdir, patch(
"salt.utils.path.which", return_value="/bin/patch"
) as mock_which:
cmd_mock = MagicMock(return_value="test_retval")
with patch.dict(filemod.__salt__, {"cmd.run_all": cmd_mock}):
ret = filemod.patch("/path/to/dir", "/path/to/patch")
cmd = [
"/bin/patch",
"--forward",
"--reject-file=-",
"-i",
"/path/to/patch",
"-d",
"/path/to/dir",
"--strip=0",
]
cmd_mock.assert_called_once_with(cmd, python_shell=False)
self.assertEqual("test_retval", ret)
def test_apply_template_on_contents(self):
"""
Tests that the templating engine works on string contents
"""
contents = "This is a {{ template }}."
defaults = {"template": "templated file"}
with patch.object(SaltCacheLoader, "file_client", Mock()):
ret = filemod.apply_template_on_contents(
contents,
template="jinja",
context={"opts": filemod.__opts__},
defaults=defaults,
saltenv="base",
)
self.assertEqual(ret, "This is a templated file.")
def test_get_diff(self):
text1 = textwrap.dedent(
"""\
foo
bar
baz
спам
"""
)
text2 = textwrap.dedent(
"""\
foo
bar
baz
яйца
"""
)
diff_result = textwrap.dedent(
"""\
--- text1
+++ text2
@@ -1,4 +1,4 @@
foo
bar
baz
-спам
+яйца
"""
)
# The below two variables are 8 bytes of data pulled from /dev/urandom
binary1 = b"\xd4\xb2\xa6W\xc6\x8e\xf5\x0f"
binary2 = b",\x13\x04\xa5\xb0\x12\xdf%"
# pylint: disable=no-self-argument
class MockFopen:
"""
Provides a fake filehandle object that has just enough to run
readlines() as file.get_diff does. Any significant changes to
file.get_diff may require this class to be modified.
"""
def __init__(
mockself, path, *args, **kwargs
): # pylint: disable=unused-argument
mockself.path = path
def readlines(mockself): # pylint: disable=unused-argument
return {
"text1": text1.encode("utf8"),
"text2": text2.encode("utf8"),
"binary1": binary1,
"binary2": binary2,
}[mockself.path].splitlines(True)
def __enter__(mockself):
return mockself
def __exit__(mockself, *args): # pylint: disable=unused-argument
pass
# pylint: enable=no-self-argument
fopen = MagicMock(side_effect=lambda x, *args, **kwargs: MockFopen(x))
cache_file = MagicMock(side_effect=lambda x, *args, **kwargs: x.split("/")[-1])
# Mocks for __utils__['files.is_text']
mock_text_text = MagicMock(side_effect=[True, True])
mock_bin_bin = MagicMock(side_effect=[False, False])
mock_text_bin = MagicMock(side_effect=[True, False])
mock_bin_text = MagicMock(side_effect=[False, True])
with patch.dict(filemod.__salt__, {"cp.cache_file": cache_file}), patch.object(
salt.utils.files, "fopen", fopen
):
# Test diffing two text files
with patch.dict(filemod.__utils__, {"files.is_text": mock_text_text}):
# Identical files
ret = filemod.get_diff("text1", "text1")
self.assertEqual(ret, "")
# Non-identical files
ret = filemod.get_diff("text1", "text2")
self.assertEqual(ret, diff_result)
# Repeat the above test with remote file paths. The expectation
# is that the cp.cache_file mock will ensure that we are not
# trying to do an fopen on the salt:// URL, but rather the
# "cached" file path we've mocked.
with patch.object(
filemod, "_binary_replace", MagicMock(return_value="")
):
ret = filemod.get_diff("salt://text1", "salt://text1")
self.assertEqual(ret, "")
ret = filemod.get_diff("salt://text1", "salt://text2")
self.assertEqual(ret, diff_result)
# Test diffing two binary files
with patch.dict(filemod.__utils__, {"files.is_text": mock_bin_bin}):
# Identical files
ret = filemod.get_diff("binary1", "binary1")
self.assertEqual(ret, "")
# Non-identical files
ret = filemod.get_diff("binary1", "binary2")
self.assertEqual(ret, "Replace binary file")
# Test diffing a text file with a binary file
with patch.dict(filemod.__utils__, {"files.is_text": mock_text_bin}):
ret = filemod.get_diff("text1", "binary1")
self.assertEqual(ret, "Replace text file with binary file")
# Test diffing a binary file with a text file
with patch.dict(filemod.__utils__, {"files.is_text": mock_bin_text}):
ret = filemod.get_diff("binary1", "text1")
self.assertEqual(ret, "Replace binary file with text file")
def test_stats(self):
with patch(
"os.path.expanduser", MagicMock(side_effect=lambda path: path)
), patch("os.path.exists", MagicMock(return_value=True)), patch(
"os.stat", MagicMock(return_value=DummyStat())
):
ret = filemod.stats("dummy", None, True)
self.assertEqual(ret["mode"], "0644")
self.assertEqual(ret["type"], "file")
@skipIf(pytest is None, "PyTest required for this set of tests")
class FilemodLineTests(TestCase, LoaderModuleMockMixin):
"""
Unit tests for file.line
"""
def setUp(self):
class AnyAttr:
def __getattr__(self, item):
return 0
def __call__(self, *args, **kwargs):
return self
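# AnyAttr is a permissive stub: any attribute lookup returns 0 and calling the
# instance returns the instance itself.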
self._anyattr = AnyAttr()
def tearDown(self):
del self._anyattr
def setup_loader_modules(self):
return {
filemod: {
"__salt__": {
"config.manage_mode": configmod.manage_mode,
"cmd.run": cmdmod.run,
"cmd.run_all": cmdmod.run_all,
},
"__opts__": {
"test": False,
"file_roots": {"base": "tmp"},
"pillar_roots": {"base": "tmp"},
"cachedir": "tmp",
"grains": {},
},
"__grains__": {"kernel": "Linux"},
"__utils__": {"stringutils.get_diff": salt.utils.stringutils.get_diff},
}
}
@staticmethod
def _get_body(content):
"""
The body is written as bytestrings or strings depending on platform.
This helper accepts a string of content and returns the appropriate list
of strings.
"""
ret = content.splitlines(True)
if six.PY2 and salt.utils.platform.is_windows():
return salt.utils.data.encode_list(ret)
else:
return salt.utils.data.decode_list(ret, to_str=True)
def test_set_line_should_raise_command_execution_error_with_no_mode(self):
with self.assertRaises(CommandExecutionError) as err:
filemod._set_line(lines=[], mode=None)
self.assertEqual(
err.exception.args[0], "Mode was not defined. How to process the file?",
)
def test_set_line_should_raise_command_execution_error_with_unknown_mode(self):
with self.assertRaises(CommandExecutionError) as err:
filemod._set_line(lines=[], mode="fnord")
self.assertEqual(
err.exception.args[0], "Unknown mode: fnord",
)
def test_if_content_is_none_and_mode_is_valid_but_not_delete_it_should_raise_command_execution_error(
self,
):
valid_modes = ("insert", "ensure", "replace")
for mode in valid_modes:
with self.assertRaises(CommandExecutionError) as err:
filemod._set_line(lines=[], mode=mode)
self.assertEqual(
err.exception.args[0], "Content can only be empty if mode is delete",
)
def test_if_delete_or_replace_is_called_with_empty_lines_it_should_warn_and_return_empty_body(
self,
):
for mode in ("delete", "replace"):
with patch("salt.modules.file.log.warning", MagicMock()) as fake_warn:
actual_lines = filemod._set_line(mode=mode, lines=[], content="roscivs")
self.assertEqual(actual_lines, [])
fake_warn.assert_called_with(
"Cannot find text to %s. File is empty.", mode
)
def test_if_mode_is_delete_and_not_before_after_or_match_then_content_should_be_used_to_delete_line(
self,
):
lines = ["foo", "roscivs", "bar"]
to_remove = "roscivs"
expected_lines = ["foo", "bar"]
actual_lines = filemod._set_line(mode="delete", lines=lines, content=to_remove)
self.assertEqual(actual_lines, expected_lines)
def test_if_mode_is_replace_and_not_before_after_or_match_and_content_exists_then_lines_should_not_change(
self,
):
original_lines = ["foo", "roscivs", "bar"]
content = "roscivs"
actual_lines = filemod._set_line(
mode="replace", lines=original_lines, content=content
)
self.assertEqual(actual_lines, original_lines)
def test_if_mode_is_replace_and_match_is_set_then_it_should_replace_the_first_match(
self,
):
to_replace = "quuxy"
replacement = "roscivs"
original_lines = ["foo", to_replace, "bar"]
expected_lines = ["foo", replacement, "bar"]
actual_lines = filemod._set_line(
mode="replace", lines=original_lines, content=replacement, match=to_replace,
)
self.assertEqual(actual_lines, expected_lines)
def test_if_mode_is_replace_and_indent_is_true_then_it_should_match_indention_of_existing_line(
self,
):
indents = "\t\t \t \t"
to_replace = indents + "quuxy"
replacement = "roscivs"
original_lines = ["foo", to_replace, "bar"]
expected_lines = ["foo", indents + replacement, "bar"]
actual_lines = filemod._set_line(
mode="replace",
lines=original_lines,
content=replacement,
match=to_replace,
indent=True,
)
self.assertEqual(actual_lines, expected_lines)
def test_if_mode_is_replace_and_indent_is_false_then_it_should_just_use_content(
self,
):
indents = "\t\t \t \t"
to_replace = indents + "quuxy"
replacement = "\t \t\troscivs"
original_lines = ["foo", to_replace, "bar"]
expected_lines = ["foo", replacement, "bar"]
actual_lines = filemod._set_line(
mode="replace",
lines=original_lines,
content=replacement,
match=to_replace,
indent=False,
)
self.assertEqual(actual_lines, expected_lines)
def test_if_mode_is_insert_and_no_location_before_or_after_then_it_should_raise_command_execution_error(
self,
):
with self.assertRaises(CommandExecutionError) as err:
filemod._set_line(
lines=[],
content="fnord",
mode="insert",
location=None,
before=None,
after=None,
)
self.assertEqual(
err.exception.args[0],
'On insert either "location" or "before/after" conditions are required.',
)
def test_if_mode_is_insert_and_location_is_start_it_should_insert_content_at_start(
self,
):
lines = ["foo", "bar", "bang"]
content = "roscivs"
expected_lines = [content] + lines
with patch("os.linesep", ""):
actual_lines = filemod._set_line(
lines=lines, content=content, mode="insert", location="start",
)
self.assertEqual(actual_lines, expected_lines)
def test_if_mode_is_insert_and_lines_have_eol_then_inserted_line_should_have_matching_eol(
self,
):
linesep = "\r\n"
lines = ["foo" + linesep]
content = "roscivs"
expected_lines = [content + linesep] + lines
actual_lines = filemod._set_line(
lines=lines, content=content, mode="insert", location="start",
)
self.assertEqual(actual_lines, expected_lines)
def test_if_mode_is_insert_and_no_lines_then_the_content_should_have_os_linesep_added(
self,
):
content = "roscivs"
fake_linesep = "\U0001f40d"
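# an intentionally unusual separator (the snake emoji) so the assertion can
# only pass if _set_line really appended os.linesep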
expected_lines = [content + fake_linesep]
with patch("os.linesep", fake_linesep):
actual_lines = filemod._set_line(
lines=[], content=content, mode="insert", location="start",
)
self.assertEqual(actual_lines, expected_lines)
def test_if_location_is_end_of_empty_file_then_it_should_just_be_content(self):
content = "roscivs"
expected_lines = [content]
actual_lines = filemod._set_line(
lines=[], content=content, mode="insert", location="end",
)
self.assertEqual(actual_lines, expected_lines)
def test_if_location_is_end_of_file_and_indent_is_True_then_line_should_match_previous_indent(
self,
):
content = "roscivs"
indent = " \t\t\t "
original_lines = [indent + "fnord"]
expected_lines = original_lines + [indent + content]
actual_lines = filemod._set_line(
lines=original_lines,
content=content,
mode="insert",
location="end",
indent=True,
)
self.assertEqual(actual_lines, expected_lines)
def test_if_location_is_not_set_but_before_and_after_are_then_line_should_appear_as_the_line_before_before(
self,
):
for indent in ("", " \t \t\t\t "):
content = "roscivs"
after = "after"
before = "before"
original_lines = ["foo", "bar", indent + after, "belowme", indent + before]
expected_lines = [
"foo",
"bar",
indent + after,
"belowme",
indent + content,
indent + before,
]
actual_lines = filemod._set_line(
lines=original_lines,
content=content,
mode="insert",
location=None,
before=before,
after=after,
)
self.assertEqual(actual_lines, expected_lines)
def test_insert_with_after_and_before_with_no_location_should_indent_to_match_before_indent(
self,
):
for indent in ("", " \t \t\t\t "):
content = "roscivs"
after = "after"
before = "before"
original_lines = [
"foo",
"bar",
indent + after,
"belowme",
(indent * 2) + before,
]
expected_lines = [
"foo",
"bar",
indent + after,
"belowme",
(indent * 2) + content,
(indent * 2) + before,
]
actual_lines = filemod._set_line(
lines=original_lines,
content=content,
mode="insert",
location=None,
before=before,
after=after,
)
self.assertEqual(actual_lines, expected_lines)
def test_if_not_location_but_before_and_after_and_more_than_one_after_it_should_CommandExecutionError(
self,
):
after = "one"
before = "two"
original_lines = [after, after, after, after, before]
with self.assertRaises(CommandExecutionError) as err:
filemod._set_line(
lines=original_lines,
content="fnord",
mode="insert",
location=None,
before=before,
after=after,
)
self.assertEqual(
err.exception.args[0],
'Found more than expected occurrences in "after" expression',
)
def test_if_not_location_but_before_and_after_and_more_than_one_before_it_should_CommandExecutionError(
self,
):
after = "one"
before = "two"
original_lines = [after, before, before, before]
with self.assertRaises(CommandExecutionError) as err:
filemod._set_line(
lines=original_lines,
content="fnord",
mode="insert",
location=None,
before=before,
after=after,
)
self.assertEqual(
err.exception.args[0],
'Found more than expected occurrences in "before" expression',
)
def test_if_not_location_or_before_but_after_and_after_has_more_than_one_it_should_CommandExecutionError(
self,
):
location = None
before = None
after = "after"
original_lines = [after, after, after]
with self.assertRaises(CommandExecutionError) as err:
filemod._set_line(
lines=original_lines,
content="fnord",
mode="insert",
location=location,
before=before,
after=after,
)
self.assertEqual(
err.exception.args[0],
'Found more than expected occurrences in "after" expression',
)
def test_if_not_location_or_after_but_before_and_before_has_more_than_one_it_should_CommandExecutionError(
self,
):
location = None
before = "before"
after = None
original_lines = [before, before, before]
with self.assertRaises(CommandExecutionError) as err:
filemod._set_line(
lines=original_lines,
content="fnord",
mode="insert",
location=location,
before=before,
after=after,
)
self.assertEqual(
err.exception.args[0],
'Found more than expected occurrences in "before" expression',
)
def test_if_not_location_or_after_and_no_before_in_lines_it_should_CommandExecutionError(
self,
):
location = None
before = "before"
after = None
original_lines = ["fnord", "fnord"]
with self.assertRaises(CommandExecutionError) as err:
filemod._set_line(
lines=original_lines,
content="fnord",
mode="insert",
location=location,
before=before,
after=after,
)
self.assertEqual(
err.exception.args[0], "Neither before or after was found in file",
)
def test_if_not_location_or_before_and_no_after_in_lines_it_should_CommandExecutionError(
self,
):
location = None
before = None
after = "after"
original_lines = ["fnord", "fnord"]
with self.assertRaises(CommandExecutionError) as err:
filemod._set_line(
lines=original_lines,
content="fnord",
mode="insert",
location=location,
before=before,
after=after,
)
self.assertEqual(
err.exception.args[0], "Neither before or after was found in file",
)
def test_if_not_location_or_before_but_after_then_line_should_be_inserted_after_after(
self,
):
location = before = None
after = "indessed"
content = "roscivs"
indent = "\t\t\t "
original_lines = ["foo", indent + after, "bar"]
expected_lines = ["foo", indent + after, indent + content, "bar"]
actual_lines = filemod._set_line(
lines=original_lines,
content=content,
mode="insert",
location=location,
before=before,
after=after,
)
self.assertEqual(actual_lines, expected_lines)
def test_insert_with_after_should_ignore_line_endings_on_comparison(self):
after = "after"
content = "roscivs"
line_endings = "\r\n\r\n"
original_lines = [after, content + line_endings]
actual_lines = filemod._set_line(
lines=original_lines[:], content=content, mode="insert", after=after,
)
self.assertEqual(actual_lines, original_lines)
def test_insert_with_before_should_ignore_line_endings_on_comparison(self):
before = "before"
content = "bottia"
line_endings = "\r\n\r\n"
original_lines = [content + line_endings, before]
actual_lines = filemod._set_line(
lines=original_lines[:], content=content, mode="insert", before=before,
)
self.assertEqual(actual_lines, original_lines)
def test_if_not_location_or_before_but_after_and_indent_False_then_line_should_be_inserted_after_after_without_indent(
self,
):
location = before = None
after = "indessed"
content = "roscivs"
indent = "\t\t\t "
original_lines = ["foo", indent + after, "bar"]
expected_lines = ["foo", indent + after, content, "bar"]
actual_lines = filemod._set_line(
lines=original_lines,
content=content,
mode="insert",
location=location,
before=before,
after=after,
indent=False,
)
self.assertEqual(actual_lines, expected_lines)
def test_if_not_location_or_after_but_before_then_line_should_be_inserted_before_before(
self,
):
location = after = None
before = "indessed"
content = "roscivs"
indent = "\t\t\t "
original_lines = [indent + "foo", indent + before, "bar"]
expected_lines = [indent + "foo", indent + content, indent + before, "bar"]
actual_lines = filemod._set_line(
lines=original_lines,
content=content,
mode="insert",
location=location,
before=before,
after=after,
)
self.assertEqual(actual_lines, expected_lines)
def test_if_not_location_or_after_but_before_and_indent_False_then_line_should_be_inserted_before_before_without_indent(
self,
):
location = after = None
before = "indessed"
content = "roscivs"
indent = "\t\t\t "
original_lines = [indent + "foo", before, "bar"]
expected_lines = [indent + "foo", content, before, "bar"]
actual_lines = filemod._set_line(
lines=original_lines,
content=content,
mode="insert",
location=location,
before=before,
after=after,
indent=False,
)
self.assertEqual(actual_lines, expected_lines)
def test_insert_after_the_last_line_should_work(self):
location = before = None
after = "indessed"
content = "roscivs"
original_lines = [after]
expected_lines = [after, content]
actual_lines = filemod._set_line(
lines=original_lines,
content=content,
mode="insert",
location=location,
before=before,
after=after,
indent=True,
)
self.assertEqual(actual_lines, expected_lines)
def test_insert_should_work_just_like_ensure_on_before(self):
# I'm pretty sure that this is or should be a bug, but that
# is how things currently work, so I'm calling it out here.
#
# If that should change, then this test should change.
before = "indessed"
content = "roscivs"
original_lines = [content, before]
actual_lines = filemod._set_line(
lines=original_lines[:], content=content, mode="insert", before=before,
)
self.assertEqual(actual_lines, original_lines)
def test_insert_should_work_just_like_ensure_on_after(self):
# I'm pretty sure that this is or should be a bug, but that
# is how things currently work, so I'm calling it out here.
#
# If that should change, then this test should change.
after = "indessed"
content = "roscivs"
original_lines = [after, content]
actual_lines = filemod._set_line(
# If we don't pass in a copy of the lines then it modifies
# them, and our test fails. Oops.
lines=original_lines[:],
content=content,
mode="insert",
after=after,
)
self.assertEqual(actual_lines, original_lines)
def test_insert_before_the_first_line_should_work(self):
location = after = None
before = "indessed"
content = "roscivs"
original_lines = [before]
expected_lines = [content, before]
actual_lines = filemod._set_line(
lines=original_lines,
content=content,
mode="insert",
location=location,
before=before,
after=after,
indent=True,
)
self.assertEqual(actual_lines, expected_lines)
def test_ensure_with_before_and_too_many_after_should_CommandExecutionError(self):
location = None
before = "before"
after = "after"
lines = [after, after, before]
content = "fnord"
with self.assertRaises(CommandExecutionError) as err:
filemod._set_line(
lines=lines,
content=content,
mode="ensure",
location=location,
before=before,
after=after,
)
self.assertEqual(
err.exception.args[0],
'Found more than expected occurrences in "after" expression',
)
def test_ensure_with_too_many_after_should_CommandExecutionError(self):
after = "fnord"
bad_lines = [after, after]
with self.assertRaises(CommandExecutionError) as err:
filemod._set_line(
lines=bad_lines, content="asdf", after=after, mode="ensure",
)
self.assertEqual(
err.exception.args[0],
'Found more than expected occurrences in "after" expression',
)
def test_ensure_with_after_and_too_many_before_should_CommandExecutionError(self):
location = None
before = "before"
after = "after"
lines = [after, before, before]
content = "fnord"
with self.assertRaises(CommandExecutionError) as err:
filemod._set_line(
lines=lines,
content=content,
mode="ensure",
location=location,
before=before,
after=after,
)
self.assertEqual(
err.exception.args[0],
'Found more than expected occurrences in "before" expression',
)
def test_ensure_with_too_many_before_should_CommandExecutionError(self):
before = "fnord"
bad_lines = [before, before]
with self.assertRaises(CommandExecutionError) as err:
filemod._set_line(
lines=bad_lines, content="asdf", before=before, mode="ensure",
)
self.assertEqual(
err.exception.args[0],
'Found more than expected occurrences in "before" expression',
)
def test_ensure_with_before_and_after_that_already_contains_the_line_should_return_original_info(
self,
):
before = "before"
after = "after"
content = "roscivs"
original_lines = [after, content, before]
actual_lines = filemod._set_line(
lines=original_lines,
content=content,
mode="ensure",
after=after,
before=before,
)
self.assertEqual(actual_lines, original_lines)
def test_ensure_with_too_many_lines_between_before_and_after_should_CommandExecutionError(
self,
):
before = "before"
after = "after"
content = "roscivs"
original_lines = [after, "fnord", "fnord", before]
with self.assertRaises(CommandExecutionError) as err:
filemod._set_line(
lines=original_lines,
content=content,
mode="ensure",
after=after,
before=before,
)
self.assertEqual(
err.exception.args[0],
'Found more than one line between boundaries "before" and "after".',
)
def test_ensure_with_no_lines_between_before_and_after_should_insert_a_line(self):
for indent in ("", " \t \t\t\t "):
before = "before"
after = "after"
content = "roscivs"
original_lines = [indent + after, before]
expected_lines = [indent + after, indent + content, before]
actual_lines = filemod._set_line(
lines=original_lines,
content=content,
before=before,
after=after,
mode="ensure",
indent=True,
)
self.assertEqual(actual_lines, expected_lines)
def test_ensure_with_existing_but_different_line_should_set_the_line(self):
for indent in ("", " \t \t\t\t "):
before = "before"
after = "after"
content = "roscivs"
original_lines = [indent + after, "fnord", before]
expected_lines = [indent + after, indent + content, before]
actual_lines = filemod._set_line(
lines=original_lines,
content=content,
before=before,
after=after,
mode="ensure",
indent=True,
)
self.assertEqual(actual_lines, expected_lines)
def test_ensure_with_after_and_existing_content_should_return_same_lines(self):
for indent in ("", " \t \t\t\t "):
before = None
after = "after"
content = "roscivs"
original_lines = [indent + after, indent + content, "fnord"]
actual_lines = filemod._set_line(
lines=original_lines,
content=content,
before=before,
after=after,
mode="ensure",
indent=True,
)
self.assertEqual(actual_lines, original_lines)
def test_ensure_with_after_and_missing_content_should_add_it(self):
for indent in ("", " \t \t\t\t "):
before = None
after = "after"
content = "roscivs"
original_lines = [indent + after, "more fnord", "fnord"]
expected_lines = [indent + after, indent + content, "more fnord", "fnord"]
actual_lines = filemod._set_line(
lines=original_lines,
content=content,
before=before,
after=after,
mode="ensure",
indent=True,
)
self.assertEqual(actual_lines, expected_lines)
def test_ensure_with_after_and_content_at_the_end_should_not_add_duplicate(self):
after = "after"
content = "roscivs"
original_lines = [after, content + "\n"]
actual_lines = filemod._set_line(
lines=original_lines, content=content, after=after, mode="ensure",
)
self.assertEqual(actual_lines, original_lines)
def test_ensure_with_before_and_missing_content_should_add_it(self):
for indent in ("", " \t \t\t\t "):
before = "before"
after = None
content = "roscivs"
original_lines = [indent + "fnord", indent + "fnord", before]
expected_lines = [
indent + "fnord",
indent + "fnord",
indent + content,
before,
]
actual_lines = filemod._set_line(
lines=original_lines,
content=content,
before=before,
after=after,
mode="ensure",
indent=True,
)
self.assertEqual(actual_lines, expected_lines)
def test_ensure_with_before_and_existing_content_should_return_same_lines(self):
for indent in ("", " \t \t\t\t "):
before = "before"
after = None
content = "roscivs"
original_lines = [indent + "fnord", indent + content, before]
actual_lines = filemod._set_line(
lines=original_lines,
content=content,
before=before,
after=after,
mode="ensure",
indent=True,
)
self.assertEqual(actual_lines, original_lines)
def test_ensure_without_before_and_after_should_CommandExecutionError(self):
before = "before"
after = "after"
bad_lines = ["fnord", "fnord1", "fnord2"]
with self.assertRaises(CommandExecutionError) as err:
filemod._set_line(
lines=bad_lines,
before=before,
after=after,
content="aardvark",
mode="ensure",
)
self.assertEqual(
err.exception.args[0],
"Wrong conditions? Unable to ensure line without knowing where"
" to put it before and/or after.",
)
@patch("os.path.realpath", MagicMock(wraps=lambda x: x))
@patch("os.path.isfile", MagicMock(return_value=True))
def test_delete_line_in_empty_file(self):
"""
Tests that when calling file.line with ``mode=delete``,
the function doesn't stack trace if the file is empty.
Should return ``False``.
See Issue #38438.
"""
for mode in ["delete", "replace"]:
_log = MagicMock()
with patch("salt.utils.files.fopen", mock_open(read_data="")), patch(
"os.stat", self._anyattr
), patch("salt.modules.file.log", _log):
self.assertFalse(
filemod.line("/dummy/path", content="foo", match="bar", mode=mode)
)
warning_call = _log.warning.call_args_list[0][0]
warning_log_msg = warning_call[0] % warning_call[1:]
self.assertIn("Cannot find text to {}".format(mode), warning_log_msg)
@patch("os.path.realpath", MagicMock())
@patch("os.path.isfile", MagicMock(return_value=True))
@patch("os.stat", MagicMock())
def test_line_delete_no_match(self):
"""
        Tests that when calling file.line with ``mode=delete`` and a pattern
        that does not match, it returns False
:return:
"""
file_content = os.linesep.join(
["file_roots:", " base:", " - /srv/salt", " - /srv/custom"]
)
match = "not matching"
for mode in ["delete", "replace"]:
files_fopen = mock_open(read_data=file_content)
with patch("salt.utils.files.fopen", files_fopen):
atomic_opener = mock_open()
with patch("salt.utils.atomicfile.atomic_open", atomic_opener):
self.assertFalse(
filemod.line("foo", content="foo", match=match, mode=mode)
)
@patch("os.path.realpath", MagicMock(wraps=lambda x: x))
@patch("os.path.isfile", MagicMock(return_value=True))
def test_line_modecheck_failure(self):
"""
Test for file.line for empty or wrong mode.
Calls unknown or empty mode and expects failure.
:return:
"""
for mode, err_msg in [
(None, "How to process the file"),
("nonsense", "Unknown mode"),
]:
with pytest.raises(CommandExecutionError) as exc_info:
filemod.line("foo", mode=mode)
self.assertIn(err_msg, str(exc_info.value))
@patch("os.path.realpath", MagicMock(wraps=lambda x: x))
@patch("os.path.isfile", MagicMock(return_value=True))
def test_line_no_content(self):
"""
        Test that file.line fails when content is empty and the mode is not delete.
:return:
"""
for mode in ["insert", "ensure", "replace"]:
with pytest.raises(CommandExecutionError) as exc_info:
filemod.line("foo", mode=mode)
self.assertIn(
'Content can only be empty if mode is "delete"', str(exc_info.value),
)
@patch("os.path.realpath", MagicMock(wraps=lambda x: x))
@patch("os.path.isfile", MagicMock(return_value=True))
@patch("os.stat", MagicMock())
def test_line_insert_no_location_no_before_no_after(self):
"""
Test for file.line for insertion but define no location/before/after.
:return:
"""
files_fopen = mock_open(read_data="test data")
with patch("salt.utils.files.fopen", files_fopen):
with pytest.raises(CommandExecutionError) as exc_info:
filemod.line("foo", content="test content", mode="insert")
self.assertIn('"location" or "before/after"', str(exc_info.value))
@with_tempfile()
def test_line_insert_after_no_pattern(self, name):
"""
Test for file.line for insertion after specific line, using no pattern.
See issue #38670
:return:
"""
file_content = os.linesep.join(["file_roots:", " base:", " - /srv/salt"])
file_modified = os.linesep.join(
["file_roots:", " base:", " - /srv/salt", " - /srv/custom"]
)
cfg_content = "- /srv/custom"
isfile_mock = MagicMock(side_effect=lambda x: True if x == name else DEFAULT)
with patch("os.path.isfile", isfile_mock), patch(
"os.stat", MagicMock(return_value=DummyStat())
), patch("salt.utils.files.fopen", mock_open(read_data=file_content)), patch(
"salt.utils.atomicfile.atomic_open", mock_open()
) as atomic_open_mock:
filemod.line(name, content=cfg_content, after="- /srv/salt", mode="insert")
handles = atomic_open_mock.filehandles[name]
# We should only have opened the file once
open_count = len(handles)
assert open_count == 1, open_count
# We should only have invoked .writelines() once...
writelines_content = handles[0].writelines_calls
writelines_count = len(writelines_content)
assert writelines_count == 1, writelines_count
# ... with the updated content
expected = self._get_body(file_modified)
assert writelines_content[0] == expected, (writelines_content[0], expected)
@with_tempfile()
def test_line_insert_after_pattern(self, name):
"""
Test for file.line for insertion after specific line, using pattern.
See issue #38670
:return:
"""
file_content = os.linesep.join(
[
"file_boots:",
" - /rusty",
"file_roots:",
" base:",
" - /srv/salt",
" - /srv/sugar",
]
)
file_modified = os.linesep.join(
[
"file_boots:",
" - /rusty",
"file_roots:",
" custom:",
" - /srv/custom",
" base:",
" - /srv/salt",
" - /srv/sugar",
]
)
cfg_content = os.linesep.join([" custom:", " - /srv/custom"])
isfile_mock = MagicMock(side_effect=lambda x: True if x == name else DEFAULT)
for after_line in ["file_r.*", ".*roots"]:
with patch("os.path.isfile", isfile_mock), patch(
"os.stat", MagicMock(return_value=DummyStat())
), patch(
"salt.utils.files.fopen", mock_open(read_data=file_content)
), patch(
"salt.utils.atomicfile.atomic_open", mock_open()
) as atomic_open_mock:
filemod.line(
name,
content=cfg_content,
after=after_line,
mode="insert",
indent=False,
)
handles = atomic_open_mock.filehandles[name]
# We should only have opened the file once
open_count = len(handles)
assert open_count == 1, open_count
# We should only have invoked .writelines() once...
writelines_content = handles[0].writelines_calls
writelines_count = len(writelines_content)
assert writelines_count == 1, writelines_count
# ... with the updated content
expected = self._get_body(file_modified)
# We passed cfg_content with a newline in the middle, so it
# will be written as two lines in the same element of the list
# passed to .writelines()
expected[3] = expected[3] + expected.pop(4)
assert writelines_content[0] == expected, (
writelines_content[0],
expected,
)
@with_tempfile()
def test_line_insert_multi_line_content_after_unicode(self, name):
"""
Test for file.line for insertion after specific line with Unicode
See issue #48113
:return:
"""
file_content = "This is a line{}This is another line".format(os.linesep)
file_modified = salt.utils.stringutils.to_str(
"This is a line{}"
"This is another line{}"
"This is a line with unicode Ŷ".format(os.linesep, os.linesep)
)
cfg_content = "This is a line with unicode Ŷ"
isfile_mock = MagicMock(side_effect=lambda x: True if x == name else DEFAULT)
for after_line in ["This is another line"]:
with patch("os.path.isfile", isfile_mock), patch(
"os.stat", MagicMock(return_value=DummyStat())
), patch(
"salt.utils.files.fopen", mock_open(read_data=file_content)
), patch(
"salt.utils.atomicfile.atomic_open", mock_open()
) as atomic_open_mock:
filemod.line(
name,
content=cfg_content,
after=after_line,
mode="insert",
indent=False,
)
handles = atomic_open_mock.filehandles[name]
# We should only have opened the file once
open_count = len(handles)
assert open_count == 1, open_count
# We should only have invoked .writelines() once...
writelines_content = handles[0].writelines_calls
writelines_count = len(writelines_content)
assert writelines_count == 1, writelines_count
# ... with the updated content
expected = self._get_body(file_modified)
assert writelines_content[0] == expected, (
writelines_content[0],
expected,
)
@with_tempfile()
def test_line_insert_before(self, name):
"""
        Test for file.line for insertion before a specific line, using both a literal match and a pattern.
See issue #38670
:return:
"""
file_content = os.linesep.join(
["file_roots:", " base:", " - /srv/salt", " - /srv/sugar"]
)
file_modified = os.linesep.join(
[
"file_roots:",
" base:",
" - /srv/custom",
" - /srv/salt",
" - /srv/sugar",
]
)
cfg_content = "- /srv/custom"
isfile_mock = MagicMock(side_effect=lambda x: True if x == name else DEFAULT)
for before_line in ["/srv/salt", "/srv/sa.*t"]:
with patch("os.path.isfile", isfile_mock), patch(
"os.stat", MagicMock(return_value=DummyStat())
), patch(
"salt.utils.files.fopen", mock_open(read_data=file_content)
), patch(
"salt.utils.atomicfile.atomic_open", mock_open()
) as atomic_open_mock:
filemod.line(
name, content=cfg_content, before=before_line, mode="insert"
)
handles = atomic_open_mock.filehandles[name]
# We should only have opened the file once
open_count = len(handles)
assert open_count == 1, open_count
# We should only have invoked .writelines() once...
writelines_content = handles[0].writelines_calls
writelines_count = len(writelines_content)
assert writelines_count == 1, writelines_count
# ... with the updated content
expected = self._get_body(file_modified)
# assert writelines_content[0] == expected, (writelines_content[0], expected)
self.assertEqual(writelines_content[0], expected)
@patch("os.path.realpath", MagicMock(wraps=lambda x: x))
@patch("os.path.isfile", MagicMock(return_value=True))
@patch("os.stat", MagicMock())
def test_line_assert_exception_pattern(self):
"""
        Test that file.line raises an exception on insert when the pattern is too general.
:return:
"""
file_content = os.linesep.join(
["file_roots:", " base:", " - /srv/salt", " - /srv/sugar"]
)
cfg_content = "- /srv/custom"
for before_line in ["/sr.*"]:
files_fopen = mock_open(read_data=file_content)
with patch("salt.utils.files.fopen", files_fopen):
atomic_opener = mock_open()
with patch("salt.utils.atomicfile.atomic_open", atomic_opener):
with self.assertRaises(CommandExecutionError) as cm:
filemod.line(
"foo",
content=cfg_content,
before=before_line,
mode="insert",
)
self.assertEqual(
cm.exception.strerror,
'Found more than expected occurrences in "before" expression',
)
@with_tempfile()
def test_line_insert_before_after(self, name):
"""
        Test for file.line for insertion between two specific lines (``before`` and ``after``), using both literal matches and patterns.
See issue #38670
:return:
"""
file_content = os.linesep.join(
[
"file_roots:",
" base:",
" - /srv/salt",
" - /srv/pepper",
" - /srv/sugar",
]
)
file_modified = os.linesep.join(
[
"file_roots:",
" base:",
" - /srv/salt",
" - /srv/pepper",
" - /srv/coriander",
" - /srv/sugar",
]
)
cfg_content = "- /srv/coriander"
isfile_mock = MagicMock(side_effect=lambda x: True if x == name else DEFAULT)
for b_line, a_line in [("/srv/sugar", "/srv/salt")]:
with patch("os.path.isfile", isfile_mock), patch(
"os.stat", MagicMock(return_value=DummyStat())
), patch(
"salt.utils.files.fopen", mock_open(read_data=file_content)
), patch(
"salt.utils.atomicfile.atomic_open", mock_open()
) as atomic_open_mock:
filemod.line(
name,
content=cfg_content,
before=b_line,
after=a_line,
mode="insert",
)
handles = atomic_open_mock.filehandles[name]
# We should only have opened the file once
open_count = len(handles)
assert open_count == 1, open_count
# We should only have invoked .writelines() once...
writelines_content = handles[0].writelines_calls
writelines_count = len(writelines_content)
assert writelines_count == 1, writelines_count
# ... with the updated content
expected = self._get_body(file_modified)
self.assertEqual(writelines_content[0], expected)
@with_tempfile()
def test_line_insert_start(self, name):
"""
Test for file.line for insertion at the beginning of the file
:return:
"""
cfg_content = "everything: fantastic"
file_content = os.linesep.join(
["file_roots:", " base:", " - /srv/salt", " - /srv/sugar"]
)
file_modified = os.linesep.join(
[
cfg_content,
"file_roots:",
" base:",
" - /srv/salt",
" - /srv/sugar",
]
)
isfile_mock = MagicMock(side_effect=lambda x: True if x == name else DEFAULT)
with patch("os.path.isfile", isfile_mock), patch(
"os.stat", MagicMock(return_value=DummyStat())
), patch("salt.utils.files.fopen", mock_open(read_data=file_content)), patch(
"salt.utils.atomicfile.atomic_open", mock_open()
) as atomic_open_mock:
filemod.line(name, content=cfg_content, location="start", mode="insert")
handles = atomic_open_mock.filehandles[name]
# We should only have opened the file once
open_count = len(handles)
assert open_count == 1, open_count
# We should only have invoked .writelines() once...
writelines_content = handles[0].writelines_calls
writelines_count = len(writelines_content)
assert writelines_count == 1, writelines_count
# ... with the updated content
expected = self._get_body(file_modified)
assert writelines_content[0] == expected, (writelines_content[0], expected)
@with_tempfile()
def test_line_insert_end(self, name):
"""
Test for file.line for insertion at the end of the file (append)
:return:
"""
cfg_content = "everything: fantastic"
file_content = os.linesep.join(
["file_roots:", " base:", " - /srv/salt", " - /srv/sugar"]
)
file_modified = os.linesep.join(
[
"file_roots:",
" base:",
" - /srv/salt",
" - /srv/sugar",
" " + cfg_content,
]
)
isfile_mock = MagicMock(side_effect=lambda x: True if x == name else DEFAULT)
with patch("os.path.isfile", isfile_mock), patch(
"os.stat", MagicMock(return_value=DummyStat())
), patch("salt.utils.files.fopen", mock_open(read_data=file_content)), patch(
"salt.utils.atomicfile.atomic_open", mock_open()
) as atomic_open_mock:
filemod.line(name, content=cfg_content, location="end", mode="insert")
handles = atomic_open_mock.filehandles[name]
# We should only have opened the file once
open_count = len(handles)
assert open_count == 1, open_count
# We should only have invoked .writelines() once...
writelines_content = handles[0].writelines_calls
writelines_count = len(writelines_content)
assert writelines_count == 1, writelines_count
# ... with the updated content
expected = self._get_body(file_modified)
assert writelines_content[0] == expected, (writelines_content[0], expected)
@with_tempfile()
def test_line_insert_ensure_before(self, name):
"""
Test for file.line for insertion ensuring the line is before
:return:
"""
cfg_content = "/etc/init.d/someservice restart"
file_content = os.linesep.join(["#!/bin/bash", "", "exit 0"])
file_modified = os.linesep.join(["#!/bin/bash", "", cfg_content, "exit 0"])
isfile_mock = MagicMock(side_effect=lambda x: True if x == name else DEFAULT)
with patch("os.path.isfile", isfile_mock), patch(
"os.stat", MagicMock(return_value=DummyStat())
), patch("salt.utils.files.fopen", mock_open(read_data=file_content)), patch(
"salt.utils.atomicfile.atomic_open", mock_open()
) as atomic_open_mock:
filemod.line(name, content=cfg_content, before="exit 0", mode="ensure")
handles = atomic_open_mock.filehandles[name]
# We should only have opened the file once
open_count = len(handles)
assert open_count == 1, open_count
# We should only have invoked .writelines() once...
writelines_content = handles[0].writelines_calls
writelines_count = len(writelines_content)
assert writelines_count == 1, writelines_count
# ... with the updated content
expected = self._get_body(file_modified)
assert writelines_content[0] == expected, (writelines_content[0], expected)
@with_tempfile()
def test_line_insert_duplicate_ensure_before(self, name):
"""
        Test for file.line with mode=ensure and ``before`` when the line already exists; no change is expected.
:return:
"""
cfg_content = "/etc/init.d/someservice restart"
file_content = os.linesep.join(["#!/bin/bash", "", cfg_content, "exit 0"])
file_modified = os.linesep.join(["#!/bin/bash", "", cfg_content, "exit 0"])
isfile_mock = MagicMock(side_effect=lambda x: True if x == name else DEFAULT)
with patch("os.path.isfile", isfile_mock), patch(
"os.stat", MagicMock(return_value=DummyStat())
), patch("salt.utils.files.fopen", mock_open(read_data=file_content)), patch(
"salt.utils.atomicfile.atomic_open", mock_open()
) as atomic_open_mock:
filemod.line(name, content=cfg_content, before="exit 0", mode="ensure")
            # If the file was not modified, no handles are stored in the dict
assert atomic_open_mock.filehandles.get(name) is None
@with_tempfile()
def test_line_insert_ensure_before_first_line(self, name):
"""
Test for file.line for insertion ensuring the line is before first line
:return:
"""
cfg_content = "#!/bin/bash"
file_content = os.linesep.join(["/etc/init.d/someservice restart", "exit 0"])
file_modified = os.linesep.join(
[cfg_content, "/etc/init.d/someservice restart", "exit 0"]
)
isfile_mock = MagicMock(side_effect=lambda x: True if x == name else DEFAULT)
with patch("os.path.isfile", isfile_mock), patch(
"os.stat", MagicMock(return_value=DummyStat())
), patch("salt.utils.files.fopen", mock_open(read_data=file_content)), patch(
"salt.utils.atomicfile.atomic_open", mock_open()
) as atomic_open_mock:
filemod.line(
name,
content=cfg_content,
before="/etc/init.d/someservice restart",
mode="ensure",
)
handles = atomic_open_mock.filehandles[name]
# We should only have opened the file once
open_count = len(handles)
assert open_count == 1, open_count
# We should only have invoked .writelines() once...
writelines_content = handles[0].writelines_calls
writelines_count = len(writelines_content)
assert writelines_count == 1, writelines_count
# ... with the updated content
expected = self._get_body(file_modified)
assert writelines_content[0] == expected, (writelines_content[0], expected)
@with_tempfile()
def test_line_insert_ensure_after(self, name):
"""
Test for file.line for insertion ensuring the line is after
:return:
"""
cfg_content = "exit 0"
file_content = os.linesep.join(
["#!/bin/bash", "/etc/init.d/someservice restart"]
)
file_modified = os.linesep.join(
["#!/bin/bash", "/etc/init.d/someservice restart", cfg_content]
)
isfile_mock = MagicMock(side_effect=lambda x: True if x == name else DEFAULT)
with patch("os.path.isfile", isfile_mock), patch(
"os.stat", MagicMock(return_value=DummyStat())
), patch("salt.utils.files.fopen", mock_open(read_data=file_content)), patch(
"salt.utils.atomicfile.atomic_open", mock_open()
) as atomic_open_mock:
filemod.line(
name,
content=cfg_content,
after="/etc/init.d/someservice restart",
mode="ensure",
)
handles = atomic_open_mock.filehandles[name]
# We should only have opened the file once
open_count = len(handles)
assert open_count == 1, open_count
# We should only have invoked .writelines() once...
writelines_content = handles[0].writelines_calls
writelines_count = len(writelines_content)
assert writelines_count == 1, writelines_count
# ... with the updated content
expected = self._get_body(file_modified)
assert writelines_content[0] == expected, (writelines_content[0], expected)
@with_tempfile()
def test_line_insert_duplicate_ensure_after(self, name):
"""
        Test for file.line with mode=ensure and ``after`` when the line already exists; no change is expected.
:return:
"""
cfg_content = "exit 0"
file_content = os.linesep.join(
["#!/bin/bash", "/etc/init.d/someservice restart", cfg_content]
)
file_modified = os.linesep.join(
["#!/bin/bash", "/etc/init.d/someservice restart", cfg_content]
)
isfile_mock = MagicMock(side_effect=lambda x: True if x == name else DEFAULT)
with patch("os.path.isfile", isfile_mock), patch(
"os.stat", MagicMock(return_value=DummyStat())
), patch("salt.utils.files.fopen", mock_open(read_data=file_content)), patch(
"salt.utils.atomicfile.atomic_open", mock_open()
) as atomic_open_mock:
filemod.line(
name,
content=cfg_content,
after="/etc/init.d/someservice restart",
mode="ensure",
)
            # If the file was not modified, no handles are stored in the dict
assert atomic_open_mock.filehandles.get(name) is None
@with_tempfile()
def test_line_insert_ensure_beforeafter_twolines(self, name):
"""
Test for file.line for insertion ensuring the line is between two lines
:return:
"""
cfg_content = 'EXTRA_GROUPS="dialout cdrom floppy audio video plugdev users"'
# pylint: disable=W1401
file_content = os.linesep.join(
[
r'NAME_REGEX="^[a-z][-a-z0-9_]*\$"',
'SKEL_IGNORE_REGEX="dpkg-(old|new|dist|save)"',
]
)
# pylint: enable=W1401
after, before = file_content.split(os.linesep)
file_modified = os.linesep.join([after, cfg_content, before])
isfile_mock = MagicMock(side_effect=lambda x: True if x == name else DEFAULT)
for (_after, _before) in [(after, before), ("NAME_.*", "SKEL_.*")]:
with patch("os.path.isfile", isfile_mock), patch(
"os.stat", MagicMock(return_value=DummyStat())
), patch(
"salt.utils.files.fopen", mock_open(read_data=file_content)
), patch(
"salt.utils.atomicfile.atomic_open", mock_open()
) as atomic_open_mock:
filemod.line(
name,
content=cfg_content,
after=_after,
before=_before,
mode="ensure",
)
handles = atomic_open_mock.filehandles[name]
# We should only have opened the file once
open_count = len(handles)
assert open_count == 1, open_count
# We should only have invoked .writelines() once...
writelines_content = handles[0].writelines_calls
writelines_count = len(writelines_content)
assert writelines_count == 1, writelines_count
# ... with the updated content
expected = self._get_body(file_modified)
assert writelines_content[0] == expected, (
writelines_content[0],
expected,
)
@with_tempfile()
def test_line_insert_ensure_beforeafter_twolines_exists(self, name):
"""
Test for file.line for insertion ensuring the line is between two lines
where content already exists
"""
cfg_content = 'EXTRA_GROUPS="dialout"'
# pylint: disable=W1401
file_content = os.linesep.join(
[
r'NAME_REGEX="^[a-z][-a-z0-9_]*\$"',
'EXTRA_GROUPS="dialout"',
'SKEL_IGNORE_REGEX="dpkg-(old|new|dist|save)"',
]
)
# pylint: enable=W1401
after, before = (
file_content.split(os.linesep)[0],
file_content.split(os.linesep)[2],
)
isfile_mock = MagicMock(side_effect=lambda x: True if x == name else DEFAULT)
for (_after, _before) in [(after, before), ("NAME_.*", "SKEL_.*")]:
with patch("os.path.isfile", isfile_mock), patch(
"os.stat", MagicMock(return_value=DummyStat())
), patch(
"salt.utils.files.fopen", mock_open(read_data=file_content)
), patch(
"salt.utils.atomicfile.atomic_open", mock_open()
) as atomic_open_mock:
result = filemod.line(
"foo",
content=cfg_content,
after=_after,
before=_before,
mode="ensure",
)
# We should not have opened the file
assert not atomic_open_mock.filehandles
# No changes should have been made
assert result is False
@patch("os.path.realpath", MagicMock(wraps=lambda x: x))
@patch("os.path.isfile", MagicMock(return_value=True))
@patch("os.stat", MagicMock())
def test_line_insert_ensure_beforeafter_rangelines(self):
"""
        Test for file.line for insertion ensuring the line is between two lines
        within the range. This is expected to raise an error because more than
        one line lies between the boundaries.
"""
cfg_content = 'EXTRA_GROUPS="dialout cdrom floppy audio video plugdev users"'
# pylint: disable=W1401
file_content = (
r'NAME_REGEX="^[a-z][-a-z0-9_]*\$"{}SETGID_HOME=no{}ADD_EXTRA_GROUPS=1{}'
'SKEL_IGNORE_REGEX="dpkg-(old|new|dist|save)"'.format(
os.linesep, os.linesep, os.linesep
)
)
# pylint: enable=W1401
after, before = (
file_content.split(os.linesep)[0],
file_content.split(os.linesep)[-1],
)
for (_after, _before) in [(after, before), ("NAME_.*", "SKEL_.*")]:
files_fopen = mock_open(read_data=file_content)
with patch("salt.utils.files.fopen", files_fopen):
atomic_opener = mock_open()
with patch("salt.utils.atomicfile.atomic_open", atomic_opener):
with pytest.raises(CommandExecutionError) as exc_info:
filemod.line(
"foo",
content=cfg_content,
after=_after,
before=_before,
mode="ensure",
)
self.assertIn(
'Found more than one line between boundaries "before" and "after"',
str(exc_info.value),
)
@with_tempfile()
def test_line_delete(self, name):
"""
Test for file.line for deletion of specific line
:return:
"""
file_content = os.linesep.join(
[
"file_roots:",
" base:",
" - /srv/salt",
" - /srv/pepper",
" - /srv/sugar",
]
)
file_modified = os.linesep.join(
["file_roots:", " base:", " - /srv/salt", " - /srv/sugar"]
)
isfile_mock = MagicMock(side_effect=lambda x: True if x == name else DEFAULT)
for content in ["/srv/pepper", "/srv/pepp*", "/srv/p.*", "/sr.*pe.*"]:
files_fopen = mock_open(read_data=file_content)
with patch("os.path.isfile", isfile_mock), patch(
"os.stat", MagicMock(return_value=DummyStat())
), patch("salt.utils.files.fopen", files_fopen), patch(
"salt.utils.atomicfile.atomic_open", mock_open()
) as atomic_open_mock:
filemod.line(name, content=content, mode="delete")
handles = atomic_open_mock.filehandles[name]
# We should only have opened the file once
open_count = len(handles)
assert open_count == 1, open_count
# We should only have invoked .writelines() once...
writelines_content = handles[0].writelines_calls
writelines_count = len(writelines_content)
assert writelines_count == 1, writelines_count
# ... with the updated content
expected = self._get_body(file_modified)
assert writelines_content[0] == expected, (
writelines_content[0],
expected,
)
@with_tempfile()
def test_line_replace(self, name):
"""
Test for file.line for replacement of specific line
:return:
"""
file_content = os.linesep.join(
[
"file_roots:",
" base:",
" - /srv/salt",
" - /srv/pepper",
" - /srv/sugar",
]
)
file_modified = os.linesep.join(
[
"file_roots:",
" base:",
" - /srv/salt",
" - /srv/natrium-chloride",
" - /srv/sugar",
]
)
isfile_mock = MagicMock(side_effect=lambda x: True if x == name else DEFAULT)
for match in ["/srv/pepper", "/srv/pepp*", "/srv/p.*", "/sr.*pe.*"]:
files_fopen = mock_open(read_data=file_content)
with patch("os.path.isfile", isfile_mock), patch(
"os.stat", MagicMock(return_value=DummyStat())
), patch("salt.utils.files.fopen", files_fopen), patch(
"salt.utils.atomicfile.atomic_open", mock_open()
) as atomic_open_mock:
filemod.line(
name, content="- /srv/natrium-chloride", match=match, mode="replace"
)
handles = atomic_open_mock.filehandles[name]
# We should only have opened the file once
open_count = len(handles)
assert open_count == 1, open_count
# We should only have invoked .writelines() once...
writelines_content = handles[0].writelines_calls
writelines_count = len(writelines_content)
assert writelines_count == 1, writelines_count
# ... with the updated content
expected = self._get_body(file_modified)
assert writelines_content[0] == expected, (
writelines_content[0],
expected,
)
class FileBasicsTestCase(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
return {
filemod: {
"__salt__": {
"config.manage_mode": configmod.manage_mode,
"cmd.run": cmdmod.run,
"cmd.run_all": cmdmod.run_all,
},
"__opts__": {
"test": False,
"file_roots": {"base": "tmp"},
"pillar_roots": {"base": "tmp"},
"cachedir": "tmp",
"grains": {},
},
"__grains__": {"kernel": "Linux"},
}
}
def setUp(self):
self.directory = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.directory)
self.addCleanup(delattr, self, "directory")
with tempfile.NamedTemporaryFile(delete=False, mode="w+") as self.tfile:
self.tfile.write("Hi hello! I am a file.")
self.tfile.close()
self.addCleanup(os.remove, self.tfile.name)
self.addCleanup(delattr, self, "tfile")
self.myfile = os.path.join(RUNTIME_VARS.TMP, "myfile")
with salt.utils.files.fopen(self.myfile, "w+") as fp:
fp.write(salt.utils.stringutils.to_str("Hello\n"))
self.addCleanup(os.remove, self.myfile)
self.addCleanup(delattr, self, "myfile")
@skipIf(salt.utils.platform.is_windows(), "os.symlink is not available on Windows")
def test_symlink_already_in_desired_state(self):
os.symlink(self.tfile.name, self.directory + "/a_link")
self.addCleanup(os.remove, self.directory + "/a_link")
result = filemod.symlink(self.tfile.name, self.directory + "/a_link")
self.assertTrue(result)
@skipIf(salt.utils.platform.is_windows(), "os.link is not available on Windows")
def test_hardlink_sanity(self):
target = os.path.join(self.directory, "a_hardlink")
self.addCleanup(os.remove, target)
result = filemod.link(self.tfile.name, target)
self.assertTrue(result)
@skipIf(salt.utils.platform.is_windows(), "os.link is not available on Windows")
def test_hardlink_numlinks(self):
target = os.path.join(self.directory, "a_hardlink")
self.addCleanup(os.remove, target)
result = filemod.link(self.tfile.name, target)
name_i = os.stat(self.tfile.name).st_nlink
self.assertTrue(name_i > 1)
@skipIf(salt.utils.platform.is_windows(), "os.link is not available on Windows")
def test_hardlink_working(self):
target = os.path.join(self.directory, "a_hardlink")
self.addCleanup(os.remove, target)
result = filemod.link(self.tfile.name, target)
name_i = os.stat(self.tfile.name).st_ino
target_i = os.stat(target).st_ino
self.assertTrue(name_i == target_i)
def test_source_list_for_list_returns_file_from_dict_via_http(self):
with patch("salt.modules.file.os.remove") as remove:
remove.return_value = None
with patch.dict(
filemod.__salt__,
{
"cp.list_master": MagicMock(return_value=[]),
"cp.list_master_dirs": MagicMock(return_value=[]),
"cp.cache_file": MagicMock(return_value="/tmp/http.conf"),
},
):
ret = filemod.source_list(
[{"http://t.est.com/http/httpd.conf": "filehash"}], "", "base"
)
self.assertEqual(
list(ret), ["http://t.est.com/http/httpd.conf", "filehash"]
)
def test_source_list_for_list_returns_existing_file(self):
with patch.dict(
filemod.__salt__,
{
"cp.list_master": MagicMock(return_value=["http/httpd.conf.fallback"]),
"cp.list_master_dirs": MagicMock(return_value=[]),
},
):
ret = filemod.source_list(
["salt://http/httpd.conf", "salt://http/httpd.conf.fallback"],
"filehash",
"base",
)
self.assertEqual(list(ret), ["salt://http/httpd.conf.fallback", "filehash"])
def test_source_list_for_list_returns_file_from_other_env(self):
def list_master(env):
dct = {"base": [], "dev": ["http/httpd.conf"]}
return dct[env]
with patch.dict(
filemod.__salt__,
{
"cp.list_master": MagicMock(side_effect=list_master),
"cp.list_master_dirs": MagicMock(return_value=[]),
},
):
ret = filemod.source_list(
[
"salt://http/httpd.conf?saltenv=dev",
"salt://http/httpd.conf.fallback",
],
"filehash",
"base",
)
self.assertEqual(
list(ret), ["salt://http/httpd.conf?saltenv=dev", "filehash"]
)
def test_source_list_for_list_returns_file_from_dict(self):
with patch.dict(
filemod.__salt__,
{
"cp.list_master": MagicMock(return_value=["http/httpd.conf"]),
"cp.list_master_dirs": MagicMock(return_value=[]),
},
):
ret = filemod.source_list(
[{"salt://http/httpd.conf": ""}], "filehash", "base"
)
self.assertEqual(list(ret), ["salt://http/httpd.conf", "filehash"])
def test_source_list_for_list_returns_existing_local_file_slash(self):
with patch.dict(
filemod.__salt__,
{
"cp.list_master": MagicMock(return_value=[]),
"cp.list_master_dirs": MagicMock(return_value=[]),
},
):
ret = filemod.source_list(
[self.myfile + "-foo", self.myfile], "filehash", "base"
)
self.assertEqual(list(ret), [self.myfile, "filehash"])
def test_source_list_for_list_returns_existing_local_file_proto(self):
with patch.dict(
filemod.__salt__,
{
"cp.list_master": MagicMock(return_value=[]),
"cp.list_master_dirs": MagicMock(return_value=[]),
},
):
ret = filemod.source_list(
["file://" + self.myfile + "-foo", "file://" + self.myfile],
"filehash",
"base",
)
self.assertEqual(list(ret), ["file://" + self.myfile, "filehash"])
def test_source_list_for_list_returns_local_file_slash_from_dict(self):
with patch.dict(
filemod.__salt__,
{
"cp.list_master": MagicMock(return_value=[]),
"cp.list_master_dirs": MagicMock(return_value=[]),
},
):
ret = filemod.source_list([{self.myfile: ""}], "filehash", "base")
self.assertEqual(list(ret), [self.myfile, "filehash"])
def test_source_list_for_list_returns_local_file_proto_from_dict(self):
with patch.dict(
filemod.__salt__,
{
"cp.list_master": MagicMock(return_value=[]),
"cp.list_master_dirs": MagicMock(return_value=[]),
},
):
ret = filemod.source_list(
[{"file://" + self.myfile: ""}], "filehash", "base"
)
self.assertEqual(list(ret), ["file://" + self.myfile, "filehash"])
class LsattrTests(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
return {
filemod: {"__salt__": {"cmd.run": cmdmod.run}},
}
def run(self, result=None):
patch_aix = patch("salt.utils.platform.is_aix", Mock(return_value=False),)
patch_exists = patch("os.path.exists", Mock(return_value=True),)
patch_which = patch("salt.utils.path.which", Mock(return_value="fnord"),)
with patch_aix, patch_exists, patch_which:
super().run(result)
def test_if_lsattr_is_missing_it_should_return_None(self):
patch_which = patch("salt.utils.path.which", Mock(return_value=None),)
with patch_which:
actual = filemod.lsattr("foo")
assert actual is None, actual
def test_on_aix_lsattr_should_be_None(self):
patch_aix = patch("salt.utils.platform.is_aix", Mock(return_value=True),)
with patch_aix:
# SaltInvocationError will be raised if filemod.lsattr
# doesn't early exit
actual = filemod.lsattr("foo")
self.assertIsNone(actual)
def test_SaltInvocationError_should_be_raised_when_file_is_missing(self):
patch_exists = patch("os.path.exists", Mock(return_value=False),)
with patch_exists, self.assertRaises(SaltInvocationError):
filemod.lsattr("foo")
def test_if_chattr_version_is_less_than_required_flags_should_ignore_extended(self):
fname = "/path/to/fnord"
with_extended = (
textwrap.dedent(
"""
aAcCdDeijPsStTu---- {}
"""
)
.strip()
.format(fname)
)
expected = set("acdijstuADST")
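        # Without extended-attribute support only the basic chattr flags are
        # expected to be reported; the extended ones (e.g. C, e, P) are dropped.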
patch_has_ext = patch(
"salt.modules.file._chattr_has_extended_attrs", Mock(return_value=False),
)
patch_run = patch.dict(
filemod.__salt__, {"cmd.run": Mock(return_value=with_extended)},
)
with patch_has_ext, patch_run:
actual = set(filemod.lsattr(fname)[fname])
msg = "Actual: {!r} Expected: {!r}".format(
actual, expected
) # pylint: disable=E1322
assert actual == expected, msg
def test_if_chattr_version_is_high_enough_then_extended_flags_should_be_returned(
self,
):
fname = "/path/to/fnord"
with_extended = (
textwrap.dedent(
"""
aAcCdDeijPsStTu---- {}
"""
)
.strip()
.format(fname)
)
expected = set("aAcCdDeijPsStTu")
patch_has_ext = patch(
"salt.modules.file._chattr_has_extended_attrs", Mock(return_value=True),
)
patch_run = patch.dict(
filemod.__salt__, {"cmd.run": Mock(return_value=with_extended)},
)
with patch_has_ext, patch_run:
actual = set(filemod.lsattr(fname)[fname])
msg = "Actual: {!r} Expected: {!r}".format(
actual, expected
) # pylint: disable=E1322
assert actual == expected, msg
def test_if_supports_extended_but_there_are_no_flags_then_none_should_be_returned(
self,
):
fname = "/path/to/fnord"
with_extended = (
textwrap.dedent(
"""
------------------- {}
"""
)
.strip()
.format(fname)
)
expected = set("")
patch_has_ext = patch(
"salt.modules.file._chattr_has_extended_attrs", Mock(return_value=True),
)
patch_run = patch.dict(
filemod.__salt__, {"cmd.run": Mock(return_value=with_extended)},
)
with patch_has_ext, patch_run:
actual = set(filemod.lsattr(fname)[fname])
msg = "Actual: {!r} Expected: {!r}".format(
actual, expected
) # pylint: disable=E1322
assert actual == expected, msg
# This should create a merge conflict with ChattrVersionTests when
# a merge forward to develop happens. Develop's changes are made
# obsolete by this ChattrTests class, and should be removed in favor
# of this change.
@skipIf(salt.utils.platform.is_windows(), "Chattr shouldn't be available on Windows")
class ChattrTests(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
return {
filemod: {
"__salt__": {"cmd.run": cmdmod.run},
"__opts__": {"test": False},
},
}
def run(self, result=None):
patch_aix = patch("salt.utils.platform.is_aix", Mock(return_value=False),)
patch_exists = patch("os.path.exists", Mock(return_value=True),)
patch_which = patch("salt.utils.path.which", Mock(return_value="some/tune2fs"),)
with patch_aix, patch_exists, patch_which:
super().run(result)
def test_chattr_version_returns_None_if_no_tune2fs_exists(self):
patch_which = patch("salt.utils.path.which", Mock(return_value=""),)
with patch_which:
actual = filemod._chattr_version()
self.assertIsNone(actual)
def test_on_aix_chattr_version_should_be_None_even_if_tune2fs_exists(self):
patch_which = patch("salt.utils.path.which", Mock(return_value="fnord"),)
patch_aix = patch("salt.utils.platform.is_aix", Mock(return_value=True),)
mock_run = MagicMock(return_value="fnord")
patch_run = patch.dict(filemod.__salt__, {"cmd.run": mock_run})
with patch_which, patch_aix, patch_run:
actual = filemod._chattr_version()
self.assertIsNone(actual)
mock_run.assert_not_called()
def test_chattr_version_should_return_version_from_tune2fs(self):
expected = "1.43.4"
sample_output = textwrap.dedent(
"""
tune2fs 1.43.4 (31-Jan-2017)
Usage: tune2fs [-c max_mounts_count] [-e errors_behavior] [-f] [-g group]
[-i interval[d|m|w]] [-j] [-J journal_options] [-l]
[-m reserved_blocks_percent] [-o [^]mount_options[,...]]
[-p mmp_update_interval] [-r reserved_blocks_count] [-u user]
[-C mount_count] [-L volume_label] [-M last_mounted_dir]
[-O [^]feature[,...]] [-Q quota_options]
[-E extended-option[,...]] [-T last_check_time] [-U UUID]
[-I new_inode_size] [-z undo_file] device
"""
)
patch_which = patch("salt.utils.path.which", Mock(return_value="fnord"),)
patch_run = patch.dict(
filemod.__salt__, {"cmd.run": MagicMock(return_value=sample_output)},
)
with patch_which, patch_run:
actual = filemod._chattr_version()
self.assertEqual(actual, expected)
def test_if_tune2fs_has_no_version_version_should_be_None(self):
patch_which = patch("salt.utils.path.which", Mock(return_value="fnord"),)
patch_run = patch.dict(
filemod.__salt__, {"cmd.run": MagicMock(return_value="fnord")},
)
with patch_which, patch_run:
actual = filemod._chattr_version()
self.assertIsNone(actual)
def test_chattr_has_extended_attrs_should_return_False_if_chattr_version_is_None(
self,
):
patch_chattr = patch(
"salt.modules.file._chattr_version", Mock(return_value=None),
)
with patch_chattr:
actual = filemod._chattr_has_extended_attrs()
assert not actual, actual
def test_chattr_has_extended_attrs_should_return_False_if_version_is_too_low(self):
below_expected = "0.1.1"
patch_chattr = patch(
"salt.modules.file._chattr_version", Mock(return_value=below_expected),
)
with patch_chattr:
actual = filemod._chattr_has_extended_attrs()
assert not actual, actual
def test_chattr_has_extended_attrs_should_return_False_if_version_is_equal_threshold(
self,
):
threshold = "1.41.12"
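        # 1.41.12 is treated as the cutoff version: extended attribute support
        # is only reported for strictly greater versions, so equality is False.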
patch_chattr = patch(
"salt.modules.file._chattr_version", Mock(return_value=threshold),
)
with patch_chattr:
actual = filemod._chattr_has_extended_attrs()
assert not actual, actual
def test_chattr_has_extended_attrs_should_return_True_if_version_is_above_threshold(
self,
):
higher_than = "1.41.13"
patch_chattr = patch(
"salt.modules.file._chattr_version", Mock(return_value=higher_than),
)
with patch_chattr:
actual = filemod._chattr_has_extended_attrs()
assert actual, actual
# We're skipping this on Windows as it tests the check_perms function in
# file.py which is specifically for Linux. The Windows version resides in
# win_file.py
@skipIf(salt.utils.platform.is_windows(), "Skip on Windows")
def test_check_perms_should_report_no_attr_changes_if_there_are_none(self):
filename = "/path/to/fnord"
attrs = "aAcCdDeijPsStTu"
higher_than = "1.41.13"
patch_chattr = patch(
"salt.modules.file._chattr_version", Mock(return_value=higher_than),
)
patch_exists = patch("os.path.exists", Mock(return_value=True),)
patch_stats = patch(
"salt.modules.file.stats",
Mock(return_value={"user": "foo", "group": "bar", "mode": "123"}),
)
patch_run = patch.dict(
filemod.__salt__,
{"cmd.run": MagicMock(return_value="--------- " + filename)},
)
with patch_chattr, patch_exists, patch_stats, patch_run:
actual_ret, actual_perms = filemod.check_perms(
name=filename,
ret=None,
user="foo",
group="bar",
mode="123",
attrs=attrs,
follow_symlinks=False,
)
assert actual_ret.get("changes", {}).get("attrs") is None, actual_ret
# We're skipping this on Windows as it tests the check_perms function in
# file.py which is specifically for Linux. The Windows version resides in
# win_file.py
@skipIf(salt.utils.platform.is_windows(), "Skip on Windows")
def test_check_perms_should_report_attrs_new_and_old_if_they_changed(self):
filename = "/path/to/fnord"
attrs = "aAcCdDeijPsStTu"
existing_attrs = "aeiu"
expected = {
"attrs": {"old": existing_attrs, "new": attrs},
}
higher_than = "1.41.13"
patch_chattr = patch(
"salt.modules.file._chattr_version", Mock(return_value=higher_than),
)
patch_stats = patch(
"salt.modules.file.stats",
Mock(return_value={"user": "foo", "group": "bar", "mode": "123"}),
)
patch_cmp = patch(
"salt.modules.file._cmp_attrs",
MagicMock(
side_effect=[
filemod.AttrChanges(added="aAcCdDeijPsStTu", removed="",),
filemod.AttrChanges(None, None,),
]
),
)
patch_chattr = patch("salt.modules.file.chattr", MagicMock(),)
def fake_cmd(cmd, *args, **kwargs):
if cmd == ["lsattr", "/path/to/fnord"]:
return textwrap.dedent(
"""
{}---- {}
""".format(
existing_attrs, filename
)
).strip()
else:
assert False, "not sure how to handle {}".format(cmd)
patch_run = patch.dict(
filemod.__salt__, {"cmd.run": MagicMock(side_effect=fake_cmd)},
)
patch_ver = patch(
"salt.modules.file._chattr_has_extended_attrs",
MagicMock(return_value=True),
)
with patch_chattr, patch_stats, patch_cmp, patch_run, patch_ver:
actual_ret, actual_perms = filemod.check_perms(
name=filename,
ret=None,
user="foo",
group="bar",
mode="123",
attrs=attrs,
follow_symlinks=False,
)
self.assertDictEqual(actual_ret["changes"], expected)
@skipIf(salt.modules.selinux.getenforce() != "Enforcing", "Skip if selinux not enabled")
class FileSelinuxTestCase(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
return {
filemod: {
"__salt__": {
"cmd.run": cmdmod.run,
"cmd.run_all": cmdmod.run_all,
"cmd.retcode": cmdmod.retcode,
"selinux.fcontext_add_policy": MagicMock(
return_value={"retcode": 0, "stdout": ""}
),
},
"__opts__": {"test": False},
}
}
def setUp(self):
# Read copy 1
self.tfile1 = tempfile.NamedTemporaryFile(delete=False, mode="w+")
# Edit copy 2
self.tfile2 = tempfile.NamedTemporaryFile(delete=False, mode="w+")
# Edit copy 3
self.tfile3 = tempfile.NamedTemporaryFile(delete=False, mode="w+")
def tearDown(self):
os.remove(self.tfile1.name)
del self.tfile1
os.remove(self.tfile2.name)
del self.tfile2
os.remove(self.tfile3.name)
del self.tfile3
def test_selinux_getcontext(self):
"""
Test get selinux context
Assumes default selinux attributes on temporary files
"""
result = filemod.get_selinux_context(self.tfile1.name)
self.assertEqual(result, "unconfined_u:object_r:user_tmp_t:s0")
def test_selinux_setcontext(self):
"""
Test set selinux context
Assumes default selinux attributes on temporary files
"""
result = filemod.set_selinux_context(self.tfile2.name, user="system_u")
self.assertEqual(result, "system_u:object_r:user_tmp_t:s0")
def test_selinux_setcontext_persist(self):
"""
Test set selinux context with persist=True
Assumes default selinux attributes on temporary files
"""
result = filemod.set_selinux_context(
self.tfile2.name, user="system_u", persist=True
)
self.assertEqual(result, "system_u:object_r:user_tmp_t:s0")
def test_file_check_perms(self):
expected_result = (
{
"comment": "The file {} is set to be changed".format(self.tfile3.name),
"changes": {
"selinux": {"New": "Type: lost_found_t", "Old": "Type: user_tmp_t"},
"mode": "0644",
},
"name": self.tfile3.name,
"result": True,
},
{"luser": "root", "lmode": "0600", "lgroup": "root"},
)
# Disable lsattr calls
with patch("salt.utils.path.which") as m_which:
m_which.return_value = None
result = filemod.check_perms(
self.tfile3.name,
{},
"root",
"root",
644,
seuser=None,
serole=None,
setype="lost_found_t",
serange=None,
)
self.assertEqual(result, expected_result)
|
py | 1a52d1bfb8ebc960745f763ad5e23e997697d514 | from plenum.test.test_node import ensure_node_disconnected, getNonPrimaryReplicas
from indy_node.test.helper import addRawAttribute
from indy_client.test.conftest import nodeSet
from indy_common.test.conftest import config_helper_class, node_config_helper_class
def test_n_minus_f_pool_processes_attrib(looper, nodeSet, up,
steward, stewardWallet):
"""
    A pool of N-f nodes should be able to process an ATTRIB txn.
https://jira.hyperledger.org/browse/INDY-245
"""
make_pool_n_minus_f_nodes(looper, nodeSet)
addRawAttribute(looper, steward, stewardWallet,
'foo', 'bar')
def make_pool_n_minus_f_nodes(looper, nodeSet):
non_primary, other_nodes = get_any_non_primary_and_others(nodeSet)
disconnect_node(looper, non_primary, other_nodes)
def get_any_non_primary_and_others(node_set):
non_primary_node = getNonPrimaryReplicas(node_set, 0)[0].node
other_nodes = [n for n in node_set if n != non_primary_node]
return non_primary_node, other_nodes
def disconnect_node(looper, node, other_nodes):
node.stop()
looper.removeProdable(node)
ensure_node_disconnected(looper, node, other_nodes)
check_if_pool_n_minus_f(other_nodes)
def check_if_pool_n_minus_f(nodes):
for node in nodes:
min_connection = node.minimumNodes - 1 # subtract node itself
assert len(node.nodestack.connecteds) == min_connection, \
"the pool should have minimum (N-f) nodes connected"
|
py | 1a52d23596ba80966baeb265275cfc5084cbc5db | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'itaigisite.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
py | 1a52d27e6863abe4c073f87e2fe8767b02082b6b | import sys
sys.path.append(".")
import numpy as np
from dctree.core.basemodel import BaseModel
class AdaboostClassifier(BaseModel):
def __init__(self,base_estimator:BaseModel,n_estimators=5,learning_rate=1.0,base_estimator_params={}):
"""
An AdaBoost classifier \n
Params:
base_estimator : The base estimator from which the boosted ensemble is built
n_estimators : The maximum number of estimators at which boosting is terminated
learning_rate : Weight applied to each classifier at each boosting iteration
base_estimator_params : The parameters of base estimators
"""
self.estimator = base_estimator
self.estimator_params = base_estimator_params
self.learning_rate = learning_rate
self.n_estimators = n_estimators
def init_estimators(self):
"""
Initialize base estimators.
"""
estimators = []
for _ in range(self.n_estimators):
estimator = self.estimator()
for key,value in self.estimator_params.items():
setattr(estimator,key,value)
estimators.append(estimator)
return estimators
def init_sample_weights(self,sample_size:int,init_weight:float=None):
"""
Initialize the sample weights.
"""
if init_weight is not None:
weights = np.full(sample_size,init_weight)
else:
weight = 1 / sample_size
weights = np.full(sample_size,weight)
return weights
def calculate_error_rate(self,estimator,X:np.ndarray,Y:np.ndarray,W:np.ndarray=None):
"""
Calculate the error rate of base estimator
"""
if W is not None:
return 1 - estimator.score(X,Y,W)
else:
return 1 - estimator.score(X,Y)
def score(self,X:np.ndarray,Y:np.ndarray):
"""
Return the mean accuracy on the given test data and labels \n
Params:
X : Test samples
Y : True labels for X
"""
Y_pred = self.predict(X)
Y_comp = (Y_pred==Y).astype(np.int8)
sum = np.sum(Y_comp)
return sum / Y_comp.shape[0]
def calculate_model_coefficient(self,error_rate,n_classes,epsilon=1e-6):
"""
Calculate the coefficient of base estimator
"""
alpha = self.learning_rate * (np.log((1-error_rate) / (error_rate + epsilon)) +\
np.log(n_classes-1)) #SAMME
return alpha
def calculate_new_weights(self,coef,Y_pred:np.ndarray,Y:np.ndarray,W:np.ndarray):
"""
Calculate new weights
"""
W_new = np.zeros_like(W)
for i,w in enumerate(W):
y_pred = Y_pred[i]
y = Y[i]
param = coef * int(y_pred != y)
w_new = w * np.exp(param)
W_new[i] = w_new
return W_new
def _fit(self,X:np.ndarray,Y:np.ndarray):
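        # SAMME boosting loop: fit each estimator on the weighted samples,
        # compute its weighted error and coefficient, then update the sample
        # weights for the next round.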
sample_size = X.shape[0]
        self.n_classes = len(np.unique(Y)) # count the number of distinct classes in Y
        n_classes = self.n_classes
        self.estimators = self.init_estimators() # initialize the base estimators
        self.W = self.init_sample_weights(sample_size) # initialize the sample weights
        self.coefs = np.zeros(len(self.estimators)) # initialize the model coefficients
for i,estimator in enumerate(self.estimators):
W = self.W
estimator.fit(X,Y,sample_weight=W)
error = self.calculate_error_rate(estimator,X,Y,W)
coef = self.calculate_model_coefficient(error,n_classes)
self.coefs[i] = coef
Y_pred = estimator.predict(X)
self.W = self.calculate_new_weights(coef,Y_pred,Y,W)
def _predict(self,X:np.ndarray):
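        # Weighted vote: each estimator adds its coefficient to the class it
        # predicts for the sample; the class with the largest total wins.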
len_X = X.shape[0]
        Y_pred = np.zeros(len_X,dtype=np.int32) # initialize the predicted labels
for i,row in enumerate(X):
x = row.reshape(1,-1)
W = np.zeros(self.n_classes)
for j,estimator in enumerate(self.estimators):
y_pred = estimator.predict(x)
W[y_pred] += self.coefs[j]
Y_pred[i] = np.argmax(W)
return Y_pred
|
py | 1a52d56ef6a0270fa4574265f929a1a51f44af29 | # Generated by Django 3.1.13 on 2021-12-01 13:26
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
("beneficiary", "0017_auto_20211201_0954"),
]
operations = [
migrations.RenameField(
model_name="lab",
old_name="date",
new_name="requested_date",
),
migrations.AddField(
model_name="lab",
name="created",
field=models.DateTimeField(
auto_now_add=True, default=django.utils.timezone.now
),
preserve_default=False,
),
migrations.AddField(
model_name="lab",
name="results",
field=models.TextField(blank=True, null=True, verbose_name="Lab Results"),
),
migrations.AddField(
model_name="lab",
name="results_status",
field=models.CharField(
blank=True, max_length=200, null=True, verbose_name="Lab Results Status"
),
),
migrations.CreateModel(
name="BeneficiaryService",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"provider_comments",
models.TextField(
blank=True, null=True, verbose_name="Extra Details/Comment"
),
),
("interaction_date", models.DateTimeField(blank=True, null=True)),
(
"no_of_days",
models.IntegerField(
blank=True, null=True, verbose_name="No of Days"
),
),
(
"when_to_take",
models.TextField(
blank=True,
max_length=500,
null=True,
verbose_name="When to Take",
),
),
("creeated", models.DateTimeField(blank=True, null=True)),
(
"beneficiary",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="beneficiary.beneficiary",
),
),
(
"facility",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="beneficiary.facility",
),
),
(
"lab",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="beneficiary.lab",
),
),
(
"prescription",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="beneficiary.prescription",
),
),
(
"service",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="beneficiary.service",
),
),
(
"service_provider",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.PROTECT,
to="beneficiary.serviceproviderpersonel",
),
),
],
options={
"verbose_name": "Beneficiary Service",
"verbose_name_plural": "Beneficiary Services",
},
),
]
|
py | 1a52d5a20406be41a11f6001daf32c9d50bd4061 | """Test functions for the sparse.linalg._onenormest module
"""
import numpy as np
from numpy.testing import assert_allclose, assert_equal, assert_
import pytest
import scipy.linalg
import scipy.sparse.linalg
from scipy.sparse.linalg._onenormest import _onenormest_core, _algorithm_2_2
class MatrixProductOperator(scipy.sparse.linalg.LinearOperator):
"""
This is purely for onenormest testing.
"""
def __init__(self, A, B):
if A.ndim != 2 or B.ndim != 2:
raise ValueError('expected ndarrays representing matrices')
if A.shape[1] != B.shape[0]:
raise ValueError('incompatible shapes')
self.A = A
self.B = B
self.ndim = 2
self.shape = (A.shape[0], B.shape[1])
def _matvec(self, x):
return np.dot(self.A, np.dot(self.B, x))
def _rmatvec(self, x):
return np.dot(np.dot(x, self.A), self.B)
def _matmat(self, X):
return np.dot(self.A, np.dot(self.B, X))
@property
def T(self):
return MatrixProductOperator(self.B.T, self.A.T)
class TestOnenormest:
@pytest.mark.xslow
def test_onenormest_table_3_t_2(self):
# This will take multiple seconds if your computer is slow like mine.
# It is stochastic, so the tolerance could be too strict.
np.random.seed(1234)
t = 2
n = 100
itmax = 5
nsamples = 5000
observed = []
expected = []
nmult_list = []
nresample_list = []
for i in range(nsamples):
A = scipy.linalg.inv(np.random.randn(n, n))
est, v, w, nmults, nresamples = _onenormest_core(A, A.T, t, itmax)
observed.append(est)
expected.append(scipy.linalg.norm(A, 1))
nmult_list.append(nmults)
nresample_list.append(nresamples)
observed = np.array(observed, dtype=float)
expected = np.array(expected, dtype=float)
relative_errors = np.abs(observed - expected) / expected
# check the mean underestimation ratio
underestimation_ratio = observed / expected
assert_(0.99 < np.mean(underestimation_ratio) < 1.0)
# check the max and mean required column resamples
assert_equal(np.max(nresample_list), 2)
assert_(0.05 < np.mean(nresample_list) < 0.2)
# check the proportion of norms computed exactly correctly
nexact = np.count_nonzero(relative_errors < 1e-14)
proportion_exact = nexact / float(nsamples)
assert_(0.9 < proportion_exact < 0.95)
# check the average number of matrix*vector multiplications
assert_(3.5 < np.mean(nmult_list) < 4.5)
@pytest.mark.xslow
def test_onenormest_table_4_t_7(self):
# This will take multiple seconds if your computer is slow like mine.
# It is stochastic, so the tolerance could be too strict.
np.random.seed(1234)
t = 7
n = 100
itmax = 5
nsamples = 5000
observed = []
expected = []
nmult_list = []
nresample_list = []
for i in range(nsamples):
A = np.random.randint(-1, 2, size=(n, n))
est, v, w, nmults, nresamples = _onenormest_core(A, A.T, t, itmax)
observed.append(est)
expected.append(scipy.linalg.norm(A, 1))
nmult_list.append(nmults)
nresample_list.append(nresamples)
observed = np.array(observed, dtype=float)
expected = np.array(expected, dtype=float)
relative_errors = np.abs(observed - expected) / expected
# check the mean underestimation ratio
underestimation_ratio = observed / expected
assert_(0.90 < np.mean(underestimation_ratio) < 0.99)
# check the required column resamples
assert_equal(np.max(nresample_list), 0)
# check the proportion of norms computed exactly correctly
nexact = np.count_nonzero(relative_errors < 1e-14)
proportion_exact = nexact / float(nsamples)
assert_(0.15 < proportion_exact < 0.25)
# check the average number of matrix*vector multiplications
assert_(3.5 < np.mean(nmult_list) < 4.5)
def test_onenormest_table_5_t_1(self):
# "note that there is no randomness and hence only one estimate for t=1"
t = 1
n = 100
itmax = 5
alpha = 1 - 1e-6
A = -scipy.linalg.inv(np.identity(n) + alpha*np.eye(n, k=1))
first_col = np.array([1] + [0]*(n-1))
first_row = np.array([(-alpha)**i for i in range(n)])
B = -scipy.linalg.toeplitz(first_col, first_row)
assert_allclose(A, B)
est, v, w, nmults, nresamples = _onenormest_core(B, B.T, t, itmax)
exact_value = scipy.linalg.norm(B, 1)
underest_ratio = est / exact_value
assert_allclose(underest_ratio, 0.05, rtol=1e-4)
assert_equal(nmults, 11)
assert_equal(nresamples, 0)
# check the non-underscored version of onenormest
est_plain = scipy.sparse.linalg.onenormest(B, t=t, itmax=itmax)
assert_allclose(est, est_plain)
@pytest.mark.xslow
def test_onenormest_table_6_t_1(self):
#TODO this test seems to give estimates that match the table,
#TODO even though no attempt has been made to deal with
#TODO complex numbers in the one-norm estimation.
# This will take multiple seconds if your computer is slow like mine.
# It is stochastic, so the tolerance could be too strict.
np.random.seed(1234)
t = 1
n = 100
itmax = 5
nsamples = 5000
observed = []
expected = []
nmult_list = []
nresample_list = []
for i in range(nsamples):
A_inv = np.random.rand(n, n) + 1j * np.random.rand(n, n)
A = scipy.linalg.inv(A_inv)
est, v, w, nmults, nresamples = _onenormest_core(A, A.T, t, itmax)
observed.append(est)
expected.append(scipy.linalg.norm(A, 1))
nmult_list.append(nmults)
nresample_list.append(nresamples)
observed = np.array(observed, dtype=float)
expected = np.array(expected, dtype=float)
relative_errors = np.abs(observed - expected) / expected
# check the mean underestimation ratio
underestimation_ratio = observed / expected
underestimation_ratio_mean = np.mean(underestimation_ratio)
assert_(0.90 < underestimation_ratio_mean < 0.99)
# check the required column resamples
max_nresamples = np.max(nresample_list)
assert_equal(max_nresamples, 0)
# check the proportion of norms computed exactly correctly
nexact = np.count_nonzero(relative_errors < 1e-14)
proportion_exact = nexact / float(nsamples)
assert_(0.7 < proportion_exact < 0.8)
# check the average number of matrix*vector multiplications
mean_nmult = np.mean(nmult_list)
assert_(4 < mean_nmult < 5)
def _help_product_norm_slow(self, A, B):
# for profiling
C = np.dot(A, B)
return scipy.linalg.norm(C, 1)
def _help_product_norm_fast(self, A, B):
# for profiling
t = 2
itmax = 5
D = MatrixProductOperator(A, B)
est, v, w, nmults, nresamples = _onenormest_core(D, D.T, t, itmax)
return est
@pytest.mark.slow
def test_onenormest_linear_operator(self):
# Define a matrix through its product A B.
# Depending on the shapes of A and B,
# it could be easy to multiply this product by a small matrix,
# but it could be annoying to look at all of
# the entries of the product explicitly.
np.random.seed(1234)
n = 6000
k = 3
A = np.random.randn(n, k)
B = np.random.randn(k, n)
fast_estimate = self._help_product_norm_fast(A, B)
exact_value = self._help_product_norm_slow(A, B)
assert_(fast_estimate <= exact_value <= 3*fast_estimate,
'fast: %g\nexact:%g' % (fast_estimate, exact_value))
def test_returns(self):
np.random.seed(1234)
A = scipy.sparse.rand(50, 50, 0.1)
s0 = scipy.linalg.norm(A.todense(), 1)
s1, v = scipy.sparse.linalg.onenormest(A, compute_v=True)
s2, w = scipy.sparse.linalg.onenormest(A, compute_w=True)
s3, v2, w2 = scipy.sparse.linalg.onenormest(A, compute_w=True, compute_v=True)
assert_allclose(s1, s0, rtol=1e-9)
assert_allclose(np.linalg.norm(A.dot(v), 1), s0*np.linalg.norm(v, 1), rtol=1e-9)
assert_allclose(A.dot(v), w, rtol=1e-9)
class TestAlgorithm_2_2:
def test_randn_inv(self):
np.random.seed(1234)
n = 20
nsamples = 100
for i in range(nsamples):
# Choose integer t uniformly between 1 and 3 inclusive.
t = np.random.randint(1, 4)
# Choose n uniformly between 10 and 40 inclusive.
n = np.random.randint(10, 41)
# Sample the inverse of a matrix with random normal entries.
A = scipy.linalg.inv(np.random.randn(n, n))
# Compute the 1-norm bounds.
g, ind = _algorithm_2_2(A, A.T, t)
|
py | 1a52d5a86eac0256b67d795d9e2885288f29692e | # -*- encoding: utf-8 -*-
#
# Author: John Tran <[email protected]>
# Julien Danjou <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import distutils.version as dist_version
import os
import migrate
from migrate.versioning import util as migrate_util
import sqlalchemy
from ceilometer.openstack.common import log
INIT_VERSION = 1
LOG = log.getLogger(__name__)
@migrate_util.decorator
def patched_with_engine(f, *a, **kw):
url = a[0]
engine = migrate_util.construct_engine(url, **kw)
try:
kw['engine'] = engine
return f(*a, **kw)
finally:
if isinstance(engine, migrate_util.Engine) and engine is not url:
migrate_util.log.debug('Disposing SQLAlchemy engine %s', engine)
engine.dispose()
# TODO(jkoelker) When migrate 0.7.3 is released and nova depends
# on that version or higher, this can be removed
MIN_PKG_VERSION = dist_version.StrictVersion('0.7.3')
if (not hasattr(migrate, '__version__')
or dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION):
migrate_util.with_engine = patched_with_engine
# NOTE(jkoelker) Delay importing migrate until we are patched
from migrate import exceptions as versioning_exceptions
from migrate.versioning import api as versioning_api
from migrate.versioning.repository import Repository
_REPOSITORY = None
def db_sync(engine, version=None):
if version is not None:
try:
version = int(version)
except ValueError:
raise Exception(_("version should be an integer"))
current_version = db_version(engine)
repository = _find_migrate_repo()
if version is None or version > current_version:
return versioning_api.upgrade(engine, repository, version)
else:
return versioning_api.downgrade(engine, repository,
version)
def db_version(engine):
repository = _find_migrate_repo()
try:
return versioning_api.db_version(engine,
repository)
except versioning_exceptions.DatabaseNotControlledError:
meta = sqlalchemy.MetaData()
meta.reflect(bind=engine)
tables = meta.tables
if len(tables) == 0:
db_version_control(engine, 0)
return versioning_api.db_version(engine, repository)
def db_version_control(engine, version=None):
repository = _find_migrate_repo()
versioning_api.version_control(engine, repository, version)
return version
def _find_migrate_repo():
"""Get the path for the migrate repository."""
global _REPOSITORY
path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'migrate_repo')
assert os.path.exists(path)
if _REPOSITORY is None:
_REPOSITORY = Repository(path)
return _REPOSITORY
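# --- Usage sketch (illustrative; not part of the original module) ---
# Given a SQLAlchemy engine, the typical call pattern for these helpers would be:
#
#   engine = sqlalchemy.create_engine('sqlite:///ceilometer.db')  # URL is only an example
#   db_version_control(engine, 0)   # put an uncontrolled schema under version control
#   db_sync(engine)                 # upgrade to the newest migration
#   db_sync(engine, 3)              # move (upgrade or downgrade) to version 3
#   current = db_version(engine)    # read back the current schema version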
|
py | 1a52d64f3906bd1f32332b161c684bced6898a5c | from django.db.models.signals import post_migrate
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import Permission
def add_view_permissions(sender, **kwargs):
"""
This syncdb hook takes care of adding a view permission to all our
content types.
"""
# for each of our content types
for content_type in ContentType.objects.all():
# build our permission slug
if not content_type.model:
continue
codename = "view_%s" % content_type.model
# if it doesn't exist..
if not Permission.objects.filter(content_type=content_type,
codename=codename):
# add it
Permission.objects.create(content_type=content_type,
codename=codename,
name="Can view %s" % content_type.name)
print("Added view permission for %s" % content_type.name)
post_migrate.connect(add_view_permissions)
|
py | 1a52d6c64db131b5632b860a5f1b0bccc5712a30 |
# file: sim.py
# author: Jingyi Wang
# created: 10/13/2017
# modified: 10/13/2017
# input: m, n, lambda, mu, outdir
# output: u.tsv, c.tsv, t.dot, sample.vcf
##########
# Import #
##########
from __future__ import division
import sys
import os
import argparse
import vcf
import itertools
import random
import operator
import datetime
import shutil
import numpy as np
import graphviz as gv
import chrm_prof as chpr
import gene_prof as gnpr
sys.path.insert(0, 'helper/')
import combine_copy_nums as ccn
#############
# Functions #
#############
def printnow(s, newline = True):
s = str(s)
if newline:
s += '\n'
sys.stdout.write(s)
sys.stdout.flush()
def main(argv):
args = get_args(argv)
# input arguments
m = args['m']
n = args['n']
num_mutes = args['num_mutes']
directory = os.path.dirname(os.path.realpath(__file__))
# for num_mutes in [10, 50, 100]:
# for m in [1, 3, 5, 10]:
# for n in [2, 3, 4, 5]:
# print 'n:', n, 'm:', m, 'num_mutes:', num_mutes
size_mutes = args['size_mutes']
metaFile = args['meta_file']
output_folder = args['output_folder']
constants_dict = dict()
constants_dict['mut_types'] = ['amp', 'rem', 'inv']
constants_dict['exp_mut_size'] = size_mutes # default exp_mut_size is 5745000
constants_dict['exp_mut_count'] = num_mutes / ( 2 * n - 2)
constants_dict['cov'] = 20
constants_dict['read_len'] = 300
constants_dict['num_patients'] = 5
# remove chrom_dict later
chrom_dict = dict()
chrom_dict[('1', 0)] = chpr.ChrmProf(248956422)
chrom_dict[('1', 1)] = chpr.ChrmProf(248956422)
chrom_dict[('2', 0)] = chpr.ChrmProf(242193529)
chrom_dict[('2', 1)] = chpr.ChrmProf(242193529)
chrom_dict[('3', 0)] = chpr.ChrmProf(198295559)
chrom_dict[('3', 1)] = chpr.ChrmProf(198295559)
# sub_folder_name = 'n_' + str(n) + '_m_' + str(m) + '_l_' + str(num_mutes)
for patient_idx in range(1, 1 + constants_dict['num_patients']):
patient_folder_name = 'patient' + str(patient_idx)
# outputFolder = directory + '/sim_data' + '/' + sub_folder_name + '/' + patient_folder_name
outputFolder = output_folder + '/' + patient_folder_name
# clean up existing files under outputFolder
if os.path.exists(outputFolder):
shutil.rmtree(outputFolder)
os.makedirs(outputFolder)
l = random_get_tree(n) # list
edge_list = get_edges(l)
gp = gnpr.GeneProf(chrom_dict, constants_dict)
t = Tree(edge_list, gp)
geneprof_list = list()
t.add_mutations_along_edges(t.rootNode, geneprof_list)
generate_t(t, 'T.dot', outputFolder)
U = random_get_usages(m, 2 * n - 1)
l, sv_cn_idx_dict = get_bp_copy_num_idx_dict(t, n, constants_dict)
r, seg_cn_idx_dict, seg_bgn_idx_dict, seg_end_idx_dict = get_seg_copy_num_idx_dict(t, n)
C = generate_c(t, n, constants_dict)
c_p, c_m = generate_seg_cp_paternal(t, n)
F = generate_f(U, C)
a, h, mate_dict = get_a_h_mate_dict(t, n, constants_dict)
output_tsv(U, '/U.tsv', outputFolder)
output_tsv(C, '/C.tsv', outputFolder)
output_tsv(F, '/F.tsv', outputFolder)
generate_s(metaFile, t, l, sv_cn_idx_dict, r, seg_cn_idx_dict, seg_bgn_idx_dict, seg_end_idx_dict, F, U, C, c_p, c_m, a, h, mate_dict, outputFolder)
# given a number n, generate all possible directed binary trees with n nodes.
# eg. if n = 4, return [ [1,[1,[1,1]]], [1,[[1,1],1]], [[1,1],[1,1]], [[1,[1,1]],1], [[[1,1],1],1] ]
# each 1 in the list represents a tree node.
def all_possible_trees(n):
l = list()
for tree in all_possible_trees_helper(n):
l.append(tree)
return l
def all_possible_trees_helper(n):
# base case
if n == 1:
yield 1
# recursive case
for i in range(1, n):
left_list = all_possible_trees_helper(i)
right_list = all_possible_trees_helper(n - i)
for left, right in itertools.product(left_list, right_list):
yield [left, right]
# input n (number of leaves), output a random tree represented by a list.
# tree node is represented by 1.
def random_get_tree(n):
l = all_possible_trees(n)
idx = random.randint(0, len(l) - 1)
return l[idx]
# input a tree (list), return number of leaves
def get_number_of_leaves(l):
if l[0] == 1 and l[1] == 1:
return 2
elif l[0] == 1 and l[1] != 1:
return 1 + get_number_of_leaves(l[1])
elif l[0] != 1 and l[1] == 1:
return 1 + get_number_of_leaves(l[0])
else:
return get_number_of_leaves(l[0]) + get_number_of_leaves(l[1])
# given a tree (list), return list of edges(tuple). tuple format: (id, parent_id, l/r)
# l/r: left or right child
def get_edges(l):
n = get_number_of_leaves(l)
p = 2 * n - 1 # idx of root node
leaf_list = list(range(1, n + 1)) # indices of leaf nodes
result = list()
get_edges_helper(l, n, leaf_list, p, result)
return result
def get_edges_helper(l, n, leaf_list, p, result):
left, right = l[0], l[1]
if left == 1 and right == 1:
result.append((leaf_list[0], p, 'l'))
result.append((leaf_list[1], p, 'r'))
elif left == 1 and right != 1:
result.append((leaf_list[0], p, 'l'))
result.append((p - 1, p, 'r'))
get_edges_helper(l[1], n, leaf_list[1:], p - 1, result)
elif right == 1 and left != 1:
result.append((leaf_list[-1], p, 'r'))
result.append((p-1, p, 'l'))
get_edges_helper(l[0], n-1, leaf_list[:-1], p - 1, result)
else:
n_left = get_number_of_leaves(l[0])
leaf_list_left = list(range(1, n_left + 1))
leaf_list_right = list(range(n_left + 1, n + 1))
result.append((p - 2, p, 'l')) # left
result.append((p - 1, p, 'r')) # right
get_edges_helper(l[0], n_left, leaf_list_left, p - 2, result) # left
get_edges_helper(l[1], n, leaf_list_right, p - 1, result) # right
return result
# pwd: os.path.dirname(os.path.realpath(__file__))
# generate a dot file to save the random tree with n nodes and random mutations
def generate_t(tree, filename, directory):
dot = gv.Digraph()
nodes = tree.node_list
edges = get_graph_edges(tree.edge_list)
add_nodes(dot, nodes)
add_edges(dot, edges)
# print dot.source
dot.save(filename, directory)
return
def add_nodes(graph, nodes):
for n in nodes:
if isinstance(n, tuple):
graph.node(str(n[0]), **n[1])
else:
graph.node(str(n))
return graph
def add_edges(graph, edges):
for e in edges:
if isinstance(e[0], tuple):
graph.edge(*e[0], **e[1])
else:
graph.edge(*e)
return graph
# edge_list: list of (node_id (int), parent_node_id (int)) tuples.
# return list of (from (str), to (str)) tuples.
def get_graph_edges(edge_list):
result = list()
for(node_id, parent_node_id, lr) in edge_list:
result.append((str(parent_node_id), str(node_id)))
return result
# return a 1 by n np array sum to one
def get_usage_for_one_patient(n):
a = np.random.dirichlet(np.ones(n),size = 1)
return a
# return m by n np array, each row sum to one
def random_get_usages(m, n):
a = get_usage_for_one_patient(n)
for i in range(1, m):
b = get_usage_for_one_patient(n)
a = np.concatenate((a, b), axis = 0)
return a
# input a tree (Tree object) and n (number of leaf nodes)
# output a l (number of bps) and a dictionary
# key: chrom
# val: dictionary
# key: (pos, isLeft) tuple. When two bps have the same pos, the one with isLeft == True comes first.
# val: index of the bp
def get_bp_copy_num_idx_dict(tree, n, constants_dict):
# put all bps in leaf nodes into dictionary d1
d1 = dict()
for idx in range(1, n + 1):
temp_bp_dict = tree.idx_node_dict[idx].geneProf.get_sv_read_nums_dict(constants_dict['cov'], constants_dict['read_len'])
for chrom in temp_bp_dict.keys():
if chrom not in d1:
d1[chrom] = set()
for (pos, isLeft) in temp_bp_dict[chrom]:
if (pos, isLeft) not in d1[chrom]:
d1[chrom].add((pos, isLeft))
# sort vals in d1 based on pos and isLeft (isLeft == True comes first) and chrom
idx = 0
sorted_chrom = sorted(d1.keys())
d2 = dict()
for chrom in sorted_chrom:
d2[chrom] = dict()
sorted_pos_list = sorted(list(set(map(operator.itemgetter(0), d1[chrom]))))
for pos in sorted_pos_list:
if (pos, True) in d1[chrom]:
d2[chrom][(pos, True)] = idx
idx += 1
if (pos, False) in d1[chrom]:
d2[chrom][(pos, False)] = idx
idx += 1
return idx, d2
# input a tree (Tree object) and n (number of leaf nodes)
# output a r and a dictionary
# key: chrom
# val: dictionary
# key: (bgn, end) tuple
# val: index
def get_seg_copy_num_idx_dict(tree, n):
d1 = dict()
for idx in range(1, n + 1):
temp_copy_nums_dict = tree.idx_node_dict[idx].geneProf.get_copy_nums_dict()
for chrom in temp_copy_nums_dict.keys():
if chrom not in d1:
d1[chrom] = list()
(bgns, ends, cps) = temp_copy_nums_dict[chrom]
d1[chrom].append([bgns, ends, cps])
idx = 0
sorted_chrom = sorted(d1.keys())
d2 = dict()
bgn2idx = dict()
end2idx = dict()
for chrom in sorted_chrom:
d2[chrom] = dict()
# random generate usages to match the input format
usages = [float(1/len(d1[chrom]))] * len(d1[chrom])
[res_bgns, res_ends, res_cps] = ccn.combine_copy_nums(d1[chrom], usages)
for i in range(len(res_bgns)):
d2[chrom][(res_bgns[i], res_ends[i])] = idx
bgn2idx[(chrom, res_bgns[i])] = idx
end2idx[(chrom, res_ends[i])] = idx
idx += 1
return idx, d2, bgn2idx, end2idx
def make_2d_list(rows, cols):
result = list()
for r in range(rows):
result.append([0] * cols)
return result
# loop through each node in tree(Tree),
# for each treeNode: use self.get_copy_nums_dict() to get bgns, ends, cps list for each chromosomes
# use self.get_sv_read_nums_dict(cov, read_len) to get bps and their corresponding information for each chromosomes
# output c ((2n-1)*(l+r) matrix)
def generate_c(tree, n, constants_dict):
l, sv_cn_idx_dict = get_bp_copy_num_idx_dict(tree, n, constants_dict)
r, seg_cn_idx_dict, seg_bgn_idx_dict, seg_end_idx_dict = get_seg_copy_num_idx_dict(tree, n)
c = make_2d_list(len(tree.node_list), (l + r))
for idx in tree.node_list:
row = idx - 1
# add copy number for break points
temp_bp_dict = tree.idx_node_dict[idx].geneProf.get_sv_read_nums_dict(constants_dict['cov'], constants_dict['read_len'])
for chrom in temp_bp_dict:
for (pos, isLeft) in temp_bp_dict[chrom]:
cp = temp_bp_dict[chrom][(pos, isLeft)]["copy_num"]
col = sv_cn_idx_dict[chrom][(pos, isLeft)]
c[row][col] = cp
# add copy number for segments
temp_copy_nums_dict = tree.idx_node_dict[idx].geneProf.get_copy_nums_dict()
for chrom in temp_copy_nums_dict:
(bgns, ends, cps) = temp_copy_nums_dict[chrom]
for i in range(len(bgns)):
cp = cps[i]
seg_indices_list = get_indices_for_segment(seg_bgn_idx_dict, seg_end_idx_dict, (chrom, bgns[i]), (chrom, ends[i]))
for j in range(len(seg_indices_list)):
col = seg_indices_list[j] + l
c[row][col] = cp
result = np.array(c)
return result
# given start and end position, output list of segment indices (continuous)
# s = (chrom, bgn_pos), e = (chrom, end_pos)
def get_indices_for_segment(bgn2idx, end2idx, s, e):
result = list()
firstIdx = bgn2idx[s]
lastIdx = end2idx[e]
for i in range(firstIdx, lastIdx + 1):
result.append(i)
return result
# return a ((2n-1) * r) matrix contains paternal chrom copy number for each segment
# and a ((2n-1) * r) matrix contains maternal chrom copy number for each segment
def generate_seg_cp_paternal(tree, n):
r, seg_cn_idx_dict, seg_bgn_idx_dict, seg_end_idx_dict = get_seg_copy_num_idx_dict(tree, n)
c_p = make_2d_list(len(tree.node_list), r)
c_m = make_2d_list(len(tree.node_list), r)
for idx in tree.node_list:
row = idx - 1
temp_chrom_dict = tree.idx_node_dict[idx].geneProf.chrom_dict
# temp_copy_nums_dict = tree.idx_node_dict[idx].geneProf.get_copy_nums_dict()
for chrom in list(filter(lambda x: x[1] == 0, temp_chrom_dict.keys())):
(bgns, ends, cps) = temp_chrom_dict[chrom].get_copy_nums()
for i in range(len(bgns)):
cp = cps[i]
seg_indices_list = get_indices_for_segment(seg_bgn_idx_dict, seg_end_idx_dict, (chrom[0], bgns[i]), (chrom[0], ends[i]))
for col in seg_indices_list:
c_p[row][col] = cp
for chrom in list(filter(lambda x: x[1] == 1, temp_chrom_dict.keys())):
(bgns, ends, cps) = temp_chrom_dict[chrom].get_copy_nums()
for i in range(len(bgns)):
cp = cps[i]
seg_indices_list = get_indices_for_segment(seg_bgn_idx_dict, seg_end_idx_dict, (chrom[0], bgns[i]), (chrom[0], ends[i]))
for col in seg_indices_list:
c_m[row][col] = cp
return c_p, c_m
# given u (m * (2n-1) matrix) and c ((2n-1)*(l+r) matrix), output f (m * (l+r) matrix)
def generate_f(u, c):
return np.dot(u, c)
# return matrix a, matrix h, and dictionary mate_dict
# matrix a: a ((2n-1) * l) matrix contains mated_reads info
# matrix h: a ((2n-1) * l) matrix contains total_reads info
# mate_dict:
# key: (chrom, pos, isLeft)
# val: (mate_chrom, mate_pos, mate_isLeft)
def get_a_h_mate_dict(tree, n, constants_dict):
l, sv_cn_idx_dict = get_bp_copy_num_idx_dict(tree, n, constants_dict)
a, h = make_2d_list(len(tree.node_list), l), make_2d_list(len(tree.node_list), l)
mate_dict = {}
for node_name in tree.node_list:
gene_prof = tree.idx_node_dict[node_name].geneProf
sv_dict = gene_prof.get_sv_read_nums_dict(constants_dict['cov'], constants_dict['read_len'])
for chrm in sv_dict.keys():
for cur_pos, cur_is_left in sv_dict[chrm]:
mat_pos, mat_is_left = sv_dict[chrm][(cur_pos, cur_is_left)]['mate']
cur_key, mat_key = (chrm, cur_pos, cur_is_left), (chrm, mat_pos, mat_is_left)
if cur_key not in mate_dict:
mate_dict[cur_key] = mat_key
elif mate_dict[cur_key] != mat_key:
print 'There was an error generating SVs. Rerun sim.py until there are no errors.'
print 'cur_key:\t' + str(cur_key)
print 'mat_key:\t' + str(mat_key)
print 'cur_key was already mated with:\t' + str(mate_dict[cur_key])
exit()
if mat_key not in mate_dict:
mate_dict[mat_key] = cur_key
elif mate_dict[mat_key] != cur_key:
print 'There was an error generating SVs. Rerun sim.py until there are no errors.'
print 'cur_key:\t' + str(cur_key)
print 'mat_key:\t' + str(mat_key)
print 'mat_key was already mated with:\t' + str(mate_dict[mat_key])
exit()
j = sv_cn_idx_dict[chrm][(cur_pos, cur_is_left)]
a[node_name - 1][j] = sv_dict[chrm][(cur_pos, cur_is_left)]['mated_reads']
h[node_name - 1][j] = sv_dict[chrm][(cur_pos, cur_is_left)]['total_reads']
return a, h, mate_dict
# given a matrix, save as tsv file
def output_tsv(mtx, output_file, output_folder):
with open(output_folder + output_file, "w") as f:
f.write("\n".join("\t".join(map(str, x)) for x in mtx))
# given cnv_idx(int) and r(int), output rec_id(str)
# eg. given rec_idx = 1, r = 12, output 'cnv01'
def get_cnv_rec_id(cnv_idx, r):
return 'cnv' + str(cnv_idx).zfill(len(str(r)))
# given sv_idx(int) and r(int), output rec_id(str)
def get_sv_rec_id(sv_idx, l):
return 'sv' + str(sv_idx).zfill(len(str(l)))
# a, h, mate_dict = get_a_h_mate_dict(t, n, constants_dict)
# generate a vcf file for each sample
def generate_s(metaFile, tree, l, sv_cn_idx_dict, r, seg_cn_idx_dict, seg_bgn_idx_dict, seg_end_idx_dict, F, U, C, c_p, c_m, a, h, mate_dict, outputFolder):
vcf_reader = vcf.Reader(open(metaFile, 'r'))
vcf_reader.metadata['filedate'][0] = datetime.datetime.now().date().strftime('%Y%m%d') # set date to current date
f_p = np.dot(U, c_p)
f_m = np.dot(U, c_m)
mixed_a = np.dot(U, a) # m * l
mixed_h = np.dot(U, h) # m * l
for i in range(len(U)):
sample_idx = i + 1
temp_file = outputFolder + '/sample' + str(sample_idx) + '.vcf'
temp_writer = vcf.Writer(open(temp_file, 'w'), vcf_reader)
alt_type, gt_cnv = 'CNV', '1|1' # constants for all cnv records
for chrom in sorted(seg_cn_idx_dict.keys()):
for (key, val) in sorted(seg_cn_idx_dict[chrom].items(), key = lambda x: x[1]):
pos = key[0]
rec_id = get_cnv_rec_id(val, r)
info_end = key[1]
cn = [f_p[i][val], f_m[i][val]]
temp_writer.write_record(generate_cnv(chrom, pos, rec_id, alt_type, info_end, gt_cnv, cn))
alt_ori, alt_cS, alt_wMA, gt_sv = True, str(), True, '1|0' # constants for all sv records
for chrom in sorted(sv_cn_idx_dict.keys()):
for (key, val) in sorted(sv_cn_idx_dict[chrom].items(), key = lambda x: x[1]):
pos, isLeft = key[0], key[1]
rec_id = get_sv_rec_id(val, l)
(mate_chrom, mate_pos, mate_isLeft) = mate_dict[(chrom, pos, isLeft)]
mate_id = sv_cn_idx_dict[mate_chrom][(mate_pos, mate_isLeft)]
alt_chr, alt_pos = mate_chrom, mate_pos
cnadj = F[i][val]
bdp, dp = int(round(mixed_a[i][val])), int(round(mixed_h[i][val]))
info_mateid = get_sv_rec_id(mate_id, l)
alt_rO = False if mate_isLeft == True else True
temp_writer.write_record(generate_sv(chrom, pos, rec_id, alt_chr, alt_pos, alt_ori, alt_rO, alt_cS, alt_wMA, info_mateid, gt_sv, cnadj, bdp, dp))
# chrom(str), pos(int), rec_id(str), ref(str), qual = None, filter(list), fmt = 'GT:CNADJ', sample = ['TUMOR', 'NORMAL']
# type(alts): list
# type(alts[0]) = class 'vcf.model._Breakend'
# dir(alts[0]): [..., 'chr', 'connectingSequence', 'orientation', 'pos', 'remoteOrientation', 'type', 'withinMainAssembly']
# eg. alts[0] = ]1:149965077]
# 'chr' = str(1), 'connectingSequence' = str(), 'orientation' = True, 'pos' = int(149965077)
# 'remoteOrientation' = False, 'type' = 'BND', 'withinMainAssembly' = True
# info(dict), info['SVTYPE'] = 'BND', info['MATEID'] = mate_sv_rec_id (str)
def generate_sv(chrm, pos, rec_id, alt_chr, alt_pos, alt_ori, alt_rO, alt_cS, alt_wMA, info_mateid, gt, cnadj, bdp, dp):
ref = '.'
alts = list()
alts.append(vcf.parser._Breakend(alt_chr, alt_pos, alt_ori, alt_rO, alt_cS, alt_wMA))
qual = None
filt = list()
info = dict()
info['SVTYPE'] = 'BND'
info['MATEID'] = info_mateid
fmt = 'GT:CNADJ:BDP:DP'
samples = ['TUMOR', 'NORMAL']
calls = [vcf.model._Call(0, 'TUMOR', svCallData(gt,cnadj,bdp,dp)), vcf.model._Call(1, 'NORMAL', svCallData('0|0',0, 0, dp))]
newRec = vcf.model._Record(chrm, pos, rec_id, ref, alts, qual, filt, info, fmt, samples, calls)
return newRec
def generate_cnv(chrm, pos, rec_id, alt_type, info_end, gt, cn):
ref = '.'
alts = list()
alts.append(vcf.model._SV(alt_type))
qual = None
filt = list()
info = dict()
info['IMPRECISE'] = True
info['END'] = info_end
fmt = 'GT:CN'
samples = ['TUMOR', 'NORMAL']
calls = [vcf.model._Call(0, 'TUMOR', cnvCallData(gt, cn)), vcf.model._Call(1, 'NORMAL', cnvCallData('0|0',[1, 1]))]
newRec = vcf.model._Record(chrm, pos, rec_id, ref, alts, qual, filt, info, fmt, samples, calls)
return newRec
def is_cnv_record(rec):
return rec.ID[0:3] == 'cnv'
def is_sv_record(rec):
return rec.ID[0:2] == 'sv'
#########
# Class #
#########
class Tree:
def __init__(self, edge_list, gp):
self.edge_list = edge_list
self.geneProf = gp # without any mutation
self.node_list = self.get_node_list()
self.rootNode, self.idx_node_dict = self.construct_tree()
def get_node_list(self):
l = list()
for (idx, parent_idx, lr) in self.edge_list:
if idx not in l:
l.append(idx)
if parent_idx not in l:
l.append(parent_idx)
return l
# construct the tree structure, return root node (treeNode) and a dictionary
# key: treeNode.index, val: treeNode
def construct_tree(self):
d = dict()
for (idx, parent_idx, lr) in self.edge_list:
# add new node to d
if parent_idx not in d:
parentNode = TreeNode(parent_idx, self.geneProf)
d[parent_idx] = parentNode
if idx not in d:
childNode = TreeNode(idx, self.geneProf)
d[idx] = childNode
# add left or right child
if lr == 'l': # left child
d[parent_idx].left = d[idx]
elif lr == 'r': # right child
d[parent_idx].right = d[idx]
# add parent node
d[idx].parent = d[parent_idx]
for nodeIdx in d:
if d[nodeIdx].parent == None: # root node
rootNode = d[nodeIdx]
return rootNode, d
def print_tree_info(self):
for idx in self.idx_node_dict:
print 'node:', idx
print self.idx_node_dict[idx].geneProf.print_chrm_seq()
# print current node index, parent node index, left child index, and right child index
def print_node_relation(self):
for idx in self.idx_node_dict:
print 'node:', self.idx_node_dict[idx].index
if self.idx_node_dict[idx].parent != None:
print 'parent node:', self.idx_node_dict[idx].parent.index
else:
print 'no parent node!'
if self.idx_node_dict[idx].left != None:
print 'left node:', self.idx_node_dict[idx].left.index
else:
print 'no left child node!'
if self.idx_node_dict[idx].right != None:
print 'right node:', self.idx_node_dict[idx].right.index
else:
print 'no right child node!'
print ""
def get_number_of_leaves(self, l):
if l[0] == 1 and l[1] == 1:
return 2
elif l[0] == 1 and l[1] != 1:
return 1 + get_number_of_leaves(l[1])
elif l[0] != 1 and l[1] == 1:
return 1 + get_number_of_leaves(l[0])
else:
return get_number_of_leaves(l[0]) + get_number_of_leaves(l[1])
def print_node_info(self):
for idx in self.idx_node_dict:
print 'node', idx, ':', self.idx_node_dict[idx].geneProf.print_info()
def print_node_gp(self):
for idx in self.idx_node_dict:
print 'node', idx, ':', self.idx_node_dict[idx].geneProf.print_chrm_seq()
def add_mutations_along_edges(self, node, geneprof_list):
if not node:
return
curr_gp = node.geneProf
geneprof_list.append(curr_gp)
# print 'node:', node.index, 'geneprof_list:', geneprof_list
if node.left != None:
curr_gp_copied_left = curr_gp.deepcopy()
# reset copied_node.geneProf.mutCount and copied_node.geneProf.maxCount
curr_gp_copied_left.mutCount, curr_gp_copied_left.maxCount = 0, curr_gp_copied_left.get_mut_count()
curr_gp_copied_left.multi_mutations(geneprof_list)
node.left.geneProf = curr_gp_copied_left
self.add_mutations_along_edges(node.left, geneprof_list)
if node.right != None:
curr_gp_copied_right = curr_gp.deepcopy()
# reset copied_node.geneProf.mutCount and copied_node.geneProf.maxCount
curr_gp_copied_right.mutCount, curr_gp_copied_right.maxCount = 0, curr_gp_copied_right.get_mut_count()
curr_gp_copied_right.multi_mutations(geneprof_list)
node.right.geneProf = curr_gp_copied_right
self.add_mutations_along_edges(node.right, geneprof_list)
return
class TreeNode:
def __init__(self, index, gp):
self.index = index
self.geneProf = gp
self.left = None
self.right = None
self.parent = None
class svCallData:
def __init__(self, gt = '0|0', cnadj = '0', bdp = '0', dp = '100'):
self.GT = gt
self.CNADJ = str(cnadj)
self.BDP = str(bdp)
self.DP = str(dp)
self.__getitem__ = self
def __call__(self, var):
return [self.CNADJ, self.BDP, self.DP]
class cnvCallData:
def __init__(self, gt = '0|0', cns = [1, 1]):
self.GT = gt
self.CN = cns
self.__getitem__ = self
def __call__(self, var):
return [','.join(str(x) for x in self.CN)]
###########################################
##### COMMAND LINE ARGUMENT FUNCTIONS #####
###########################################
def get_args(argv):
parser = argparse.ArgumentParser(prog = 'sim.py', description = "generate U.tsv, C.tsv, T.dot, sample.vcf")
parser.add_argument('-f', '--metadata_file', type = str, dest = "meta_file", required = True)
parser.add_argument('-m', '--num_samples', type = int, dest = "m", required = True)
parser.add_argument('-n', '--num_leaves', type = int, dest = "n", required = True)
parser.add_argument('-c', '--total_number_of_mutations', type = int, dest = "num_mutes", required = True)
parser.add_argument('-s', '--expect_mut_len', type = int, dest = "size_mutes", required = True)
parser.add_argument('-o', '--output_folder', type = str, dest = "output_folder", required = True)
return vars(parser.parse_args(argv))
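# Example invocation (illustrative; the file and folder names are placeholders,
# -s 5745000 matches the default expected mutation size mentioned above):
# python sim.py -f metadata.vcf -m 5 -n 3 -c 100 -s 5745000 -o ./sim_output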
##############################
##### CALL MAIN FUNCTION #####
##############################
if __name__ == "__main__":
main(sys.argv[1:]) |
py | 1a52d720f33ab602533348010a6dd2a91dc8e64b | from __future__ import absolute_import, unicode_literals
from tasks import app as celery_app
__all__ = ['celery_app'] |
py | 1a52d763629b8c0dabacd02d7f26968b448d0c6b | """Macaw colormap.
This is self-defined colormap similar to viridis generated with viscm.
BSD 3-Clause License
Copyright (c) 2020-2021, Daniel Nagel
All rights reserved.
"""
# ~~~ IMPORT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
from matplotlib import colors as clr
# ~~~ CMAP ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
CM_MACAW_DATA = [
[0.19058156, 0.05187944, 0.50289419],
[0.18954119, 0.05806460, 0.50959124],
[0.18847439, 0.06392324, 0.51625442],
[0.18730591, 0.06950876, 0.52298851],
[0.18602844, 0.07486335, 0.52979742],
[0.18462615, 0.08001914, 0.53669617],
[0.18310049, 0.08500270, 0.54367455],
[0.18143568, 0.08983444, 0.55074552],
[0.17962612, 0.09453176, 0.55790684],
[0.17765937, 0.09910869, 0.56516442],
[0.17553274, 0.10357790, 0.57251009],
[0.17322480, 0.10794869, 0.57995868],
[0.17073274, 0.11223064, 0.58749950],
[0.16803817, 0.11643087, 0.59513984],
[0.16512046, 0.12055543, 0.60288710],
[0.16196874, 0.12461064, 0.61073458],
[0.15855956, 0.12860117, 0.61868760],
[0.15487150, 0.13253595, 0.62673743],
[0.15083313, 0.13656790, 0.63463945],
[0.14631781, 0.14075836, 0.64234417],
[0.14125338, 0.14513510, 0.64979801],
[0.13554961, 0.14972929, 0.65693645],
[0.12909038, 0.15457590, 0.66368238],
[0.12175680, 0.15973889, 0.66986869],
[0.11335556, 0.16526020, 0.67537829],
[0.10368472, 0.17121161, 0.67997378],
[0.09249210, 0.17766098, 0.68337230],
[0.07951637, 0.18464906, 0.68525372],
[0.06463115, 0.19217895, 0.68522460],
[0.04792058, 0.20011790, 0.68310082],
[0.03049412, 0.20821928, 0.67897102],
[0.01673951, 0.21619067, 0.67327099],
[0.00758631, 0.22382123, 0.66652851],
[0.00269288, 0.23100257, 0.65925719],
[0.00158900, 0.23772009, 0.65176198],
[0.00373727, 0.24400003, 0.64423947],
[0.00863564, 0.24987935, 0.63682943],
[0.01592935, 0.25540460, 0.62957975],
[0.02526202, 0.26061394, 0.62255010],
[0.03644893, 0.26554722, 0.61572782],
[0.04808908, 0.27026523, 0.60918586],
[0.05799278, 0.27486365, 0.60295340],
[0.06662470, 0.27935889, 0.59700827],
[0.07427470, 0.28376252, 0.59134582],
[0.08112675, 0.28808472, 0.58596186],
[0.08730584, 0.29233460, 0.58085244],
[0.09290131, 0.29652027, 0.57601351],
[0.09797941, 0.30064901, 0.57144114],
[0.10258160, 0.30472823, 0.56713256],
[0.10675656, 0.30876306, 0.56308197],
[0.11053742, 0.31275862, 0.55928464],
[0.11394960, 0.31671971, 0.55573576],
[0.11701374, 0.32065082, 0.55242977],
[0.11974671, 0.32455583, 0.54936183],
[0.12216208, 0.32843854, 0.54652563],
[0.12427086, 0.33230225, 0.54391559],
[0.12608149, 0.33615017, 0.54152519],
[0.12760054, 0.33998509, 0.53934850],
[0.12883214, 0.34380985, 0.53737833],
[0.12977917, 0.34762677, 0.53560881],
[0.13044216, 0.35143829, 0.53403281],
[0.13081987, 0.35524663, 0.53264340],
[0.13090933, 0.35905383, 0.53143386],
[0.13069151, 0.36286329, 0.53039973],
[0.13017051, 0.36667556, 0.52953264],
[0.12933735, 0.37049236, 0.52882549],
[0.12818021, 0.37431527, 0.52827171],
[0.12668440, 0.37814575, 0.52786479],
[0.12480372, 0.38198787, 0.52760245],
[0.12253981, 0.38584057, 0.52747446],
[0.11986539, 0.38970506, 0.52747450],
[0.11672338, 0.39358453, 0.52759944],
[0.11307467, 0.39747956, 0.52784292],
[0.10925654, 0.40136808, 0.52809131],
[0.10577722, 0.40522092, 0.52820478],
[0.10265366, 0.40903876, 0.52819928],
[0.09991901, 0.41282127, 0.52808615],
[0.09772109, 0.41656205, 0.52784963],
[0.09601252, 0.42026534, 0.52751928],
[0.09486795, 0.42392856, 0.52709424],
[0.09434407, 0.42754991, 0.52657606],
[0.09438345, 0.43113307, 0.52598810],
[0.09509197, 0.43467296, 0.52531564],
[0.09635834, 0.43817505, 0.52458762],
[0.09822100, 0.44163693, 0.52379717],
[0.10061058, 0.44506123, 0.52295921],
[0.10350092, 0.44844829, 0.52207617],
[0.10683689, 0.45179979, 0.52115587],
[0.11057809, 0.45511651, 0.52020322],
[0.11465448, 0.45840131, 0.51922642],
[0.11905762, 0.46165331, 0.51822257],
[0.12367500, 0.46487844, 0.51721006],
[0.12856585, 0.46807181, 0.51617148],
[0.13360141, 0.47124106, 0.51513069],
[0.13878242, 0.47438574, 0.51408241],
[0.14410420, 0.47750565, 0.51302374],
[0.14949549, 0.48060578, 0.51196760],
[0.15495525, 0.48368624, 0.51091074],
[0.16049172, 0.48674623, 0.50984867],
[0.16604880, 0.48979059, 0.50879143],
[0.17161672, 0.49282052, 0.50773901],
[0.17720503, 0.49583556, 0.50668647],
[0.18280577, 0.49883682, 0.50563324],
[0.18839033, 0.50182730, 0.50458623],
[0.19395551, 0.50480816, 0.50354212],
[0.19949671, 0.50778037, 0.50250156],
[0.20501472, 0.51074450, 0.50146239],
[0.21051968, 0.51370023, 0.50041836],
[0.21598949, 0.51665032, 0.49937626],
[0.22142483, 0.51959567, 0.49833147],
[0.22682313, 0.52253702, 0.49728571],
[0.23218567, 0.52547515, 0.49623434],
[0.23751130, 0.52841072, 0.49517762],
[0.24280071, 0.53134438, 0.49411291],
[0.24805496, 0.53427675, 0.49303727],
[0.25327337, 0.53720832, 0.49195140],
[0.25845821, 0.54013961, 0.49085082],
[0.26361046, 0.54307107, 0.48973375],
[0.26873137, 0.54600306, 0.48859829],
[0.27382187, 0.54893593, 0.48744343],
[0.27888467, 0.55186996, 0.48626493],
[0.28392435, 0.55480540, 0.48505484],
[0.28900262, 0.55773063, 0.48382426],
[0.29413974, 0.56064222, 0.48256931],
[0.29933930, 0.56353921, 0.48128992],
[0.30460443, 0.56642066, 0.47998659],
[0.30993814, 0.56928561, 0.47865969],
[0.31534355, 0.57213311, 0.47730916],
[0.32082389, 0.57496214, 0.47593445],
[0.32638070, 0.57777180, 0.47453762],
[0.33201619, 0.58056111, 0.47311926],
[0.33773289, 0.58332911, 0.47167917],
[0.34353170, 0.58607490, 0.47021948],
[0.34941643, 0.58879691, 0.46874323],
[0.35539061, 0.59149377, 0.46725090],
[0.36144955, 0.59416573, 0.46574422],
[0.36759411, 0.59681194, 0.46422404],
[0.37382414, 0.59943167, 0.46269235],
[0.38013888, 0.60202427, 0.46115158],
[0.38653831, 0.60458903, 0.45960306],
[0.39302085, 0.60712544, 0.45804979],
[0.39958560, 0.60963295, 0.45649371],
[0.40623102, 0.61211111, 0.45493730],
[0.41295537, 0.61455952, 0.45338306],
[0.41975689, 0.61697782, 0.45183335],
[0.42663689, 0.61936474, 0.45029309],
[0.43359518, 0.62171941, 0.44876629],
[0.44062362, 0.62404336, 0.44725157],
[0.44771948, 0.62633657, 0.44575159],
[0.45488058, 0.62859894, 0.44426831],
[0.46210390, 0.63083057, 0.44280444],
[0.46938718, 0.63303146, 0.44136181],
[0.47672758, 0.63520176, 0.43994273],
[0.48412245, 0.63734164, 0.43854929],
[0.49156972, 0.63945118, 0.43718289],
[0.49906579, 0.64153081, 0.43584635],
[0.50660936, 0.64358054, 0.43454029],
[0.51420161, 0.64559928, 0.43327054],
[0.52183597, 0.64758876, 0.43203524],
[0.52951071, 0.64954923, 0.43083420],
[0.53722227, 0.65148137, 0.42966989],
[0.54496971, 0.65338524, 0.42854256],
[0.55275129, 0.65526109, 0.42745314],
[0.56056486, 0.65710934, 0.42640291],
[0.56840818, 0.65893047, 0.42539318],
[0.57628132, 0.66072433, 0.42442350],
[0.58418211, 0.66249141, 0.42349515],
[0.59210876, 0.66423213, 0.42260915],
[0.60005987, 0.66594682, 0.42176622],
[0.60803548, 0.66763539, 0.42096604],
[0.61603390, 0.66929830, 0.42020960],
[0.62405442, 0.67093569, 0.41949718],
[0.63209596, 0.67254786, 0.41882938],
[0.64015622, 0.67413550, 0.41820762],
[0.64823610, 0.67569830, 0.41763113],
[0.65633376, 0.67723686, 0.41710106],
[0.66445362, 0.67874915, 0.41662226],
[0.67259044, 0.68023734, 0.41619149],
[0.68074383, 0.68170156, 0.41580848],
[0.68891283, 0.68314217, 0.41547379],
[0.69709647, 0.68455952, 0.41518798],
[0.70529471, 0.68595364, 0.41495099],
[0.71350597, 0.68732515, 0.41476373],
[0.72173070, 0.68867392, 0.41462583],
[0.73000998, 0.68998466, 0.41450613],
[0.73829277, 0.69128478, 0.41430330],
[0.74659018, 0.69257004, 0.41401107],
[0.75491011, 0.69383731, 0.41362380],
[0.76324925, 0.69508807, 0.41313892],
[0.77160955, 0.69632176, 0.41254970],
[0.77998812, 0.69753979, 0.41185185],
[0.78838979, 0.69874052, 0.41103463],
[0.79681252, 0.69992514, 0.41009100],
[0.80525224, 0.70109576, 0.40901451],
[0.81371833, 0.70224898, 0.40778666],
[0.82218335, 0.70339787, 0.40640285],
[0.83066641, 0.70453472, 0.40484057],
[0.83916828, 0.70565993, 0.40308387],
[0.84768783, 0.70677497, 0.40111375],
[0.85619810, 0.70789456, 0.39890385],
[0.86470812, 0.70901596, 0.39642621],
[0.87321308, 0.71014341, 0.39364913],
[0.88168496, 0.71129398, 0.39052555],
[0.89011144, 0.71247634, 0.38702014],
[0.89847380, 0.71370378, 0.38307790],
[0.90671642, 0.71500913, 0.37864563],
[0.91477514, 0.71643005, 0.37367920],
[0.92257363, 0.71801064, 0.36815123],
[0.92998748, 0.71981955, 0.36207705],
[0.93690366, 0.72191679, 0.35554231],
[0.94320799, 0.72435942, 0.34871695],
[0.94880339, 0.72719287, 0.34182459],
[0.95368361, 0.73041301, 0.33507907],
[0.95791320, 0.73397897, 0.32863186],
[0.96156968, 0.73784557, 0.32255616],
[0.96474147, 0.74196301, 0.31687987],
[0.96751348, 0.74628535, 0.31158078],
[0.96995295, 0.75077649, 0.30662850],
[0.97208994, 0.75542021, 0.30199675],
[0.97401256, 0.76017018, 0.29763355],
[0.97570333, 0.76503552, 0.29352109],
[0.97723918, 0.76997570, 0.28962258],
[0.97860610, 0.77499845, 0.28591054],
[0.97982931, 0.78009040, 0.28236357],
[0.98093791, 0.78523620, 0.27896562],
[0.98193715, 0.79043341, 0.27569295],
[0.98283325, 0.79567880, 0.27252878],
[0.98359296, 0.80099206, 0.26937692],
[0.98434061, 0.80631328, 0.26612485],
[0.98507077, 0.81164585, 0.26275080],
[0.98578487, 0.81698903, 0.25925215],
[0.98648115, 0.82234384, 0.25561899],
[0.98715714, 0.82771170, 0.25183903],
[0.98782455, 0.83308608, 0.24793183],
[0.98847119, 0.83847386, 0.24386181],
[0.98909511, 0.84387611, 0.23961519],
[0.98970390, 0.84928864, 0.23520039],
[0.99029313, 0.85471390, 0.23059595],
[0.99085534, 0.86015592, 0.22577035],
[0.99140774, 0.86560540, 0.22075415],
[0.99192644, 0.87107513, 0.21546858],
[0.99243373, 0.87655320, 0.20995525],
[0.99291025, 0.88204984, 0.20413959],
[0.99336479, 0.88756033, 0.19802097],
[0.99379712, 0.89308477, 0.19156975],
[0.99420035, 0.89862665, 0.18472928],
[0.99457573, 0.90418523, 0.17745945],
[0.99492529, 0.90975938, 0.16971376],
[0.99524338, 0.91535187, 0.16140537],
[0.99552698, 0.92096406, 0.15243487],
[0.99578712, 0.92659034, 0.14273134],
[0.99600800, 0.93223841, 0.13207549],
[0.99619108, 0.93790735, 0.12024698],
[0.99632933, 0.94360034, 0.10686924],
[0.99641293, 0.94932183, 0.09131728],
[0.99632399, 0.95512747, 0.07162400],
]
def _macaw():
return clr.LinearSegmentedColormap.from_list('macaw', CM_MACAW_DATA)
|
py | 1a52d77395208a59a3a115eb3e9f7562f6459615 | # -*- coding: utf-8 -*-
import logging
import subprocess
from imghdr import what as determinetype
from django.core.files.base import ContentFile
from django.core.files.temp import NamedTemporaryFile
from easy_thumbnails.optimize.conf import settings
try:
from subprocess import check_output
except ImportError:
def check_output(*popenargs, **kwargs):
"""
Run command with arguments and return its output as a byte string.
Backported from Python 2.7 as it's implemented as pure python on stdlib.
"""
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
error = subprocess.CalledProcessError(retcode, cmd)
error.output = output
raise error
return output
logger = logging.getLogger('easy_thumbnails.optimize')
def optimize_thumbnail(thumbnail):
'''Optimize thumbnail images by removing unnecessary data'''
try:
optimize_command = settings.THUMBNAIL_OPTIMIZE_COMMAND[determinetype(thumbnail.path)]
if not optimize_command:
return
except (TypeError, KeyError, NotImplementedError):
return
storage = thumbnail.storage
try:
with NamedTemporaryFile() as temp_file:
thumbnail.seek(0)
temp_file.write(thumbnail.read())
temp_file.flush()
optimize_command = optimize_command.format(filename=temp_file.name)
output = check_output(optimize_command, stderr=subprocess.STDOUT, shell=True)
if output:
logger.warn('{0} returned {1}'.format(optimize_command, output))
else:
logger.info('{0} returned nothing'.format(optimize_command))
with open(temp_file.name, 'rb') as f:
thumbnail.file = ContentFile(f.read())
storage.delete(thumbnail.path)
storage.save(thumbnail.path, thumbnail)
except Exception as e:
logger.error(e)
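# --- Configuration sketch (illustrative; not part of the original module) ---
# optimize_thumbnail() looks up a shell command by image type (as returned by
# imghdr.what) and substitutes the temporary file name for {filename}. A Django
# settings entry could therefore look roughly like the following; the exact
# tools and flags are assumptions, not requirements of this module:
#
#   THUMBNAIL_OPTIMIZE_COMMAND = {
#       'png': 'optipng -o5 {filename}',
#       'gif': 'optipng -o5 {filename}',
#       'jpeg': 'jpegoptim {filename}',
#   }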
|
py | 1a52d85b2d4e0fa51a57076633c51940d2a9fd3b | # Autogenerated, do not edit. All changes will be undone.
from typing import List
from uuid import UUID
from pyhap.characteristic import (
Characteristic,
CharacteristicPermission,
)
class Logs(Characteristic):
@property
def characteristic_uuid(self) -> UUID:
return UUID('0000001F-0000-1000-8000-0026BB765291')
@property
def characteristic_type(self) -> str:
return 'public.hap.characteristic.logs'
@property
def characteristic_format(self) -> str:
return 'tlv'
@property
def permissions(self) -> List[CharacteristicPermission]:
return [
CharacteristicPermission.pair_read,
CharacteristicPermission.notify,
]
|
py | 1a52d87065cb9173f32a03405fc1d86a5e178e00 | from No import No
from Estado import Estado
from collections import deque
def executaBFS():
"""
This function runs the BFS search using a queue
"""
# create the queue
fila = deque([])
# since this is a graph, keep a list of visited states
visitados = []
# create the root node
estadoInicial = Estado()
raiz = No(estadoInicial)
# add it to the queue and to the visited list
fila.append(raiz)
visitados.append(raiz.estado.nome)
# keep looping while there is still something to dequeue
while len(fila) > 0:
# take the first item off the queue
noAtual = fila.popleft()
print ("-- dequeue --", noAtual.estado.nome)
# check whether this is the goal state
if noAtual.estado.funcaoObjetivo():
print ("Reached the goal state")
# print the path
#faz o print do caminho
print ("----------------------")
print ("Caminho")
noAtual.printCaminho()
break
# get the child nodes
estadosFilhos = noAtual.estado.funcaoSucessora()
for estadoFilho in estadosFilhos:
noFilho = No(Estado(estadoFilho))
# check whether the node has not been visited yet
if noFilho.estado.nome not in visitados:
# add it to the visited list
visitados.append(noFilho.estado.nome )
# add it to the tree and to the queue
noAtual.addFilho(noFilho)
fila.append(noFilho)
# print the tree
print ("----------------------")
print ("Tree")
raiz.printArvore()
executaBFS() |
py | 1a52d89bbb41348dd37b32c86e816eeb94df70b5 | """This module defines the funtions byref_at(cobj, offset)
and cast_field(struct, fieldname, fieldtype).
"""
from ctypes import *
def _calc_offset():
# Internal helper function that calculates where the object
# returned by a byref() call stores the pointer.
# The definition of PyCArgObject in C code (that is the type of
# object that a byref() call returns):
class PyCArgObject(Structure):
class value(Union):
_fields_ = [("c", c_char),
("h", c_short),
("i", c_int),
("l", c_long),
("q", c_longlong),
("d", c_double),
("f", c_float),
("p", c_void_p)]
#
# Thanks to Lenard Lindstrom for this tip:
# sizeof(PyObject_HEAD) is the same as object.__basicsize__.
#
_fields_ = [("PyObject_HEAD", c_byte * object.__basicsize__),
("pffi_type", c_void_p),
("tag", c_char),
("value", value),
("obj", c_void_p),
("size", c_int)]
_anonymous_ = ["value"]
# additional checks to make sure that everything works as expected
if sizeof(PyCArgObject) != type(byref(c_int())).__basicsize__:
raise RuntimeError("sizeof(PyCArgObject) invalid")
obj = c_int()
ref = byref(obj)
argobj = PyCArgObject.from_address(id(ref))
if argobj.obj != id(obj) or \
argobj.p != addressof(obj) or \
argobj.tag != 'P':
raise RuntimeError("PyCArgObject field definitions incorrect")
return PyCArgObject.p.offset # offset of the pointer field
################################################################
#
# byref_at
#
def byref_at(obj, offset,
_byref=byref,
_c_void_p_from_address = c_void_p.from_address,
_byref_pointer_offset = _calc_offset()
):
"""byref_at(cobj, offset) behaves similar this C code:
(((char *)&obj) + offset)
In other words, the returned 'pointer' points to the address of
'cobj' + 'offset'. 'offset' is in units of bytes.
"""
ref = _byref(obj)
# Change the pointer field in the created byref object by adding
# 'offset' to it:
_c_void_p_from_address(id(ref)
+ _byref_pointer_offset).value += offset
return ref
################################################################
#
# cast_field
#
def cast_field(struct, fieldname, fieldtype, offset=0,
_POINTER=POINTER,
_byref_at=byref_at,
_byref=byref,
_divmod=divmod,
_sizeof=sizeof,
):
"""cast_field(struct, fieldname, fieldtype)
Return the contents of a struct field as it it were of type
'fieldtype'.
"""
fieldoffset = getattr(type(struct), fieldname).offset
return cast(_byref_at(struct, fieldoffset),
_POINTER(fieldtype))[0]
__all__ = ["byref_at", "cast_field"]
|
py | 1a52da4de32c96569aa52cc376cbb17b831af2a1 | import torch.nn as nn
class NLayerDiscriminator(nn.Module):
"""
A PatchGAN
"""
def __init__(self, input_nc, ndf=64, n_layers=3,
norm_layer=nn.BatchNorm2d):
super(NLayerDiscriminator, self).__init__()
kwidth = 4
padw = 1
sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kwidth,
stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
nf_mult = 1
nf_mult_prev = 1
# gradually increase the number of filters
for i in range(1, n_layers):
nf_mult_prev = nf_mult
nf_mult = min(2 ** i, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
kernel_size=kwidth, stride=2, padding=padw, bias=False),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
nf_mult_prev = nf_mult
nf_mult = min(2 ** n_layers, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
kernel_size=kwidth, stride=1, padding=padw, bias=False),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
# output 1 channel prediction map
sequence += [nn.Conv2d(ndf * nf_mult, 1,
kernel_size=kwidth, stride=1, padding=padw)]
self.model = nn.Sequential(*sequence)
def forward(self, input_x):
return self.model(input_x)
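# --- Usage sketch (illustrative; not part of the original module) ---
# A common configuration is a 3-layer PatchGAN over RGB images; the input size
# below is an assumption chosen only to show the resulting patch grid.
#
#   import torch
#   net_d = NLayerDiscriminator(input_nc=3, ndf=64, n_layers=3)
#   fake = torch.randn(1, 3, 256, 256)
#   scores = net_d(fake)   # per-patch real/fake scores, shape (1, 1, 30, 30) here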
|
py | 1a52da598aba93e174058e15f4fe99f95364b27b | import os
src = 'media/frame_avata_adam_gt_fit/train'
dst = 'media/frame_avata_adam_gt_fit_cyaudio/train'
number = |
py | 1a52db82633c01908f5580c46239f1634a0e7fbe | # coding=utf-8
#
# Copyright 2012 keyes.ie
#
# License: http://jkeyes.mit-license.org/
#
""" Impression module.
>>> from python_intercom import Intercom
>>> Intercom.app_id = 'dummy-app-id'
>>> Intercom.api_key = 'dummy-api-key'
>>> from python_intercom import Impression
"""
from . import Intercom
from .user import UserId
class Impression(UserId):
""" An Impression represents an interaction between a User and your
application. """
@classmethod
def create(cls, user_id=None, email=None, user_ip=None, user_agent=None,
location=None):
""" Create an Impression.
>>> Impression.create(email="[email protected]",
... location="/pricing/upgrade",
... user_ip="1.2.3.4",
... user_agent="my-service-iphone-app-1.2")
{u'unread_messages': 1}
"""
resp = Intercom.create_impression(
user_id=user_id, email=email, user_ip=user_ip,
user_agent=user_agent, location=location)
return cls(resp)
def save(self):
""" Create an Impression from this objects properties:
>>> impression = Impression()
>>> impression.email = "[email protected]"
>>> impression.location = "/pricing/upgrade"
>>> impression.user_ip = "1.2.3.4"
>>> impression.user_agent = "my-service-iphone-app-1.2"
>>> impression.save()
>>> impression.unread_messages
1
"""
resp = Intercom.create_impression(**self)
self.update(resp)
@property
def user_ip(self):
""" The IP address where this Impression originated. """
return dict.get(self, 'user_ip', None)
@user_ip.setter
def user_ip(self, user_ip):
""" Set the user IP address. """
self['user_ip'] = user_ip
@property
def location(self):
""" The location where this Impression originated e.g.
/pricing/upgrade or 'DesktopApp: Pricing' or 'iOS'. """
return dict.get(self, 'location', None)
@location.setter
def location(self, location):
""" Set the location. """
self['location'] = location
@property
def user_agent(self):
""" The User Agent that created this Impression e.g. the browser
User Agent, or the name and version of an application. """
return dict.get(self, 'user_agent', None)
@user_agent.setter
def user_agent(self, user_agent):
""" Set the User Agent. """
self['user_agent'] = user_agent
@property
def unread_messages(self):
""" The number of unread messages for the User who made the
Impression for the current location. """
return dict.get(self, 'unread_messages', None)
|
py | 1a52dc250d7614c3f323ce970234fd22a22a22ec | #!/usr/bin/python3
from keras.datasets import mnist
from keras.preprocessing.image import load_img, array_to_img
from keras.utils.np_utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense
import numpy as np
import matplotlib.pyplot as plt
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# Playing with the dataset
#
# print(X_train.size) # not sure exactly what this shows
# print(X_train.shape) # total number of images, dimension x, dimension y
# print(y_train.shape)
# print(X_test.shape)
# print(y_test.shape)
# Showing the digit 5
# print(X_train.shape, ' is ', y_train[0])
# plt.imshow(X_train[0], cmap="gray")
# plt.show(block=True)
# Preprocessing the image data
#
image_height, image_width = 28, 28
# Reshape the 60k training examples
X_train = X_train.reshape(60000, image_height * image_width)
# print(X_train.shape) # total number of images, flattened into one row each
X_test = X_test.reshape(10000, image_height * image_width)
# print(X_test.shape)
# print(X_train[0])
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255.0
X_test /= 255.0
# print(X_train[0])
# Build a model
#
# Represent the 10 digit categories
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)
# print(y_train.shape)
# print(y_test.shape)
# Model
model = Sequential()
# Model with three layers
# layer 1 with 512 neurons
# model.add(Dense(512, activation='relu', input_shape=(784,)))
model.add(Dense(512, activation='relu', input_shape=(image_height * image_width,)))
model.add(Dense(512, activation='relu'))
# layer 3 with 10 neurons and 10 outputs
model.add(Dense(10, activation='softmax'))
# Compile the model
# I think categorical_crossentropy is used because we are predicting classes
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
# Train the model
EPOCHS = 20 # epochs
history = model.fit(X_train, y_train, epochs=EPOCHS, validation_data=(X_test, y_test))
# What is the accuracy of the model?
#
# Plot the accuracy of the training model
plt.plot(history.history['acc'])
# Plot the accuracy of training and validation set
plt.plot(history.history['val_acc'])
# Accuracy of training and validation with loss
plt.plot(history.history['loss'])
plt.show()
# Evaluating the model
score = model.evaluate(X_test, y_test)
print(score)
|
py | 1a52dc51601a35116aee6d656ad33ca1c1841e16 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) 2002-2019 "Neo4j,"
# Neo4j Sweden AB [http://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import SkipTest
from pytest import raises
from neo4j.work.simple import Statement, SessionError
from neo4j.exceptions import CypherError, ClientError, TransientError
from neo4j.graph import Node, Relationship
def test_can_run_simple_statement(session):
result = session.run("RETURN 1 AS n")
for record in result:
assert record[0] == 1
assert record["n"] == 1
with raises(KeyError):
_ = record["x"]
assert record["n"] == 1
with raises(KeyError):
_ = record["x"]
with raises(TypeError):
_ = record[object()]
assert repr(record)
assert len(record) == 1
def test_can_run_simple_statement_with_params(session):
count = 0
for record in session.run("RETURN $x AS n",
{"x": {"abc": ["d", "e", "f"]}}):
assert record[0] == {"abc": ["d", "e", "f"]}
assert record["n"] == {"abc": ["d", "e", "f"]}
assert repr(record)
assert len(record) == 1
count += 1
assert count == 1
def test_autocommit_transactions_use_bookmarks(neo4j_driver):
bookmarks = []
# Generate an initial bookmark
with neo4j_driver.session() as session:
session.run("CREATE ()").consume()
bookmark = session.last_bookmark()
assert bookmark is not None
bookmarks.append(bookmark)
# Propagate into another session
with neo4j_driver.session(bookmarks=bookmarks) as session:
assert list(session.next_bookmarks()) == bookmarks
session.run("CREATE ()").consume()
bookmark = session.last_bookmark()
assert bookmark is not None
assert bookmark not in bookmarks
def test_fails_on_bad_syntax(session):
with raises(CypherError):
session.run("X").consume()
def test_fails_on_missing_parameter(session):
with raises(CypherError):
session.run("RETURN {x}").consume()
def test_can_run_statement_that_returns_multiple_records(session):
count = 0
for record in session.run("unwind(range(1, 10)) AS z RETURN z"):
assert 1 <= record[0] <= 10
count += 1
assert count == 10
def test_can_use_with_to_auto_close_session(session):
record_list = list(session.run("RETURN 1"))
assert len(record_list) == 1
for record in record_list:
assert record[0] == 1
def test_can_return_node(neo4j_driver):
with neo4j_driver.session() as session:
record_list = list(session.run("CREATE (a:Person {name:'Alice'}) "
"RETURN a"))
assert len(record_list) == 1
for record in record_list:
alice = record[0]
assert isinstance(alice, Node)
assert alice.labels == {"Person"}
assert dict(alice) == {"name": "Alice"}
def test_can_return_relationship(neo4j_driver):
with neo4j_driver.session() as session:
record_list = list(session.run("CREATE ()-[r:KNOWS {since:1999}]->() "
"RETURN r"))
assert len(record_list) == 1
for record in record_list:
rel = record[0]
assert isinstance(rel, Relationship)
assert rel.type == "KNOWS"
assert dict(rel) == {"since": 1999}
# TODO: re-enable after server bug is fixed
# def test_can_return_path(session):
# with self.driver.session() as session:
# record_list = list(session.run("MERGE p=({name:'Alice'})-[:KNOWS]->"
# "({name:'Bob'}) RETURN p"))
# assert len(record_list) == 1
# for record in record_list:
# path = record[0]
# assert isinstance(path, Path)
# assert path.start_node["name"] == "Alice"
# assert path.end_node["name"] == "Bob"
# assert path.relationships[0].type == "KNOWS"
# assert len(path.nodes) == 2
# assert len(path.relationships) == 1
def test_can_handle_cypher_error(session):
with raises(CypherError):
session.run("X").consume()
def test_keys_are_available_before_and_after_stream(session):
result = session.run("UNWIND range(1, 10) AS n RETURN n")
assert list(result.keys()) == ["n"]
list(result)
assert list(result.keys()) == ["n"]
def test_keys_with_an_error(session):
with raises(CypherError):
result = session.run("X")
list(result.keys())
def test_should_not_allow_empty_statements(session):
with raises(ValueError):
_ = session.run("")
def test_statement_object(session):
value = session.run(Statement("RETURN $x"), x=1).single().value()
assert value == 1
def test_autocommit_transactions_should_support_metadata(session):
metadata_in = {"foo": "bar"}
try:
statement = Statement("CALL dbms.getTXMetaData", metadata=metadata_in)
metadata_out = session.run(statement).single().value()
except ClientError as e:
if e.code == "Neo.ClientError.Procedure.ProcedureNotFound":
raise SkipTest("Cannot assert correct metadata as Neo4j edition "
"does not support procedure dbms.getTXMetaData")
else:
raise
else:
assert metadata_in == metadata_out
def test_autocommit_transactions_should_support_timeout(neo4j_driver):
with neo4j_driver.session() as s1:
s1.run("CREATE (a:Node)").consume()
with neo4j_driver.session() as s2:
tx1 = s1.begin_transaction()
tx1.run("MATCH (a:Node) SET a.property = 1").consume()
with raises(TransientError):
s2.run(Statement("MATCH (a:Node) SET a.property = 2",
timeout=0.25)).consume()
def test_regex_in_parameter(session):
matches = session.run("UNWIND ['A', 'B', 'C', 'A B', 'B C', 'A B C', "
"'A BC', 'AB C'] AS t WITH t "
"WHERE t =~ $re RETURN t", re=r'.*\bB\b.*').value()
assert matches == ["B", "A B", "B C", "A B C"]
def test_regex_inline(session):
matches = session.run(r"UNWIND ['A', 'B', 'C', 'A B', 'B C', 'A B C', "
r"'A BC', 'AB C'] AS t WITH t "
r"WHERE t =~ '.*\\bB\\b.*' RETURN t").value()
assert matches == ["B", "A B", "B C", "A B C"]
def test_automatic_reset_after_failure(session):
try:
session.run("X").consume()
except CypherError:
result = session.run("RETURN 1")
record = next(iter(result))
assert record[0] == 1
else:
assert False, "A Cypher error should have occurred"
def test_session_error(bolt_driver):
session = bolt_driver.session()
session.close()
with raises(SessionError):
session.run("RETURN 1")
def test_large_values(bolt_driver):
for i in range(1, 7):
with bolt_driver.session() as session:
session.run("RETURN '{}'".format("A" * 2 ** 20))
|
py | 1a52dd8b4de826f491ceead6fcefae8e2a41beae | #!/usr/bin/env python -tt
#
# Copyright (c) 2007 Red Hat Inc.
# Copyright (c) 2009, 2010, 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os
import tempfile
import shutil
from wic import msger
from wic.utils.errors import CreatorError
from wic.utils import runner
class BaseImageCreator():
"""Base class for image creation.
BaseImageCreator is the simplest creator class available; it will
create a system image according to the supplied kickstart file.
e.g.
import wic.imgcreate as imgcreate
ks = imgcreate.read_kickstart("foo.ks")
imgcreate.ImageCreator(ks, "foo").create()
"""
def __del__(self):
self.cleanup()
def __init__(self, createopts=None):
"""Initialize an ImageCreator instance.
ks -- a pykickstart.KickstartParser instance; this instance will be
used to drive the install by e.g. providing the list of packages
to be installed, the system configuration and %post scripts
name -- a name for the image; used for e.g. image filenames or
filesystem labels
"""
self.__builddir = None
self.ks = None
self.name = "target"
self.tmpdir = "/var/tmp/wic"
self.workdir = "/var/tmp/wic/build"
# setup tmpfs tmpdir when enabletmpfs is True
self.enabletmpfs = False
if createopts:
# Mapping table for variables that have different names.
optmap = {"outdir" : "destdir",
}
# update setting from createopts
for key in createopts:
if key in optmap:
option = optmap[key]
else:
option = key
setattr(self, option, createopts[key])
self.destdir = os.path.abspath(os.path.expanduser(self.destdir))
self._dep_checks = ["ls", "bash", "cp", "echo"]
# Output image file names
self.outimage = []
# No ks provided when called by convertor, so skip the dependency check
if self.ks:
# If we have btrfs partition we need to check necessary tools
for part in self.ks.partitions:
if part.fstype and part.fstype == "btrfs":
self._dep_checks.append("mkfs.btrfs")
break
# make sure the specified tmpdir and cachedir exist
if not os.path.exists(self.tmpdir):
os.makedirs(self.tmpdir)
#
# Hooks for subclasses
#
def _create(self):
"""Create partitions for the disk image(s)
This is the hook where subclasses may create the partitions
that will be assembled into disk image(s).
There is no default implementation.
"""
pass
def _cleanup(self):
"""Undo anything performed in _create().
This is the hook where subclasses must undo anything which was
done in _create().
There is no default implementation.
"""
pass
#
# Actual implementation
#
def __ensure_builddir(self):
if self.__builddir is not None:
return
try:
self.workdir = os.path.join(self.tmpdir, "build")
if not os.path.exists(self.workdir):
os.makedirs(self.workdir)
self.__builddir = tempfile.mkdtemp(dir=self.workdir,
prefix="imgcreate-")
except OSError as err:
raise CreatorError("Failed to create build directory in %s: %s" %
(self.tmpdir, err))
def __setup_tmpdir(self):
if not self.enabletmpfs:
return
runner.show('mount -t tmpfs -o size=4G tmpfs %s' % self.workdir)
def __clean_tmpdir(self):
if not self.enabletmpfs:
return
runner.show('umount -l %s' % self.workdir)
def create(self):
"""Create partitions for the disk image(s)
Create the partitions that will be assembled into disk
image(s).
"""
self.__setup_tmpdir()
self.__ensure_builddir()
self._create()
def cleanup(self):
"""Undo anything performed in create().
Note, make sure to call this method once finished with the creator
instance in order to ensure no stale files are left on the host e.g.:
creator = ImageCreator(ks, name)
try:
creator.create()
finally:
creator.cleanup()
"""
if not self.__builddir:
return
self._cleanup()
shutil.rmtree(self.__builddir, ignore_errors=True)
self.__builddir = None
self.__clean_tmpdir()
def print_outimage_info(self):
msg = "The new image can be found here:\n"
self.outimage.sort()
for path in self.outimage:
msg += ' %s\n' % os.path.abspath(path)
msger.info(msg)
|
py | 1a52de9edc14e0877f5ee9aff82bfe48a1903552 | def incworkload(inchtml,composition):
Xh = 45100*(composition/(composition+1)) / (3 * 60 * 60)*inchtml
Xi = 45100*(1/(composition+1)) / (3 * 60 * 60)*inchtml
request_size_h = 3000 / 1000
request_size_i = 15000 / 1000
Dhc = (0.008 + (0.002 * request_size_h))
Dic = (0.008 + (0.002 * request_size_i))
Dhd = 0.012 * request_size_h
Did = 0.012 * request_size_i
Uhc = Xh * Dhc
Uic = Xi * Dic
Uhd = Xh * Dhd
Uid = Xi * Did
Uc = Uhc + Uic
Ud = Uhd + Uid
Rhc = Dhc / (1 - Uc)
Ric = Dic / (1 - Uc)
Rhd = Dhd / (1 - Ud)
Rid = Did / (1 - Ud)
Rhtml = Rhc + Rhd
Rimage = Ric + Rid
print("composition:" + str(int(composition)))
print("Xh={:.4f},Xi={:.4f}".format(Xh, Xi))
print("Dhc={:.4f},Dic={:.4f},Dhd={:.4f},Did={:.4f}".
format(Dhc, Dic, Dhd, Did))
print("Uhc={:.4f},Uic={:.4f},Uhd={:.4f},Uid={:.4f}".
format(Uhc, Uic, Uhd, Uid))
print("Uc={:.4f},Ud={:.4f}".format(Uc, Ud))
# print("Rhc={:.4f},Ric={:.4f},Rhd={:.4f},Rid={:.4f}".
# format(Rhc, Ric, Rhd, Rid))
# print("Rhtml={:.4f},Rimage={:.4f}".format(Rhtml, Rimage))
print("-------------------------")
incworkload(inchtml=1,composition=8)
incworkload(inchtml=1,composition=10)
incworkload(inchtml=4,composition=10)
incworkload(inchtml=5,composition=10)
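# Note on the calculation (a sketch of the underlying model, inferred from the code):
# this is a standard open queueing-network evaluation. For each request class
# (HTML h, image i) and device (CPU c, disk d):
#   U = X * D         utilization = throughput * service demand
#   R = D / (1 - U)   residence time at a device with total utilization U
# A page's response time is the sum of its residence times, e.g. Rhtml = Rhc + Rhd,
# which the commented-out prints would report.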
|
py | 1a52debad6267b42e8a2c6bacc4e9508a6a54131 | import sys
import pytest
from draconic.exceptions import *
def test_comps(e):
assert e('[a + 1 for a in [1,2,3]]') == [2, 3, 4]
assert e('{a + 1 for a in [1,2,3]}') == {2, 3, 4}
assert e('{a + 1: a - 1 for a in [1,2,3]}') == {2: 0, 3: 1, 4: 2}
def test_setcomp(e):
assert e('{0 for a in [1,2,3]}') == {0}
assert e('{a % 10 for a in [5,10,15,20]}') == {0, 5}
def test_genexp(e):
assert e('list(a + 1 for a in [1,2,3])') == [2, 3, 4]
class TestOperations:
"""
Tests the operations with a custom handler:
ast.Add: self._safe_add,
ast.Sub: self._safe_sub,
ast.Mult: self._safe_mult,
ast.Pow: self._safe_power,
ast.LShift: self._safe_lshift,
"""
def test_arithmetic(self, e):
assert e('3 + 3') == 6
assert e('3 - 3') == 0
assert e('3 * 3') == 9
assert e('3 ** 3') == 27
assert e('3 << 3') == 0b11000
def test_bitwise_operators(self, e):
assert e('4 << 1') == 8
assert e('8 >> 1') == 4
assert e('4 | 1') == 5
assert e('5 ^ 6') == 3
assert e('3 & 6') == 2
def test_compound_type_operators(self, e):
assert e('[0] * 500') == [0] * 500
assert e('[1, 2] * 10') == [1, 2] * 10
class TestAssignments:
def test_names(self, e):
e('a = 1')
assert e('a') == 1
e('b = c = 2')
assert e('b') == 2
assert e('c') == 2
e('d = c + 1')
assert e('d') == 3
with pytest.raises(NotDefined):
e('e = x')
def test_augassign(self, e):
e('a = 1')
assert e('a') == 1
e('a += 1')
assert e('a') == 2
e('a *= 2')
assert e('a') == 4
e('a <<= 1')
assert e('a') == 8
e('a >>= 1')
assert e('a') == 4
e('a |= 1')
assert e('a') == 5
e('a ^= 6')
assert e('a') == 3
e('a &= 6')
assert e('a') == 2
e('a /= 2')
assert e('a') == 1
e('a += a + 1')
assert e('a') == 3
e('a += -2')
assert e('a') == 1
with pytest.raises(NotDefined):
e('b += 1')
with pytest.raises(DraconicSyntaxError):
e('a + 1 += 1')
def test_assigning_expressions(self, e):
e('a = 1')
e('b = 2')
e('c = "foo"')
e('ab = a + b')
assert e('ab') == 3
e('cb = c * b')
assert e('cb') == 'foofoo'
e('cb *= 2')
assert e('cb') == 'foofoofoofoo'
e('cb = cb.upper()')
assert e('cb') == 'FOOFOOFOOFOO'
with pytest.raises(IterableTooLong):
e('cb = cb * 1000000')
class TestCompoundAssignments:
def test_unpack(self, i, e):
i.builtins['x'] = (1, 2)
i.builtins['y'] = (1, (2, 3), 4)
e('a, b = (1, 2)')
assert e('a') == 1
assert e('b') == 2
e('a, (b, c), d = (1, (2, 3), 4)')
assert e('a') == 1
assert e('b') == 2
assert e('c') == 3
assert e('d') == 4
e('e = (1, (2, 3), 4)')
assert e('e') == (1, (2, 3), 4)
e('a, (b, c), d = (1, (a, (3, 3)), "foo")')
assert e('a') == 1
assert e('b') == 1
assert e('c') == (3, 3)
assert e('d') == 'foo'
def test_bad_unpacks(self, e):
with pytest.raises(DraconicValueError):
e('a, b, c = (1, 2)')
with pytest.raises(DraconicValueError):
e('a, b = (1, 2, 3)')
with pytest.raises(DraconicValueError):
e('a, b = 1')
def test_iterator_unpack(self, i, e):
i.builtins['range'] = range
e('a, b, c, d = range(4)')
assert e('a') == 0
assert e('b') == 1
assert e('c') == 2
assert e('d') == 3
e('a, b, c, d = [i + 1 for i in range(4)]')
assert e('a') == 1
assert e('b') == 2
assert e('c') == 3
assert e('d') == 4
def test_compound_assignments(self, e):
e('a = [1, 2, 3]')
e('b = {"foo": "bar"}')
e('a[0] = 0')
assert e('a') == [0, 2, 3]
e('a[1] = (1, 2)')
assert e('a') == [0, (1, 2), 3]
e('a[2] = a') # oh boy
# this = [0, (1, 2), 0]
# this[2] = this
# assert e('a') == this # this causes a RecursionError in comparison
# but making a self-referencing list does not explode here, which is the real test
assert e('a[2] is a') is True
e('b[0] = 0')
assert e('b') == {"foo": "bar", 0: 0}
e('b["foo"] = "bletch"')
assert e('b') == {"foo": "bletch", 0: 0}
def test_compound_unpack(self, i, e):
i.builtins['x'] = (1, 2)
i.builtins['y'] = (1, (2, 3), 4)
e('a = [1, 2, 3]')
e('a[0], a[1] = (-1, -2)')
assert e('a') == [-1, -2, 3]
e('a[0], a[1], _ = y')
assert e('a') == [1, (2, 3), 3]
def test_assign_slice(self, i, e):
e('a = [1, 2, 3]')
i.builtins['range'] = range
e('a[0:2] = range(2)')
assert e('a') == [0, 1, 3]
def test_deep_assign(self, e):
e('a = [[[0]]]')
e('b = {0:{0:{0: 0}}}')
e('a[0][0][0] = 1')
assert e('a') == [[[1]]]
e('b[0][0][0] = 1')
assert e('b') == {0: {0: {0: 1}}}
def test_references(self, e):
e('a = [1, 2, 3]')
e('b = a')
assert e('a is b') is True
e('b[0] = 0')
assert e('a') == [0, 2, 3]
assert e('b') == [0, 2, 3]
class TestNamedExpressions:
def test_names(self, e):
if sys.version_info < (3, 8, 0): return
assert e('(a := 1)') == 1
assert e('a') == 1
assert e('(b := a + 1)') == 2
assert e('b') == 2
with pytest.raises(NotDefined):
e('(c := x)')
e("d = [1, 2, 3]")
with pytest.raises(DraconicSyntaxError):
e("(d[0] := 0)")
def test_assigning_expressions(self, e):
if sys.version_info < (3, 8, 0): return
e('a = 1')
e('b = 2')
e('c = "foo"')
assert e('(ab := a + b)') == 3
assert e('ab') == 3
assert e('(cb := c * b)') == 'foofoo'
assert e('cb') == 'foofoo'
assert e('(cb := cb.upper())') == 'FOOFOO'
assert e('cb') == 'FOOFOO'
with pytest.raises(IterableTooLong):
e('(cb := cb * 1000000)')
|
py | 1a52e0503bb12c4181a4a1b4dd540073f787c51d | """Test data purging."""
import json
from datetime import datetime, timedelta
import unittest
from homeassistant.components import recorder
from homeassistant.components.recorder.const import DATA_INSTANCE
from homeassistant.components.recorder.purge import purge_old_data
from homeassistant.components.recorder.models import States, Events
from homeassistant.components.recorder.util import session_scope
from tests.common import get_test_home_assistant, init_recorder_component
class TestRecorderPurge(unittest.TestCase):
"""Base class for common recorder tests."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
init_recorder_component(self.hass)
self.hass.start()
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
def _add_test_states(self):
"""Add multiple states to the db for testing."""
now = datetime.now()
five_days_ago = now - timedelta(days=5)
attributes = {'test_attr': 5, 'test_attr_10': 'nice'}
self.hass.block_till_done()
self.hass.data[DATA_INSTANCE].block_till_done()
with recorder.session_scope(hass=self.hass) as session:
for event_id in range(5):
if event_id < 3:
timestamp = five_days_ago
state = 'purgeme'
else:
timestamp = now
state = 'dontpurgeme'
session.add(States(
entity_id='test.recorder2',
domain='sensor',
state=state,
attributes=json.dumps(attributes),
last_changed=timestamp,
last_updated=timestamp,
created=timestamp,
event_id=event_id + 1000
))
def _add_test_events(self):
"""Add a few events for testing."""
now = datetime.now()
five_days_ago = now - timedelta(days=5)
event_data = {'test_attr': 5, 'test_attr_10': 'nice'}
self.hass.block_till_done()
self.hass.data[DATA_INSTANCE].block_till_done()
with recorder.session_scope(hass=self.hass) as session:
for event_id in range(5):
if event_id < 2:
timestamp = five_days_ago
event_type = 'EVENT_TEST_PURGE'
else:
timestamp = now
event_type = 'EVENT_TEST'
session.add(Events(
event_type=event_type,
event_data=json.dumps(event_data),
origin='LOCAL',
created=timestamp,
time_fired=timestamp,
))
def test_purge_old_states(self):
"""Test deleting old states."""
self._add_test_states()
# make sure we start with 5 states
with session_scope(hass=self.hass) as session:
states = session.query(States)
self.assertEqual(states.count(), 5)
# run purge_old_data()
purge_old_data(self.hass.data[DATA_INSTANCE], 4)
# we should only have 2 states left after purging
self.assertEqual(states.count(), 2)
def test_purge_old_events(self):
"""Test deleting old events."""
self._add_test_events()
with session_scope(hass=self.hass) as session:
events = session.query(Events).filter(
Events.event_type.like("EVENT_TEST%"))
self.assertEqual(events.count(), 5)
# run purge_old_data()
purge_old_data(self.hass.data[DATA_INSTANCE], 4)
# now we should only have 3 events left
self.assertEqual(events.count(), 3)
|
py | 1a52e0a7041b83746b2adf0cf11df1c68d9acfda |
class PrivateUnauthorizedError(Exception):
def __init__(self, msg):
self.msg = msg |
py | 1a52e16acb0a0d7ff11c4e5166e80f0e755395de | from django.db import models
from django.db.models import Q
from django.utils.encoding import force_text
from cms.models import CMSPlugin, Placeholder
class AliasPluginModel(CMSPlugin):
cmsplugin_ptr = models.OneToOneField(
CMSPlugin,
on_delete=models.CASCADE,
related_name='cms_aliasplugin',
parent_link=True,
)
plugin = models.ForeignKey(
CMSPlugin,
on_delete=models.CASCADE,
editable=False,
related_name='alias_reference',
null=True,
)
alias_placeholder = models.ForeignKey(
Placeholder,
on_delete=models.CASCADE,
editable=False,
related_name='alias_placeholder',
null=True,
)
class Meta:
app_label = 'cms'
def __str__(self):
if self.plugin_id:
return "(%s) %s" % (force_text(self.plugin.get_plugin_name()), self.plugin.get_plugin_instance()[0])
else:
return force_text(self.alias_placeholder.get_label())
def get_aliased_placeholder_id(self):
if self.plugin_id:
placeholder_id = self.plugin.placeholder_id
else:
placeholder_id = self.alias_placeholder_id
return placeholder_id
def is_recursive(self):
placeholder_id = self.get_aliased_placeholder_id()
if not placeholder_id:
return False
plugins = AliasPluginModel.objects.filter(
plugin_type='AliasPlugin',
placeholder_id=placeholder_id,
)
plugins = plugins.filter(
Q(plugin=self) |
Q(plugin__placeholder=self.placeholder_id) |
Q(alias_placeholder=self.placeholder_id)
)
return plugins.exists()
|
py | 1a52e4ff140e11d73eaba994aa18d8af52703a70 | #!/usr/bin/python
# Copyright (C) 2015, WSID
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import unittest
from gi.repository import GObject
from gi.repository import CrankBase
class TestVecInt(unittest.TestCase):
def assertFloat (self, a, b, delta=0.0001):
"""A simple custom assert that the given values are the same.
It takes the delta value into account, so that the test can tolerate small
errors.
"""
try: #if they are both of list type.
if (len(a) != len(b)):
raise AssertionError ("array length: %d != %d" % (len(a), len(b)))
for i in range (0, len(a)):
if ((a[i] < b[i] - delta) or (b[i] + delta < a[i])):
raise AssertionError ("%g != %g (diff=%g)" % (a[i], b[i], b[i]-a[i]))
except TypeError: #then they are numeric type.
if ((a < b - delta) or (b + delta < a)):
raise AssertionError ("%g != %g (diff=%g)" % (a, b, b-a))
def test_equal (self):
a = CrankBase.CplxFloat.init (3, 4)
b = CrankBase.CplxFloat.init (3, 4)
c = CrankBase.CplxFloat.init (4, 3)
assert (a.equal (b))
assert (not a.equal (c))
def test_equal_delta (self):
a = CrankBase.CplxFloat.init (3, 4)
b = CrankBase.CplxFloat.init (3.2, 4.1)
c = CrankBase.CplxFloat.init (4, 3)
assert (a.equal_delta (b, 1))
assert (not a.equal_delta (c, 1))
def test_get_norm (self):
a = CrankBase.CplxFloat.init (3, 4)
self.assertFloat (a.get_norm (), 5)
def test_neg (self):
a = CrankBase.CplxFloat.init (3, 4)
a = a.neg ()
self.assertFloat (a.real, -3)
self.assertFloat (a.imag, -4)
def test_inverse (self):
a = CrankBase.CplxFloat.init (3, 4)
a = a.inverse ()
self.assertFloat (a.real, 0.12)
self.assertFloat (a.imag, -0.16)
def test_conjugate (self):
a = CrankBase.CplxFloat.init (3, 4)
a = a.conjugate ()
self.assertFloat (a.real, 3)
self.assertFloat (a.imag, -4)
def test_unit (self):
a = CrankBase.CplxFloat.init (3, 4)
a = a.unit ()
self.assertFloat (a.real, 0.6)
self.assertFloat (a.imag, 0.8)
def test_sqrt (self):
a = CrankBase.CplxFloat.init (7, 8)
a = a.sqrt ()
self.assertFloat (a.real, 2.9690)
self.assertFloat (a.imag, 1.3472)
def test_addr (self):
a = CrankBase.CplxFloat.init (3, 4)
a = a.addr (2)
self.assertFloat (a.real, 5)
self.assertFloat (a.imag, 4)
def test_subr (self):
a = CrankBase.CplxFloat.init (3, 4)
a = a.subr (2)
self.assertFloat (a.real, 1)
self.assertFloat (a.imag, 4)
def test_mulr (self):
a = CrankBase.CplxFloat.init (3, 4)
a = a.mulr (2)
self.assertFloat (a.real, 6)
self.assertFloat (a.imag, 8)
def test_divr (self):
a = CrankBase.CplxFloat.init (3, 4)
a = a.divr (2)
self.assertFloat (a.real, 1.5)
self.assertFloat (a.imag, 2)
def test_rsubr (self):
a = CrankBase.CplxFloat.init (3, 4)
a = a.rsubr (2)
self.assertFloat (a.real, -1)
self.assertFloat (a.imag, -4)
def test_rdivr (self):
a = CrankBase.CplxFloat.init (3, 4)
a = a.rdivr (2)
self.assertFloat (a.real, 0.24)
self.assertFloat (a.imag, -0.32)
def test_add (self):
a = CrankBase.CplxFloat.init (3, 4)
b = CrankBase.CplxFloat.init (5, 12)
a = a.add (b)
self.assertFloat (a.real, 8)
self.assertFloat (a.imag, 16)
def test_sub (self):
a = CrankBase.CplxFloat.init (3, 4)
b = CrankBase.CplxFloat.init (5, 12)
a = a.sub (b)
self.assertFloat (a.real, -2)
self.assertFloat (a.imag, -8)
def test_mul (self):
a = CrankBase.CplxFloat.init (3, 4)
b = CrankBase.CplxFloat.init (5, 12)
a = a.mul (b)
self.assertFloat (a.real, -33)
self.assertFloat (a.imag, 56)
def test_div (self):
a = CrankBase.CplxFloat.init (3, 4)
b = CrankBase.CplxFloat.init (5, 12)
a = a.div (b)
self.assertFloat (a.real, 63.0/169.0)
self.assertFloat (a.imag, -16.0/169.0)
def test_mul_conj (self):
a = CrankBase.CplxFloat.init (3, 4)
b = CrankBase.CplxFloat.init (5, 12)
a = a.mul_conj (b)
self.assertFloat (a.real, 63.0)
self.assertFloat (a.imag, -16.0)
def test_mix (self):
a = CrankBase.CplxFloat.init (3, 4)
b = CrankBase.CplxFloat.init (5, 12)
a = a.mix (b, 0.25)
self.assertFloat (a.real, 3.5)
self.assertFloat (a.imag, 6.0)
def test_ln (self):
a = CrankBase.CplxFloat.init (3, 4)
a = a.ln ()
self.assertFloat (a.real, 1.6094)
self.assertFloat (a.imag, 0.9273)
def test_exp (self):
a = CrankBase.CplxFloat.init (3, 4)
a = a.exp ()
self.assertFloat (a.real, -13.1287)
self.assertFloat (a.imag, -15.2008)
def test_pow (self):
a = CrankBase.CplxFloat.init (3, 4)
b = CrankBase.CplxFloat.init (1, 2)
a = a.pow (b)
self.assertFloat (a.real, -0.4198)
self.assertFloat (a.imag, -0.6605)
def test_sinh (self):
a = CrankBase.CplxFloat.init (3, 4)
a = a.sinh ()
self.assertFloat (a.real, -6.5481)
self.assertFloat (a.imag, -7.6192)
def test_cosh (self):
a = CrankBase.CplxFloat.init (3, 4)
a = a.cosh ()
self.assertFloat (a.real, -6.5807)
self.assertFloat (a.imag, -7.5816)
def test_tanh (self):
a = CrankBase.CplxFloat.init (3, 4)
a = a.tanh ()
self.assertFloat (a.real, 1.0007)
self.assertFloat (a.imag, 0.0049)
def test_sin (self):
a = CrankBase.CplxFloat.init (3, 4)
a = a.sin ()
self.assertFloat (a.real, 3.8537)
self.assertFloat (a.imag, -27.0168)
def test_cos (self):
a = CrankBase.CplxFloat.init (3, 4)
a = a.cos ()
self.assertFloat (a.real, -27.0349)
self.assertFloat (a.imag, -3.8512)
def test_tan (self):
a = CrankBase.CplxFloat.init (3, 4)
a = a.tan ()
self.assertFloat (a.real, -0.0001)
self.assertFloat (a.imag, 0.9994)
if __name__ == '__main__':
unittest.main ()
|
py | 1a52e5c425a558fdc74699ce332ffcbacff420ca | '''OpenGL extension EXT.texture_compression_latc
This module customises the behaviour of the
OpenGL.raw.GL.EXT.texture_compression_latc to provide a more
Python-friendly API
Overview (from the spec)
This extension introduces four new block-based texture compression
formats suited for unsigned and signed luminance and luminance-alpha
textures (hence the name "latc" for Luminance-Alpha Texture
Compression).
These formats are designed to reduce the storage requirements and
memory bandwidth required for luminance and luminance-alpha textures
by a factor of 2-to-1 over conventional uncompressed luminance and
luminance-alpha textures with 8-bit components (GL_LUMINANCE8 and
GL_LUMINANCE8_ALPHA8).
The compressed signed luminance-alpha format is reasonably suited
for storing compressed normal maps.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/texture_compression_latc.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.EXT.texture_compression_latc import *
from OpenGL.raw.GL.EXT.texture_compression_latc import _EXTENSION_NAME
def glInitTextureCompressionLatcEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION |
py | 1a52e78d6ccfeb8715f1f9d850518e5e51e8a73d | #!/usr/bin/env python3
# Copyright 2017-18 TransitCenter http://transitcenter.org
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Infer calls at bus stops based on GTFS schedules and bus position data"""
from __future__ import division
import sys
import os
import getpass
from bisect import bisect, bisect_left
from typing import Callable
from datetime import datetime, timedelta
from multiprocessing import Pool
import logging
import warnings
from collections import Counter, namedtuple
from itertools import cycle
import argparse
import psycopg2
from psycopg2.extras import NamedTupleCursor
import numpy as np
import pytz
logger = logging.getLogger()
logger.setLevel(logging.INFO)
loghandler = logging.StreamHandler(sys.stdout)
logformatter = logging.Formatter(
fmt="%(levelname)s (%(lineno)3d) %(asctime)s %(message)s"
)
loghandler.setFormatter(logformatter)
logger.addHandler(loghandler)
warnings.simplefilter("ignore")
DEC2FLOAT = psycopg2.extensions.new_type(
psycopg2.extensions.DECIMAL.values,
"DEC2FLOAT",
lambda value, curs: float(value) if value is not None else None,
)
# Maximum elapsed time between positions before we declare a new run
MAX_TIME_BETWEEN_STOPS = timedelta(seconds=60 * 30)
# when dist_from_stop < 30.48 m (100 feet) considered "at stop" by MTA --NJ
# this is not correct! It's only that the sign displays "at stop"
# beginning at 100 ft. Nevertheless, we're doing 100 ft
STOP_THRESHOLD = 30.48
# Minimum distance between positions when extrapolating.
# When zero, identical positions are allowed, which can produce crazy results
MIN_EXTRAP_DIST = 1
# The number of positions to use when extrapolating.
EXTRAP_LENGTH = 5
# Maximum number of stops to extrapolate forward or backward
EXTRAP_COUNT = 2
# Doing one complicated thing in this query.
# Some bus routes are loops with tails (e.g. B74):
# +--+
# | |---- (start and end)
# +——+
# ST_LineLocatePoint can't handle this, so we use the mostly-untrustworthy
# "positions"."dist_along_route" column to limit the part of the shape_geom
# we examine to a fraction of the LineString.
VEHICLE_QUERY = """
SELECT
EXTRACT(EPOCH FROM timestamp) AS time,
trip_id,
trip_start_date AS date,
st.stop_sequence AS seq,
CASE
WHEN dist_along_route is NULL and dist_from_stop is NULL
THEN ST_LineLocatePoint(g.route_geom, n.position_geom) * h.length
ELSE inferno.safe_locate(
g.route_geom,
n.position_geom,
LEAST(h.length - 500, GREATEST(0, dist_along_route - dist_from_stop - 500))::numeric,
LEAST(h.length, GREATEST(dist_along_route, 0) + 100)::numeric,
h.length::numeric
)
END::numeric(10, 2) AS distance
FROM {0} AS p
LEFT JOIN gtfs.trips USING (trip_id)
-- TODO: change to LEFT JOIN when fix implemented for orphan stops
INNER JOIN gtfs.stop_times st USING (feed_index, trip_id, stop_id)
LEFT JOIN gtfs.shape_geoms r USING (feed_index, shape_id),
ST_Transform(r.the_geom, %(epsg)s) g (route_geom),
ST_Transform(ST_SetSRID(ST_MakePoint(longitude, latitude), 4326), %(epsg)s) n (position_geom),
LATERAL (SELECT CASE %(epsg)s WHEN 4326 THEN r.length ELSE ST_Length(g.route_geom) END) h (length)
WHERE
vehicle_id = %(vehicle)s
AND trip_start_date = %(date)s::date
ORDER BY "timestamp"
"""
SELECT_VEHICLE = """SELECT DISTINCT vehicle_id
FROM {0} WHERE trip_start_date = %s"""
SELECT_CALLED_VEHICLES = """SELECT vehicle_id FROM {calls}
WHERE source = 'I' AND call_time::date = %s
GROUP BY vehicle_id"""
SELECT_STOPTIMES = """SELECT
feed_index,
stop_id,
inferno.wall_timez(DATE %(date)s, arrival_time, agency_timezone) AS datetime,
DATE %(date)s as date,
route_id,
direction_id,
stop_sequence AS seq,
ST_LineLocatePoint(route.the_geom, ST_Transform(stops.the_geom, %(epsg)s)) * ST_Length(route.the_geom) distance
FROM gtfs.trips
LEFT JOIN gtfs.agency USING (feed_index)
LEFT JOIN gtfs.stop_times USING (feed_index, trip_id)
LEFT JOIN gtfs.stops USING (feed_index, stop_id)
LEFT JOIN gtfs.shape_geoms shape USING (feed_index, shape_id),
ST_Transform(shape.the_geom, %(epsg)s) route (the_geom)
WHERE trip_id = %(trip)s
AND feed_index = (
SELECT MAX(feed_index)
FROM gtfs.trips
LEFT JOIN gtfs.calendar USING (feed_index, service_id)
WHERE trip_id = %(trip)s
AND date %(date)s BETWEEN start_date and end_date
)
ORDER BY stop_sequence ASC
"""
SELECT_STOPTIMES_PLAIN = """SELECT DISTINCT
feed_index,
stop_id,
inferno.wall_timez(date %(date)s, arrival_time, agency_timezone) AS datetime,
date %(date)s as date,
route_id,
direction_id,
stop_sequence,
ST_LineLocatePoint(route.the_geom, ST_Transform(stops.the_geom, %(epsg)s)) * ST_Length(route.the_geom) distance
FROM gtfs.trips
LEFT JOIN gtfs.agency USING (feed_index)
LEFT JOIN gtfs.stop_times USING (feed_index, trip_id)
LEFT JOIN gtfs.stops USING (feed_index, stop_id)
LEFT JOIN gtfs.shape_geoms shape USING (feed_index, shape_id),
ST_Transform(shape.the_geom, %(epsg)s) route (the_geom)
WHERE trip_id = %(trip)s
ORDER BY stop_sequence ASC;
"""
INSERT = """INSERT INTO {}
(vehicle_id, trip_id, direction_id, stop_id, run_index,
call_time, source, deviation, feed_index, date)
VALUES (%(vehicle)s, %(trip)s, %(direction_id)s, %(stop_id)s, currval('inferno.run_index'),
%(call_time)s, %(source)s, %(deviation)s, %(feed_index)s, %(date)s)
ON CONFLICT DO NOTHING"""
def common(lis: list):
"""Return the most common value in a list"""
return Counter(lis).most_common(1)[0][0]
def mask(lis: list, key: Callable, keep_last=None) -> list:
"""
Create a mask on `lis` using the `key` function.
`key` will be evaluated on pairs of items in `lis`.
Returned list will only include items where `key` evaluates to True.
Arguments:
keep_last (boolean): In a sequence of items where key() is False,
keep the last one.
"""
result = [lis[0]]
for item in lis[1:]:
if key(item, result[-1]):
result.append(item)
elif keep_last is True:
result[-1] = item
return result
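# Illustrative example (not from the original source): with key=lambda cur, prev: cur > prev,
# mask([1, 1, 2, 2, 3], key) returns [1, 2, 3]; items failing the key are dropped, or,
# with keep_last=True, overwrite the most recently kept item instead.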
def desc2fn(description: tuple) -> tuple:
"""Extract tuple of field names from psycopg2 cursor.description."""
return tuple(d.name for d in description)
def compare_dist(a, b):
try:
return a.distance >= b.distance
except TypeError:
# Don't be lenient when there's bad data: return False.
return False
def toutc(timestamp):
return datetime.utcfromtimestamp(timestamp).replace(tzinfo=pytz.UTC)
def get_positions(cursor, positions_table, query_args):
"""
Compile list of positions for a vehicle, using a list of positions
and filtering based on positions that reflect change in pattern or next_stop.
"""
runs = []
query = VEHICLE_QUERY.format(positions_table or "rt.vehicle_positions")
# load up cursor with every position for vehicle
cursor.execute(query, query_args)
if cursor.rowcount == 0:
logging.warning(
"No rows found for %s on %s", query_args["vehicle"], query_args["date"]
)
return []
# dummy position for comparison with first row
prev = namedtuple("prev", ("distance", "trip_id"))(0, None)
for position in cursor:
# If we're on a new trip, start a new run
if not position.trip_id == prev.trip_id:
runs.append([])
# If the distance has not declined, append the position
if compare_dist(position, prev) or position.trip_id != prev.trip_id:
runs[-1].append(position)
prev = position
return runs
def filter_positions(runs):
"""Filter runs to elimate shorties."""
return [run for run in runs if len(run) > 2 and len(set(r.seq for r in run)) > 1]
def get_stoptimes(cursor, tripid, date, epsg):
logging.debug("Fetching stoptimes for %s", tripid)
fields = {"trip": tripid, "date": date, "epsg": epsg}
cursor.execute(SELECT_STOPTIMES, fields)
if cursor.rowcount == 0:
logging.warning(
"Couldn't find any stoptimes in date range, running simple query: %s",
tripid,
)
logging.debug(cursor.query.decode("utf8"))
cursor.execute(SELECT_STOPTIMES_PLAIN, fields)
return cursor.fetchall()
def extrapolate(run, stoptimes, method=None):
"""
Extrapolating is hard. Depending on the input data points, extrapolated
data could produce impossible results, e.g. an extrapolated time being less
than a known time. This is true even for linear extrapolations.
This function may run multiple extrapolations, counterintuitively using less
data until a reasonable result is obtained. In the extreme, a linear extrapolation
from two observations will always provide a plausible (if rough) estimate.
"""
xs = [x.distance for x in run]
ys = [x.time for x in run]
data = [x.distance for x in stoptimes]
result = []
# Use builtin comparison functions.
# Operations are symmetric when extrapolating forward vs. backward.
if method == "E":
# Extrapolate forward (to End).
compare = ys[-1].__lt__
elif method == "S":
# Extrapolate backward (to Start).
compare = ys[0].__gt__
else:
raise ValueError("Invalid direction")
# Try to ensure that the extrapolated values are consistent with
# the previous values by using shorter versions of the run when necessary
while len(ys) > 1:
slope, intercept = np.polyfit(xs, ys, 1)
result = [slope * x + intercept for x in data]
if slope > 0 and all(compare(y) for y in result):
# Got a legal extrapolation, return calls.
# Slope should always be > 0, if it isn't there's a serious data issue.
break
# otherwise...
result = []
# Slice from the beginning (if forward) or end (if backward)
# of the run.
logging.debug(
"Invalid extrap. method: %s. slope: %s. comparison: %s",
method,
round(slope, 2),
[compare(y) for y in result],
)
logging.debug("new extrap length: %s", len(xs) - 1)
xs.pop(0 if method == "E" else -1)
ys.pop(0 if method == "E" else -1)
return [call(s, t, method) for s, t in zip(stoptimes, result)]
def call(stoptime, seconds, method=None):
"""
Returns a dict with route, direction, stop, call time and source.
Call time is in UTC.
"""
result = dict(stoptime._asdict(), call_time=toutc(seconds), source=method or "I")
result["deviation"] = result["call_time"] - stoptime.datetime
return result
def generate_calls(run: list, stops: list, mintime=None, maxtime=None) -> list:
"""
list of calls to be written
Args:
run: list generated from enumerate(positions)
stoptimes: list of scheduled stoptimes for this trip
mintime: don't extrapolate back before this time
maxtime: don't extrapolate forward past this time
"""
obs_distances = [p.distance for p in run]
obs_times = [p.time for p in run]
stop_positions = [x.distance for x in stops]
# Get the range of stop positions that can be interpolated based on data.
# The rest will be extrapolated
si = bisect_left(stop_positions, obs_distances[0])
ei = bisect(stop_positions, obs_distances[-1])
logging.debug("min, max\t%s\t%s", mintime, maxtime)
logging.debug("this run\t%s\t%s", toutc(obs_times[0]), toutc(obs_times[-1]))
if not stops[si:ei]:
logging.debug(
"No calls because no stops between si (%s) and ei (%s)",
obs_distances[0],
obs_distances[-1],
)
logging.debug(
"Stop distance range: %s - %s", min(stop_positions), max(stop_positions)
)
return []
# Interpolate main chunk of positions.
interpolated = np.interp(stop_positions[si:ei], obs_distances, obs_times)
calls = [call(stop, secs) for stop, secs in zip(stops[si:ei], interpolated)]
# Goal is to only extrapolate based on unique distances.
# When extrapolating forward, keep the oldest figure for a particular distance;
# when extrapolating back, keep the newest.
back_mask = mask(run, lambda x, y: x.distance > y.distance + MIN_EXTRAP_DIST)[
:EXTRAP_LENGTH
]
forward_mask = mask(
run, lambda x, y: x.distance > y.distance + MIN_EXTRAP_DIST, keep_last=True
)[-EXTRAP_LENGTH:]
# Extrapolate back for stops that occurred before observed positions.
if si > 0 and len(back_mask) > 1:
logging.debug("extrapolating backward. si = %s", si)
try:
backward = extrapolate(back_mask, stops[si - EXTRAP_COUNT : si], "S")
if mintime:
backward = [x for x in backward if x["call_time"] > mintime]
calls = backward + calls
except Exception as error:
logging.warning(
"Ignoring back extrapolation (trip_id = %s): %s ", run[0].trip_id, error
)
# import pdb
# pdb.set_trace()
# Extrapolate forward to the stops after the observed positions.
if ei < len(stops) and len(forward_mask) > 1:
logging.debug("extrapolating forward. ei = %s", ei)
try:
forward = extrapolate(forward_mask, stops[ei : ei + EXTRAP_COUNT], "E")
if maxtime:
forward = [x for x in forward if x["call_time"] < maxtime]
calls.extend(forward)
except Exception as error:
logging.warning(
"Ignoring forward extrapolation (trip_id = %s): %s",
run[0].trip_id,
error,
)
try:
assert increasing([x["call_time"] for x in calls])
except AssertionError:
logging.info("%s -- non-increasing calls", run[0].trip_id)
logging.debug(
"calc'ed call times: %s", [x["call_time"].timestamp() for x in calls]
)
logging.debug("observed positions: %s", obs_distances)
logging.debug("observed times: %s", obs_times)
logging.debug("stop positions: %s", stop_positions)
return calls
def increasing(L):
"""Check if array is increasing"""
return all(x <= y for x, y in zip(L, L[1:]))
def track_vehicle(
vehicle_id, query_args: dict, conn_kwargs: dict, calls_table, positions_table=None
):
"""Generate calls for a single vehicle in the database"""
positions_table = positions_table or "rt.vehicle_positions"
query_args["vehicle"] = vehicle_id
with psycopg2.connect(**conn_kwargs) as conn:
psycopg2.extensions.register_type(DEC2FLOAT)
logging.info("STARTING %s", vehicle_id)
with conn.cursor(cursor_factory=NamedTupleCursor) as cursor:
rawruns = get_positions(cursor, positions_table, query_args)
# filter out short runs and ones with few stops
runs = filter_positions(rawruns)
if len(rawruns) > len(runs):
logging.debug(
"skipping %d short runs, query: %s",
len(rawruns) - len(runs),
query_args,
)
# Compute temporal bounds of each run.
starts = [None] + [toutc(run[0].time) for run in runs[:-1]]
ends = [toutc(run[-1].time) for run in runs[1:]] + [None]
# Counter is just for logging.
lenc = 0
# each run will become a trip
for run, start, end in zip(runs, starts, ends):
if not run:
continue
if len(run) <= 2:
logging.debug(
"short run (%d positions), v_id=%s, %s",
len(run),
query_args["vehicle"],
run[0].time,
)
continue
# Assume most common trip is the correct one.
trip_id = common([x.trip_id for x in run])
# Get the scheduled list of stops for this trip.
stoptimes = get_stoptimes(
cursor, trip_id, query_args["date"], query_args["epsg"]
)
if any(x.distance is None for x in stoptimes):
logging.warning(
"Missing stoptimes trip_id= %s, date= %s",
trip_id,
query_args["date"],
)
continue
# Generate (infer) calls.
calls = generate_calls(run, stoptimes, mintime=start, maxtime=end)
# update run_index sequence
cursor.execute("SELECT nextval('inferno.run_index')")
# write calls to sink
cursor.executemany(
INSERT.format(calls_table),
[dict(trip=trip_id, vehicle=vehicle_id, **c) for c in calls],
)
lenc += len(calls)
conn.commit()
logging.debug("%s", cursor.statusmessage)
logging.info("COMMIT vehicle= %s, calls= %s", vehicle_id, lenc)
def get_cpus():
try:
return len(os.sched_getaffinity(0))
except AttributeError:
return os.cpu_count()
def connection_params():
"""Check the environment for postgresql connection parameters"""
pg = {
"PGUSER": "user",
"PGHOST": "host",
"PGPORT": "port",
"PGSERVICE": "service",
"PGPASSWORD": "password",
"PGPASSFILE": "passfile",
}
params = dict()
params.update({v: os.environ[k] for k, v in pg.items() if k in os.environ})
return params
def main(): # pragma: no cover
"""Run command line script"""
# connectionstring: str, table, date, vehicle=None
parser = argparse.ArgumentParser()
parser.add_argument("date", type=str)
parser.add_argument(
"--calls", type=str, default=os.environ.get("CALLS", "inferno.calls")
)
parser.add_argument(
"--positions",
type=str,
default=os.environ.get("POSITIONS", "rt.vehicle_positions"),
)
parser.add_argument("--vehicle", type=str)
parser.add_argument(
"--epsg",
type=int,
default=int(os.environ.get("EPSG", 4326)),
help="projection in which to calculate distances",
)
parser.add_argument(
"--debug", action="store_true", help="Run verbosely and without parallelism"
)
parser.add_argument("--quiet", action="store_true")
parser.add_argument("--verbose", action="store_true")
parser.add_argument(
"--incomplete", action="store_true", help="Restart an incomplete date"
)
parser.add_argument(
"--jobs",
type=int,
help="Number of jobs to run. Defaults to %s" % get_cpus(),
default=get_cpus(),
)
args = parser.parse_args()
conn_kwargs = connection_params()
if args.debug or args.verbose:
logger.setLevel(logging.DEBUG)
logging.debug("cli: %s", args)
logging.debug("connection: %s", conn_kwargs)
elif args.quiet:
logger.setLevel(logging.WARNING)
if args.vehicle:
vehicles = [args.vehicle]
else:
with psycopg2.connect(**conn_kwargs) as conn:
psycopg2.extensions.register_type(DEC2FLOAT)
with conn.cursor() as cursor:
logging.info("Finding vehicles for %s", args.date)
cursor.execute(SELECT_VEHICLE.format(args.positions), (args.date,))
vehicles = [x[0] for x in cursor.fetchall()]
if args.incomplete:
logging.info("Removing already-called vehicles")
cursor.execute(
SELECT_CALLED_VEHICLES.format(calls=args.calls), (args.date,)
)
called = set(x[0] for x in cursor.fetchall())
vehicles = set(vehicles).difference(called)
logging.info("Removed %s", len(called))
logging.info("Found %s vehicles", len(vehicles))
itervehicles = zip(
vehicles,
cycle([{"date": args.date, "epsg": args.epsg}]),
cycle([conn_kwargs]),
cycle([args.calls]),
cycle([args.positions]),
)
if args.debug or args.jobs == 1:
for i in itervehicles:
track_vehicle(*i)
else:
with Pool(args.jobs) as pool:
pool.starmap(track_vehicle, itervehicles)
logging.info("completed %s", args.date)
if __name__ == "__main__":
main()
|
py | 1a52e8c51e3b1b1008d6635142f04f39694d4910 | # -*- coding: utf-8 -*-
import numpy as np
import scipy.signal as sig
from math import pi
import math
def PLL(input_signal, Fs, lenght, N):
"""Synchronizes the input carrier signal with the local oscillator to avoid crosstalk due to phase and frequency differences between TX and RX.
Parameters
----------
input_signal : 1D array of floats
Complex signal received at the input of the demodulator.
Fs : float
Sampling frequency.
lenght : int
Length of the output vector.
N : int
Samples per period of the sinusoidal wave.
Returns
-------
cos_out : 1D array of floats
Cosine wave synchronized with the input signal.
sin_out : 1D array of floats
Sine wave synchronized with the input signal.
"""
# K_p = 0.2667
# K_i = 0.0178
zeta = .707 # damping factor
k = 1
Bn = 0.01*Fs #Noise Bandwidth
K_0 = 1 # NCO gain
K_d = 1/2 # Phase Detector gain
K_p = (1/(K_d*K_0))*((4*zeta)/(zeta+(1/(4*zeta)))) * \
(Bn/Fs) # Proporcional gain
K_i = (1/(K_d*K_0))*(4/(zeta+(1/(4*zeta)**2))) * \
(Bn/Fs)**2 # Integrator gain
integrator_out = 0
phase_estimate = np.zeros(lenght)
e_D = [] # phase-error output
e_F = [] # loop filter output
sin_out_n = np.zeros(lenght)
cos_out_n = np.ones(lenght)
for n in range(lenght-1):
# phase detector
try:
e_D.append(
math.atan(input_signal[n] * (cos_out_n[n] + sin_out_n[n])))
except IndexError:
e_D.append(0)
# loop filter
integrator_out += K_i * e_D[n]
e_F.append(K_p * e_D[n] + integrator_out)
# NCO
try:
phase_estimate[n+1] = phase_estimate[n] + K_0 * e_F[n]
except IndexError:
phase_estimate[n+1] = K_0 * e_F[n]
sin_out_n[n+1] = -np.sin(2*np.pi*(k/N)*(n+1) + phase_estimate[n])
cos_out_n[n+1] = np.cos(2*np.pi*(k/N)*(n+1) + phase_estimate[n])
sin_out_n = -sin_out_n
cos_out = cos_out_n[280:400]
sin_out = sin_out_n[280:400]
for i in range(18):
cos_out = np.concatenate(
(cos_out, cos_out_n[280:400], cos_out_n[280:400]), axis=None)
sin_out = np.concatenate(
(sin_out, sin_out_n[280:400], sin_out_n[280:400]), axis=None)
return(cos_out, sin_out)
def LPF(signal, fc, Fs):
"""Low pass filter, Butterworth approximation.
Parameters
----------
signal : 1D array of floats
Signal to be filtered.
fc : float
Cut-off frequency.
Fs : float
Sampling frequency.
Returns
-------
signal_filt : 1D array of floats
Filtered signal.
W : 1D array of floats
The frequencies at which 'h' was computed, in Hz.
h : complex
The frequency response.
"""
o = 5 # order of the filter
fc = np.array([fc])
wn = 2*fc/Fs
[b, a] = sig.butter(o, wn, btype='lowpass')
[W, h] = sig.freqz(b, a, worN=1024)
W = Fs*W/(2*pi)
signal_filt = sig.lfilter(b, a, signal)
return(signal_filt, W, h)
def matched_filter(signal, template):
"""Convolves the baseband signal with the template of the impulse response used in the modulator (Square Root Raised Cosine) to increase the SNR.
Parameters
----------
signal : 1D array of floats
Baseband signal to be filtered.
template : 1D array of floats
Impulse response of the filter used at the signal shaping block
Returns
-------
signal_filt : 1D array of floats
Filtered signal.
"""
signal_filt = np.convolve(signal, template, 'full')
return(signal_filt)
def downsampler(signal, packet_s, upsampler_f):
"""The algorithm analyzes the synchronization symbols and tries to find the sample where the value of the symbol is maximum. After that, it is possible to estimate in which sample the information begins to appear in the signal (i.e. it detects the delay).
Parameters
----------
signal : 1D array of floats
Baseband signal.
packet_s : int
Number of bits in the transmitted packet.
upsampler_f : int
Upsampler factor used at the modulator.
Returns
-------
symbols : 1D array of floats
The sampled symbols.
"""
e = 0
gardner_e = []
peak_sample = 0
peak_sample_acc = []
low_point = 0
threshold = 4
for i in range(len(signal)):
if signal[low_point] < -threshold:
if signal[i] > threshold:
e = (abs(signal[(i+1)]) -
abs(signal[i-1])) * abs(signal[i])
gardner_e.append(e)
if e > 0.8:
peak_sample = peak_sample + 1
peak_sample_acc.append(peak_sample)
elif e < -0.8:
peak_sample = peak_sample - 1
peak_sample_acc.append(peak_sample)
else:
break
else:
peak_sample = peak_sample + 1
peak_sample_acc.append(peak_sample)
else:
low_point = low_point + 1
peak_sample = peak_sample + 1
peak_sample_acc.append(peak_sample)
# 450 is the number of samples before the convergence symbol of the algorithm.
cut_i = peak_sample - 450
cut_f = cut_i + int((packet_s/4)*upsampler_f)
print("Cut_i = ", cut_i)
print("Cut_f = ", cut_f)
# For the code to still work, even when there is a big BER, this secction is required.
if cut_i > 730:
signal = signal[261:2306+510]
elif cut_i < 690:
signal = signal[261:2306+510]
else:
signal = signal[cut_i:cut_f]
symbols = signal[slice(0, len(signal), upsampler_f)]
return(symbols)
def demapper(symbols_I, symbols_Q, packetSize, threshold = 3.0):
"""Generates an array of bits using the values based on the 16QAM indexing vector.
- If the symbol amplitude is between 0 and the threshold, it corresponds to the bits 10, if it's greater than the threshold, it corresponds to the sequence 11.
- If the symbol amplitude is between 0 and -threshold, it corresponds to the bits 01, if it's lower than -threshold, it corresponds to the sequence 00.
After the recovery of the bits, both vectors (I and Q) are merged, generating the output bitstream.
Parameters
----------
symbols_I : 1D array of floats
Downsampled in-phase symbols.
symbols_Q : 1D array of floats
Downsampled quadrature symbols.
packetSize : int
Number of bits in the transmitted packet.
threshold : float, optional
The limit between two symbols in the 16QAM constellation. The default value is 3.
Returns
-------
bitstream : 1D array of ints
Bits transmitted.
"""
Ns = int(packetSize/4)
bits_I = []
bits_Q = []
for i in range(Ns):
if symbols_I[i] >= 0 and symbols_I[i] <= threshold:
bits_I.append(1)
bits_I.append(0)
if symbols_I[i] > threshold:
bits_I.append(1)
bits_I.append(1)
if symbols_I[i] < 0 and symbols_I[i] >= -threshold:
bits_I.append(0)
bits_I.append(1)
if symbols_I[i] < -threshold:
bits_I.append(0)
bits_I.append(0)
if symbols_Q[i] >= 0 and symbols_Q[i] <= threshold:
bits_Q.append(1)
bits_Q.append(0)
if symbols_Q[i] > threshold:
bits_Q.append(1)
bits_Q.append(1)
if symbols_Q[i] < 0 and symbols_Q[i] >= -threshold:
bits_Q.append(0)
bits_Q.append(1)
if symbols_Q[i] < -threshold:
bits_Q.append(0)
bits_Q.append(0)
bits_I = list(map(int, bits_I))
bits_Q = list(map(int, bits_Q))
bitStream = np.zeros(packetSize)
for i in range(len(bits_I)):
bitStream[2*i] = bits_I[i]
bitStream[2*i-1] = bits_Q[i-1]
return(bitStream)
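# Worked example (illustrative): with the default threshold of 3, an in-phase amplitude
# of +1.2 demaps to bits (1, 0), +3.7 to (1, 1), -1.2 to (0, 1) and -3.7 to (0, 0);
# the quadrature branch follows the same rule, and the I and Q bit pairs are then
# merged into the output bitstream.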
|
py | 1a52e9d25a80910dab00647ef1fc4316392b8024 | from . import FieldType
from . import FieldValue
from . import FieldTypeRegression
from . import FieldValueRegression
def rec(node, result, randomstate):
node.randomstate = randomstate
v = node.pick_value()
result[node.field_name] = v.get_field_value()
if v.next_field and len(v.next_field.values) > 0:
rec(v.next_field, result, randomstate)
def process_type(node, randomstate):
field = FieldType()
field.values = []
for field_key, _ in node.items():
if field_key not in ('count', 'ratio', 'total'):
field.field_name = field_key
for value_key, field_value in node[field_key].items():
if value_key not in ['total']:
field.values.append(process_value(field_value, value_key, randomstate))
return field
def process_stats(node, randomstate):
outer_type = None
current_value = None
for field_key, _ in node['stats'].items():
next_type = FieldTypeRegression()
next_type.field_name = field_key
value = FieldValueRegression(randomstate)
value.count = node['stats'][field_key]["count"]
value.mean = node['stats'][field_key]["mean"]
value.var = node['stats'][field_key]["var"]
value.std = node['stats'][field_key]["std"]
value.min = node['stats'][field_key]["min"]
value.max = node['stats'][field_key]["max"]
value.median = node['stats'][field_key]["median"]
if "best_fit_distribution" in node['stats'][field_key]:
value.best_fit = node['stats'][field_key]["best_fit_distribution"]
value.fit_parameter = node['stats'][field_key]["fit_parameter"]
else:
value.best_fit = None
value.fit_parameter = None
value.next_field = None
next_type.values = []
next_type.values.append(value)
if not outer_type:
outer_type = next_type
else:
current_value.next_field = next_type
current_value = value
return outer_type
def process_value(node, value, randomstate):
field_value = FieldValue()
field_value.field_value = value
field_value.ratio = node['ratio']
field_value.count = node['count']
if 'stats' in node:
field_value.next_field = process_stats(node, randomstate)
else:
field_value.next_field = process_type(node, randomstate)
return field_value
|
py | 1a52ea329131bfd92599fb95caab123f7cdb6de4 | import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Era_Phase2C6_timing_layer_bar_cff import Phase2C6_timing_layer_bar
process = cms.Process('PROD',Phase2C6_timing_layer_bar)
process.load("SimGeneral.HepPDTESSource.pythiapdt_cfi")
process.load("IOMC.EventVertexGenerators.VtxSmearedGauss_cfi")
process.load('Configuration.Geometry.GeometryExtended2026D44_cff')
process.load("Configuration.StandardSequences.MagneticField_cff")
process.load("Configuration.EventContent.EventContent_cff")
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Validation.HGCalValidation.hfnoseSimHitStudy_cfi')
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
from Configuration.AlCa.autoCond import autoCond
process.GlobalTag.globaltag = autoCond['phase2_realistic']
if hasattr(process,'MessageLogger'):
process.MessageLogger.categories.append('HGCalValidation')
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'file:step1.root',
)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
process.TFileService = cms.Service("TFileService",
fileName = cms.string('hfnSimHitD44tt.root'),
closeFileFast = cms.untracked.bool(True)
)
process.analysis_step = cms.Path(process.hgcalSimHitStudy)
# Schedule definition
process.schedule = cms.Schedule(process.analysis_step)
|
py | 1a52ea59e689178650035e52c4ed51762fa7cc30 | '''
Problem 72 - Counting fractions
Consider the fraction, n/d, where n and d are positive integers.
If n<d and HCF(n,d)=1, it is called a reduced proper fraction.
If we list the set of reduced proper fractions for d ≤ 8 in ascending order of size, we get:
1/8, 1/7, 1/6, 1/5, 1/4, 2/7, 1/3, 3/8, 2/5, 3/7, 1/2, 4/7, 3/5, 5/8, 2/3, 5/7, 3/4, 4/5, 5/6, 6/7, 7/8
It can be seen that there are 21 elements in this set.
How many elements would be contained in the set of reduced proper fractions for d ≤ 1,000,000 ?
'''
'''
Solution : It can be seen that the solution is:
sum(phi(i)) for i in range(2, 1000001)
We can use Euler's formula and a sieve to compute it easily
'''
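# Sanity check of the approach: running the same sieve with N = 8 gives
# phi(2)+phi(3)+...+phi(8) = 1+2+2+4+2+6+4 = 21, matching the 21 fractions
# listed in the problem statement for d <= 8.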
N = 10**6
# import numpy as np
# arr = np.arange(N+1)
# print(arr.shape)
arr = [i for i in range(N + 1)]
result = 0
for i in range(2, N + 1):
if arr[i] == i:
for j in range(i, N + 1, i):
arr[j] = (arr[j] // i) * (i - 1)
result += arr[i]
print(result)
|
py | 1a52eafcbe6a90a9d73ab2685e162a226766a473 | import numpy as np
from allennlp.common.testing import ModelTestCase
from tests import FIXTURES_ROOT
class TestBidirectionalLanguageModel(ModelTestCase):
def setUp(self):
super().setUp()
self.expected_embedding_shape = (2, 8, 14)
self.set_up_model(
FIXTURES_ROOT / "lm" / "language_model" / "experiment_bidirectional.jsonnet",
FIXTURES_ROOT / "lm" / "language_model" / "sentences.txt",
)
def test_bidirectional_lm_can_train_save_load(self):
self.ensure_model_can_train_save_and_load(self.param_file)
def test_batch_predictions_are_consistent(self):
self.ensure_batch_predictions_are_consistent(keys_to_ignore=["batch_weight"])
def test_forward_pass_runs_correctly(self):
training_tensors = self.dataset.as_tensor_dict()
result = self.model(**training_tensors)
assert set(result) == {
"loss",
"forward_loss",
"backward_loss",
"lm_embeddings",
"noncontextual_token_embeddings",
"mask",
"batch_weight",
}
# The model should preserve the BOS / EOS tokens.
embeddings = result["lm_embeddings"]
assert tuple(embeddings.shape) == self.expected_embedding_shape
loss = result["loss"].item()
forward_loss = result["forward_loss"].item()
backward_loss = result["backward_loss"].item()
np.testing.assert_almost_equal(loss, (forward_loss + backward_loss) / 2, decimal=3)
class TestBidirectionalLanguageModelUnsampled(TestBidirectionalLanguageModel):
def setUp(self):
super().setUp()
self.set_up_model(
FIXTURES_ROOT / "lm" / "language_model" / "experiment_bidirectional_unsampled.jsonnet",
FIXTURES_ROOT / "lm" / "language_model" / "sentences.txt",
)
|
py | 1a52ec1b098218a3d88404d5aa9e9de5af5c3ae0 | from random import choice
class RandomWalk:
"""A Class to generate random walks."""
def __init__(self, num_points=5000) -> None:
"""Initialize attributes of a walk"""
self.num_points = num_points
# all walks start at (0, 0)
self.x_values = [0]
self.y_values = [0]
def fill_walk(self):
"""Calculate all the points in the walk."""
        # Keep taking steps until the walk reaches the desired length.
while len(self.x_values) < self.num_points:
# Decide which direction to go and how far to go in that direction.
x_direction = choice([1, -1])
x_distance = choice([0, 1, 2, 3, 4])
x_step = x_direction * x_distance
y_direction = choice([1, -1])
y_distance = choice([0, 1, 2, 3, 4])
y_step = y_direction * y_distance
# Reject moves that go nowhere.
if x_step == 0 and y_step == 0:
continue
            # Calculate the next x and y values.
next_x = self.x_values[-1] + x_step
next_y = self.y_values[-1] + y_step
self.x_values.append(next_x)
self.y_values.append(next_y)
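# Illustrative usage sketch (not part of the RandomWalk class above); it assumes
# matplotlib is available and the point count is arbitrary.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    rw = RandomWalk(num_points=1000)
    rw.fill_walk()
    plt.scatter(rw.x_values, rw.y_values, s=5)
    plt.show()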
|
py | 1a52ec75ad41101d71591b9d4ae7ef47b239ed73 | # -*- coding: utf-8 -*-
from openerp import models, fields, api
class Wizard(models.TransientModel):
_name = 'openacademy.wizard'
def _default_session(self):
return self.env['openacademy.session'].browse(self._context.get('active_ids'))
session_ids = fields.Many2many('openacademy.session',
string="Session",
required=True,
default=_default_session)
attendee_ids = fields.Many2many('res.partner',
string="Attendees")
@api.multi
def subscribe(self):
for session in self.session_ids:
session.attendee_ids |= self.attendee_ids
return {}
|
py | 1a52ed56fa842ab55fc4b91ff0f1936de3fd7129 | import os
import bottle
import utils
import background as bg
@bottle.route("/")
@bottle.route("/index")
def index():
return bottle.template("view/index")
@bottle.route("/chooseTable")
def chooseTable():
args = bottle.request.query
filePath = args.databaseFile
if not os.path.isfile(filePath):
return '<h2>File "{}" not accessible.</h2>'.format(filePath) # TODO: redirect after 5sec to index
utils.writePref(databaseFile=filePath)
tables = bg.sqlCommandRunner(filePath, ".tables")
tables = tables.split()
return bottle.template("view/chooseTable"
, tableList=tables)
@bottle.route("/displayTable")
def displayTable():
args = bottle.request.query
pageNum = int(args.pageNum)
tableName = args.tableName
chunkSize = int(args.chunkSize)
filePath = utils.readPref("databaseFile")
col_names = bg.getColNames(filePath, tableName)
to = chunkSize*pageNum
frm = to - (chunkSize-1)
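    # e.g. chunkSize=25, pageNum=2 -> rows 26..50 (1-based, inclusive)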
data = bg.getRows(filePath, tableName, col_names, frm, to)
    isThisLastPage = len(data) < to - frm + 1
    isThisFirstPage = pageNum == 1
return bottle.template("view/displayTable"
, tableName=tableName
, chunkSize=chunkSize
, pageNum=pageNum
, table_head=col_names
, table_body=data
, isThisLastPage=isThisLastPage
, isThisFirstPage=isThisFirstPage)
if __name__ == '__main__':
bottle.run(debug=True, reloader=True)
|
py | 1a52efb9ffca532b894ca34fd8857a7fb33bcf80 | # coding: utf-8
"""
Hydrogen Atom API
The Hydrogen Atom API # noqa: E501
OpenAPI spec version: 1.7.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class PageCustomerRevenue(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'content': 'list[CustomerRevenue]',
'first': 'bool',
'last': 'bool',
'number': 'int',
'number_of_elements': 'int',
'size': 'int',
'sort': 'list[Sort]',
'total_elements': 'int',
'total_pages': 'int'
}
attribute_map = {
'content': 'content',
'first': 'first',
'last': 'last',
'number': 'number',
'number_of_elements': 'number_of_elements',
'size': 'size',
'sort': 'sort',
'total_elements': 'total_elements',
'total_pages': 'total_pages'
}
def __init__(self, content=None, first=None, last=None, number=None, number_of_elements=None, size=None, sort=None, total_elements=None, total_pages=None): # noqa: E501
"""PageCustomerRevenue - a model defined in Swagger""" # noqa: E501
self._content = None
self._first = None
self._last = None
self._number = None
self._number_of_elements = None
self._size = None
self._sort = None
self._total_elements = None
self._total_pages = None
self.discriminator = None
if content is not None:
self.content = content
if first is not None:
self.first = first
if last is not None:
self.last = last
if number is not None:
self.number = number
if number_of_elements is not None:
self.number_of_elements = number_of_elements
if size is not None:
self.size = size
if sort is not None:
self.sort = sort
if total_elements is not None:
self.total_elements = total_elements
if total_pages is not None:
self.total_pages = total_pages
@property
def content(self):
"""Gets the content of this PageCustomerRevenue. # noqa: E501
:return: The content of this PageCustomerRevenue. # noqa: E501
:rtype: list[CustomerRevenue]
"""
return self._content
@content.setter
def content(self, content):
"""Sets the content of this PageCustomerRevenue.
:param content: The content of this PageCustomerRevenue. # noqa: E501
:type: list[CustomerRevenue]
"""
self._content = content
@property
def first(self):
"""Gets the first of this PageCustomerRevenue. # noqa: E501
:return: The first of this PageCustomerRevenue. # noqa: E501
:rtype: bool
"""
return self._first
@first.setter
def first(self, first):
"""Sets the first of this PageCustomerRevenue.
:param first: The first of this PageCustomerRevenue. # noqa: E501
:type: bool
"""
self._first = first
@property
def last(self):
"""Gets the last of this PageCustomerRevenue. # noqa: E501
:return: The last of this PageCustomerRevenue. # noqa: E501
:rtype: bool
"""
return self._last
@last.setter
def last(self, last):
"""Sets the last of this PageCustomerRevenue.
:param last: The last of this PageCustomerRevenue. # noqa: E501
:type: bool
"""
self._last = last
@property
def number(self):
"""Gets the number of this PageCustomerRevenue. # noqa: E501
:return: The number of this PageCustomerRevenue. # noqa: E501
:rtype: int
"""
return self._number
@number.setter
def number(self, number):
"""Sets the number of this PageCustomerRevenue.
:param number: The number of this PageCustomerRevenue. # noqa: E501
:type: int
"""
self._number = number
@property
def number_of_elements(self):
"""Gets the number_of_elements of this PageCustomerRevenue. # noqa: E501
:return: The number_of_elements of this PageCustomerRevenue. # noqa: E501
:rtype: int
"""
return self._number_of_elements
@number_of_elements.setter
def number_of_elements(self, number_of_elements):
"""Sets the number_of_elements of this PageCustomerRevenue.
:param number_of_elements: The number_of_elements of this PageCustomerRevenue. # noqa: E501
:type: int
"""
self._number_of_elements = number_of_elements
@property
def size(self):
"""Gets the size of this PageCustomerRevenue. # noqa: E501
:return: The size of this PageCustomerRevenue. # noqa: E501
:rtype: int
"""
return self._size
@size.setter
def size(self, size):
"""Sets the size of this PageCustomerRevenue.
:param size: The size of this PageCustomerRevenue. # noqa: E501
:type: int
"""
self._size = size
@property
def sort(self):
"""Gets the sort of this PageCustomerRevenue. # noqa: E501
:return: The sort of this PageCustomerRevenue. # noqa: E501
:rtype: list[Sort]
"""
return self._sort
@sort.setter
def sort(self, sort):
"""Sets the sort of this PageCustomerRevenue.
:param sort: The sort of this PageCustomerRevenue. # noqa: E501
:type: list[Sort]
"""
self._sort = sort
@property
def total_elements(self):
"""Gets the total_elements of this PageCustomerRevenue. # noqa: E501
:return: The total_elements of this PageCustomerRevenue. # noqa: E501
:rtype: int
"""
return self._total_elements
@total_elements.setter
def total_elements(self, total_elements):
"""Sets the total_elements of this PageCustomerRevenue.
:param total_elements: The total_elements of this PageCustomerRevenue. # noqa: E501
:type: int
"""
self._total_elements = total_elements
@property
def total_pages(self):
"""Gets the total_pages of this PageCustomerRevenue. # noqa: E501
:return: The total_pages of this PageCustomerRevenue. # noqa: E501
:rtype: int
"""
return self._total_pages
@total_pages.setter
def total_pages(self, total_pages):
"""Sets the total_pages of this PageCustomerRevenue.
:param total_pages: The total_pages of this PageCustomerRevenue. # noqa: E501
:type: int
"""
self._total_pages = total_pages
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(PageCustomerRevenue, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PageCustomerRevenue):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py | 1a52efce95006eeff62ba79f9e32387a7078d2c6 | #!/usr/bin/env python
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the listreceivedbyaddress API
import sys; assert sys.version_info < (3,), ur"This script does not run under Python 3. Please use Python 2.7.x."
from test_framework.test_framework import BitcoinTestFramework
from decimal import Decimal
def get_sub_array_from_array(object_array, to_match):
'''
Finds and returns a sub array from an array of arrays.
    to_match should be a unique identifier of a sub array
'''
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
return item
return []
def check_array_result(object_array, to_match, expected, should_not_find = False):
'''
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found in object_array
'''
if should_not_find == True:
expected = { }
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
for key,value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
num_matched = num_matched+1
if num_matched == 0 and should_not_find != True:
raise AssertionError("No objects matched %s"%(str(to_match)))
if num_matched > 0 and should_not_find == True:
raise AssertionError("Objects was matched %s"%(str(to_match)))
class ReceivedByTest(BitcoinTestFramework):
def run_test(self):
'''
listreceivedbyaddress Test
'''
# Send from node 0 to 1
addr = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendtoaddress(addr, 0.1)
self.sync_all()
# Check not listed in listreceivedbyaddress because has 0 confirmations
check_array_result(self.nodes[1].listreceivedbyaddress(),
{"address":addr},
{ },
True)
        # Bury Tx under 10 blocks so it will be returned by listreceivedbyaddress
self.nodes[1].generate(10)
self.sync_all()
check_array_result(self.nodes[1].listreceivedbyaddress(),
{"address":addr},
{"address":addr, "account":"", "amount":Decimal("0.1"), "confirmations":10, "txids":[txid,]})
        # With minimum confirmations < 10
check_array_result(self.nodes[1].listreceivedbyaddress(5),
{"address":addr},
{"address":addr, "account":"", "amount":Decimal("0.1"), "confirmations":10, "txids":[txid,]})
        # With minimum confirmations > 10, should not find Tx
check_array_result(self.nodes[1].listreceivedbyaddress(11),{"address":addr},{ },True)
# Empty Tx
addr = self.nodes[1].getnewaddress()
check_array_result(self.nodes[1].listreceivedbyaddress(0,True),
{"address":addr},
{"address":addr, "account":"", "amount":0, "confirmations":0, "txids":[]})
'''
getreceivedbyaddress Test
'''
# Send from node 0 to 1
addr = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendtoaddress(addr, 0.1)
self.sync_all()
# Check balance is 0 because of 0 confirmations
balance = self.nodes[1].getreceivedbyaddress(addr)
if balance != Decimal("0.0"):
raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))
# Check balance is 0.1
balance = self.nodes[1].getreceivedbyaddress(addr,0)
if balance != Decimal("0.1"):
raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))
        # Bury Tx under 10 blocks so it will be returned by the default getreceivedbyaddress
self.nodes[1].generate(10)
self.sync_all()
balance = self.nodes[1].getreceivedbyaddress(addr)
if balance != Decimal("0.1"):
raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))
'''
listreceivedbyaccount + getreceivedbyaccount Test
'''
# set pre-state
addrArr = self.nodes[1].getnewaddress()
account = self.nodes[1].getaccount(addrArr)
received_by_account_json = get_sub_array_from_array(self.nodes[1].listreceivedbyaccount(),{"account":account})
if len(received_by_account_json) == 0:
raise AssertionError("No accounts found in node")
balance_by_account = self.nodes[1].getreceivedbyaccount(account)
txid = self.nodes[0].sendtoaddress(addr, 0.1)
self.sync_all()
# listreceivedbyaccount should return received_by_account_json because of 0 confirmations
check_array_result(self.nodes[1].listreceivedbyaccount(),
{"account":account},
received_by_account_json)
        # getreceivedbyaccount should return the same balance because of 0 confirmations
balance = self.nodes[1].getreceivedbyaccount(account)
if balance != balance_by_account:
raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))
self.nodes[1].generate(10)
self.sync_all()
# listreceivedbyaccount should return updated account balance
check_array_result(self.nodes[1].listreceivedbyaccount(),
{"account":account},
{"account":received_by_account_json["account"], "amount":(received_by_account_json["amount"] + Decimal("0.1"))})
        # getreceivedbyaccount should return the updated balance
balance = self.nodes[1].getreceivedbyaccount(account)
if balance != balance_by_account + Decimal("0.1"):
raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))
# Create a new account named "mynewaccount" that has a 0 balance
self.nodes[1].getaccountaddress("mynewaccount")
received_by_account_json = get_sub_array_from_array(self.nodes[1].listreceivedbyaccount(0,True),{"account":"mynewaccount"})
if len(received_by_account_json) == 0:
raise AssertionError("No accounts found in node")
# Test includeempty of listreceivedbyaccount
if received_by_account_json["amount"] != Decimal("0.0"):
raise AssertionError("Wrong balance returned by listreceivedbyaccount, %0.2f"%(received_by_account_json["amount"]))
# Test getreceivedbyaccount for 0 amount accounts
balance = self.nodes[1].getreceivedbyaccount("mynewaccount")
if balance != Decimal("0.0"):
raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))
if __name__ == '__main__':
ReceivedByTest().main()
|
py | 1a52efeef5b0cebb58d23b74e1a945ae5cb428ce | # -*- encoding: utf-8 -*-
"""
License: MIT
Copyright (c) 2019 - present AppSeed.us
"""
from django.contrib import admin
from django.urls import path, include # add this
urlpatterns = [
path('admin/', admin.site.urls),
path('api/', include("rest.urls")),
path("", include("authentication.urls")), # add this
path("", include("app.urls")) # add this
]
|
py | 1a52f0b472cb0b8a27c498a376f822d4a842d142 | """
Blocklist management
"""
from collections import defaultdict
from typing import TYPE_CHECKING, Any, Dict, List
import pkg_resources
from .configuration import BandersnatchConfig
if TYPE_CHECKING:
from configparser import SectionProxy
# The API_REVISION is incremented if the plugin class is modified in a
# backwards-incompatible way, in order to prevent loading older plugins
# that may be installed but would break due to changes to the methods
# of these classes.
PLUGIN_API_REVISION = 2
PROJECT_PLUGIN_RESOURCE = f"bandersnatch_filter_plugins.v{PLUGIN_API_REVISION}.project"
METADATA_PLUGIN_RESOURCE = (
f"bandersnatch_filter_plugins.v{PLUGIN_API_REVISION}.metadata"
)
RELEASE_PLUGIN_RESOURCE = f"bandersnatch_filter_plugins.v{PLUGIN_API_REVISION}.release"
RELEASE_FILE_PLUGIN_RESOURCE = (
f"bandersnatch_filter_plugins.v{PLUGIN_API_REVISION}.release_file"
)
class Filter:
"""
Base Filter class
"""
name = "filter"
deprecated_name: str = ""
def __init__(self, *args: Any, **kwargs: Any) -> None:
self.configuration = BandersnatchConfig().config
if (
"plugins" not in self.configuration
or "enabled" not in self.configuration["plugins"]
):
return
split_plugins = self.configuration["plugins"]["enabled"].split("\n")
if (
"all" not in split_plugins
and self.name not in split_plugins
# TODO: Remove after 5.0
and not (self.deprecated_name and self.deprecated_name in split_plugins)
):
return
self.initialize_plugin()
def initialize_plugin(self) -> None:
"""
Code to initialize the plugin
"""
# The initialize_plugin method is run once to initialize the plugin. This should
# contain all code to set up the plugin.
# This method is not run in the fast path and should be used to do things like
# indexing filter databases, etc that will speed the operation of the filter
# and check_match methods that are called in the fast path.
pass
def filter(self, metadata: dict) -> bool:
"""
Check if the plugin matches based on the package's metadata.
Returns
=======
bool:
True if the values match a filter rule, False otherwise
"""
return False
def check_match(self, **kwargs: Any) -> bool:
"""
Check if the plugin matches based on the arguments provides.
Returns
=======
bool:
True if the values match a filter rule, False otherwise
"""
return False
@property
def allowlist(self) -> "SectionProxy":
return self.configuration["allowlist"]
@property
def blocklist(self) -> "SectionProxy":
return self.configuration["blocklist"]
class FilterProjectPlugin(Filter):
"""
Plugin that blocks sync operations for an entire project
"""
name = "project_plugin"
class FilterMetadataPlugin(Filter):
"""
Plugin that blocks sync operations for an entire project based on info fields.
"""
name = "metadata_plugin"
class FilterReleasePlugin(Filter):
"""
Plugin that modifies the download of specific releases or dist files
"""
name = "release_plugin"
class FilterReleaseFilePlugin(Filter):
"""
    Plugin that modifies the download of specific release or dist files
"""
name = "release_file_plugin"
class LoadedFilters:
"""
    A class to load all of the enabled filters
"""
ENTRYPOINT_GROUPS = [
PROJECT_PLUGIN_RESOURCE,
METADATA_PLUGIN_RESOURCE,
RELEASE_PLUGIN_RESOURCE,
RELEASE_FILE_PLUGIN_RESOURCE,
]
def __init__(self, load_all: bool = False) -> None:
"""
        Loads and stores all of the specified filters from the config file
"""
self.config = BandersnatchConfig().config
self.loaded_filter_plugins: Dict[str, List["Filter"]] = defaultdict(list)
self.enabled_plugins = self._load_enabled()
if load_all:
self._load_filters(self.ENTRYPOINT_GROUPS)
def _load_enabled(self) -> List[str]:
"""
Reads the config and returns all the enabled plugins
"""
enabled_plugins: List[str] = []
try:
config_plugins = self.config["plugins"]["enabled"]
split_plugins = config_plugins.split("\n")
if "all" in split_plugins:
enabled_plugins = ["all"]
else:
for plugin in split_plugins:
if not plugin:
continue
enabled_plugins.append(plugin)
except KeyError:
pass
return enabled_plugins
def _load_filters(self, groups: List[str]) -> None:
"""
Loads filters from the entry-point groups specified in groups
"""
for group in groups:
plugins = set()
for entry_point in pkg_resources.iter_entry_points(group=group):
plugin_class = entry_point.load()
plugin_instance = plugin_class()
if (
"all" in self.enabled_plugins
or plugin_instance.name in self.enabled_plugins
or plugin_instance.deprecated_name in self.enabled_plugins
):
plugins.add(plugin_instance)
self.loaded_filter_plugins[group] = list(plugins)
def filter_project_plugins(self) -> List[Filter]:
"""
Load and return the project filtering plugin objects
Returns
-------
list of bandersnatch.filter.Filter:
List of objects derived from the bandersnatch.filter.Filter class
"""
if PROJECT_PLUGIN_RESOURCE not in self.loaded_filter_plugins:
self._load_filters([PROJECT_PLUGIN_RESOURCE])
return self.loaded_filter_plugins[PROJECT_PLUGIN_RESOURCE]
def filter_metadata_plugins(self) -> List[Filter]:
"""
Load and return the metadata filtering plugin objects
Returns
-------
list of bandersnatch.filter.Filter:
List of objects derived from the bandersnatch.filter.Filter class
"""
if METADATA_PLUGIN_RESOURCE not in self.loaded_filter_plugins:
self._load_filters([METADATA_PLUGIN_RESOURCE])
return self.loaded_filter_plugins[METADATA_PLUGIN_RESOURCE]
def filter_release_plugins(self) -> List[Filter]:
"""
Load and return the release filtering plugin objects
Returns
-------
list of bandersnatch.filter.Filter:
List of objects derived from the bandersnatch.filter.Filter class
"""
if RELEASE_PLUGIN_RESOURCE not in self.loaded_filter_plugins:
self._load_filters([RELEASE_PLUGIN_RESOURCE])
return self.loaded_filter_plugins[RELEASE_PLUGIN_RESOURCE]
def filter_release_file_plugins(self) -> List[Filter]:
"""
Load and return the release file filtering plugin objects
Returns
-------
list of bandersnatch.filter.Filter:
List of objects derived from the bandersnatch.filter.Filter class
"""
if RELEASE_FILE_PLUGIN_RESOURCE not in self.loaded_filter_plugins:
self._load_filters([RELEASE_FILE_PLUGIN_RESOURCE])
return self.loaded_filter_plugins[RELEASE_FILE_PLUGIN_RESOURCE]
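# Illustrative sketch only: a minimal project filter built on the Filter classes
# above. The plugin name and the blocked-project set are made up, and the
# metadata layout (info.name) is an assumption, not taken from this module.
class ExampleBlockProjectFilter(FilterProjectPlugin):
    name = "example_block_project"
    def initialize_plugin(self) -> None:
        # One-time setup: decide which project names to block.
        self.blocked = {"example-project"}
    def filter(self, metadata: dict) -> bool:
        # Return True when the project matches a filter rule (i.e. should be skipped).
        return metadata.get("info", {}).get("name") in self.blocked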
|
py | 1a52f20b524bec8d1948eaeb1a4ca3e024491046 | __author__ = "Christopher Dean"
__copyright__ = ""
__credits__ = ["Christopher Dean"]
__version__ = ""
__maintainer__ = "Christopher Dean"
__email__ = "[email protected]"
__status__ = "I'm doing fine."
import sys
import argparse
def parse_cmdline_params(cmdline_params):
info = "Remove duplicate annotations from FASTA formatted reference file"
parser = argparse.ArgumentParser(description=info)
parser.add_argument('-r', '--reference_sequence', type=str, required=True,
help='Please provide a FASTA formatted reference file')
parser.add_argument('-o', '--output', type=str, required=True,
help='Please provide an output file name in FASTA format')
return parser.parse_args(cmdline_params)
def read_fasta(filename):
"""
Removes duplicate annotations from FASTA file
:param (str) filename: FASTA file
:return (dict) records: A dictionary of FASTA records
"""
with open(filename, 'r') as fp:
records = {}
for line in fp:
key = line
            value = next(fp)  # use the builtin so this works on both Python 2 and 3
if key in records:
if len(value) > len(records[key]):
records[key] = value
else:
records[key] = value
fp.close()
return records
def write_fasta(filename, records):
"""
Writes a dictionary of FASTA records to file
:param (str) filename: Output file
:param (dict) records: A dictionary of FASTA records
:return (void): Void method
"""
handler = open(filename, 'w')
for k, v in records.items():
handler.write(k)
handler.write(v)
handler.close()
if __name__ == "__main__":
opts = parse_cmdline_params(sys.argv[1:])
records = read_fasta(opts.reference_sequence)
write_fasta(opts.output, records)
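# Example invocation of the script above (script and file names are placeholders):
#   python remove_duplicate_annotations.py -r reference.fasta -o deduplicated.fasta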
|
py | 1a52f29277b444e7d7e7688b6cbbd45120fbb907 | from __future__ import unicode_literals
from django.apps import AppConfig
class DesktopConfig(AppConfig):
name = 'desktop'
|
bzl | 1a52f29e7b9b4ed4ead0cb64519ecd0c9abe3459 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# DO NOT EDIT -- GENERATED BY CMake -- Change the CMakeLists.txt file if needed
"""Automatically generated unit tests list - DO NOT EDIT."""
google_cloud_cpp_testing_unit_tests = [
"command_line_parsing_test.cc",
"contains_once_test.cc",
"crash_handler_test.cc",
"example_driver_test.cc",
"scoped_environment_test.cc",
"status_matchers_test.cc",
]
|
py | 1a52f2ef4fdbc648203e90a2f3000aa0ffea4f19 | import random
class BaseTank:
def __init__(self, tank_data):
self.death = False
self.model = tank_data.get('short_name')
self.nation = tank_data.get('nation')
default_profile = tank_data.get('default_profile')
self.armor_dict = default_profile.get('armor').get('hull')
self.health = default_profile.get('hp')
self.precision = round((1 - default_profile.get('gun').get('dispersion')) * 100) - 10
ammo_list = default_profile.get('ammo')
self.ammo_damage = None
for ammo in ammo_list:
if ammo.get('type') == 'ARMOR_PIERCING':
self.ammo_damage = ammo.get('damage')[2]
break
if not self.ammo_damage:
raise Exception('No ammo damage found for type Armor Piercing')
def __str__(self):
return f'''
Nation: {self.nation}
Model: {self.model}
Type: {type(self).__name__}
Armor: {self.armor_dict}
Health: {self.health}
Damage: {self.ammo_damage}
Precision: {self.precision}'''
def dodge(self):
return False
def inflict_damage(self, enemy_tank):
result = None
if not self.death:
if random.randint(1, 100) >= self.precision:
enemy_tank.receive_damage(self.ammo_damage)
result = 'hit'
else:
result = 'miss!'
return result
def receive_damage(self, damage_amount):
if not self.death:
self.health -= damage_amount
if self.health <= 0:
self.death = True
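# Illustrative sketch of the `tank_data` shape this class expects, reconstructed
# from the attribute accesses above; every value below is made up.
# example_tank_data = {
#     'short_name': 'T-34',
#     'nation': 'ussr',
#     'default_profile': {
#         'hp': 450,
#         'armor': {'hull': {'front': 45, 'sides': 45, 'rear': 40}},
#         'gun': {'dispersion': 0.43},
#         'ammo': [{'type': 'ARMOR_PIERCING', 'damage': [70, 85, 110]}],
#     },
# }
# tank = BaseTank(example_tank_data)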
|
py | 1a52f2f4a173ca6ebc4e16b55d38c9518b4ada40 | import speedtest as st
import sys
import threading
import itertools
import time
from colorama import Fore
from plugin import plugin
class SpinnerThread(threading.Thread):
"""SpinnerThread class to show a spinner on command line while the progream is running"""
def __init__(self, label="Hmmm... ", delay=0.2):
super(SpinnerThread, self).__init__()
self.label = label
self.delay = delay
self.running = False
def start(self):
self.running = True
super(SpinnerThread, self).start()
def run(self):
chars = itertools.cycle(r'-\|/')
while self.running:
sys.stdout.write('\r' + self.label + next(chars))
sys.stdout.flush()
time.sleep(self.delay)
def stop(self):
self.running = False
self.join()
sys.stdout.write('\r')
sys.stdout.flush()
@plugin(network=True)
def speedtest(jarvis, s):
"""Runs a speedtest on your internet connection"""
try:
res = st.Speedtest()
except st.ConfigRetrievalError:
return jarvis.connection_error()
    # Create a spinner on the command line to show that it's running
spinner = SpinnerThread('Running the test ', 0.15)
spinner.start()
res.get_best_server()
download_speed = res.download()
upload_speed = res.upload()
# results_dict = res.results.dict()
spinner.stop()
# Print the results
jarvis.say('Speed test results:', Fore.GREEN)
jarvis.say('Download: ' + pretty_speed(download_speed), Fore.GREEN)
jarvis.say('Upload: ' + pretty_speed(upload_speed), Fore.GREEN)
def pretty_speed(speed):
""" return speed value prettily accordingly in either bps, Kbps, Mbps, Gbps"""
unit = 'bps'
kmg = ['', 'K', 'M', 'G']
i = 0
while speed >= 1000:
speed /= 1000
i += 1
return "{:.2f}".format(speed) + ' ' + kmg[i] + unit
|
py | 1a52f3ee612e8069d38d0acc5c6edc102a888893 | import math
import re
import threading
import time
from typing import Any, Callable, List, Optional, Set, Tuple
import antlr4
from pytest import mark, param, raises, warns
from omegaconf import (
AnyNode,
Container,
DictConfig,
ListConfig,
OmegaConf,
_utils,
grammar_parser,
grammar_visitor,
)
from omegaconf._utils import nullcontext
from omegaconf.errors import (
GrammarParseError,
InterpolationKeyError,
InterpolationResolutionError,
UnsupportedInterpolationType,
)
TAB = "\t" # to be used in raw strings, e.g. `fr"C:\{TAB}foo"`
# Characters that are not allowed by the grammar in config key names.
INVALID_CHARS_IN_KEY_NAMES = r"""\{}()[].:"' """
UNQUOTED_SPECIAL = r"/-\+.$%*@?|" # special characters allowed in unquoted strings
# A fixed config that may be used (but not modified!) by tests.
BASE_TEST_CFG = OmegaConf.create(
{
# Standard data types.
"str": "hi",
"int": 123,
"float": 1.2,
"dict": {"a": 0, "b": {"c": 1}},
"list": [x - 1 for x in range(11)],
"null": None,
# Special cases.
"x@y": 123, # @ in name
"$x$y$z$": 456, # $ in name (beginning, middle and end)
"0": 0, # integer name
"FalsE": {"TruE": True}, # bool name
"None": {"null": 1}, # null-like name
"1": {"2": 12}, # dot-path with int keys
# Used in nested interpolations.
"str_test": "test",
"ref_str": "str",
"options": {"a": "A", "b": "B"},
"choice": "a",
"rel_opt": ".options",
}
)
# Parameters for tests of the "singleElement" rule when there is no interpolation.
# Each item is a tuple with three elements:
# - The id of the test.
# - The expression to be evaluated.
# - The expected result, that may be an exception. If it is a `GrammarParseError` then
# it is assumed that the parsing will fail. If it is another kind of exception then
# it is assumed that the parsing will succeed, but this exception will be raised when
# visiting (= evaluating) the parse tree. If the expected behavior is for the parsing
# to succeed, but a `GrammarParseError` to be raised when visiting it, then set the
# expected result to the pair `(None, GrammarParseError)`.
PARAMS_SINGLE_ELEMENT_NO_INTERPOLATION: List[Tuple[str, str, Any]] = [
# Special keywords.
("null", "null", None),
("true", "TrUe", True),
("false", "falsE", False),
("true_false", "true_false", "true_false"),
# Integers.
("int", "123", 123),
("int_pos", "+123", 123),
("int_neg", "-123", -123),
("int_underscore", "1_000", 1000),
("int_bad_underscore_1", "1_000_", "1_000_"),
("int_bad_underscore_2", "1__000", "1__000"),
("int_bad_underscore_3", "_1000", "_1000"),
("int_bad_zero_start", "007", "007"),
# Floats.
("float", "1.1", 1.1),
("float_no_int", ".1", 0.1),
("float_no_decimal", "1.", 1.0),
("float_minus", "-.2", -0.2),
("float_underscore", "1.1_1", 1.11),
("float_bad_1", "1.+2", "1.+2"),
("float_bad_2", r"1\.2", r"1\.2"),
("float_bad_3", "1.2_", "1.2_"),
("float_exp_1", "-1e2", -100.0),
("float_exp_2", "+1E-2", 0.01),
("float_exp_3", "1_0e1_0", 10e10),
("float_exp_4", "1.07e+2", 107.0),
("float_exp_5", "1e+03", 1000.0),
("float_exp_bad_1", "e-2", "e-2"),
("float_exp_bad_2", "01e2", "01e2"),
("float_inf", "inf", math.inf),
("float_plus_inf", "+inf", math.inf),
("float_minus_inf", "-inf", -math.inf),
("float_nan", "nan", math.nan),
("float_plus_nan", "+nan", math.nan),
("float_minus_nan", "-nan", math.nan),
# Unquoted strings.
# Note: raw strings do not allow trailing \, adding a space and stripping it.
(
"str_legal",
(r" a" + UNQUOTED_SPECIAL + r"\\ ").strip(),
(r" a" + UNQUOTED_SPECIAL + r"\ ").strip(),
),
("str_illegal_1", "a,=b", GrammarParseError),
("str_illegal_2", f"{chr(200)}", GrammarParseError),
("str_illegal_3", f"{chr(129299)}", GrammarParseError),
("str_dot", ".", "."),
("str_dollar", "$", "$"),
("str_colon", ":", ":"),
("str_ws_1", "hello world", "hello world"),
("str_ws_2", "a b\tc \t\t d", "a b\tc \t\t d"),
("str_esc_ws_1", r"\ hello\ world\ ", " hello world "),
("str_esc_ws_2", fr"\ \{TAB}\{TAB}", f" {TAB}{TAB}"),
("str_esc_comma", r"hello\, world", "hello, world"),
("str_esc_colon", r"a\:b", "a:b"),
("str_esc_equal", r"a\=b", "a=b"),
("str_esc_parentheses", r"\(foo\)", "(foo)"),
("str_esc_brackets", r"\[foo\]", "[foo]"),
("str_esc_braces", r"\{foo\}", "{foo}"),
("str_esc_backslash", r" \ ".strip(), r" \ ".strip()),
("str_backslash_noesc", r"ab\cd", r"ab\cd"),
("str_esc_illegal_1", r"\#", GrammarParseError),
("str_esc_illegal_2", r""" \'\" """.strip(), GrammarParseError),
# Quoted strings.
("str_quoted_single", "'!@#$%^&*|()[]:.,\"'", '!@#$%^&*|()[]:.,"'),
("str_quoted_double", '"!@#$%^&*|()[]:.,\'"', "!@#$%^&*|()[]:.,'"),
("str_quoted_outer_ws_single", "' a \t'", " a \t"),
("str_quoted_outer_ws_double", '" a \t"', " a \t"),
("str_quoted_int", "'123'", "123"),
("str_quoted_null", "'null'", "null"),
("str_quoted_bool", "['truE', \"FalSe\"]", ["truE", "FalSe"]),
("str_quoted_list", "'[a,b, c]'", "[a,b, c]"),
("str_quoted_dict", '"{a:b, c: d}"', "{a:b, c: d}"),
("str_quoted_backslash_noesc_single", r"'a\b'", r"a\b"),
("str_quoted_backslash_noesc_double", r'"a\b"', r"a\b"),
("str_quoted_concat_bad_2", "'Hi''there'", GrammarParseError),
("str_quoted_too_many_1", "''a'", GrammarParseError),
("str_quoted_too_many_2", "'a''", GrammarParseError),
("str_quoted_too_many_3", "''a''", GrammarParseError),
("str_quoted_trailing_esc_1", r"'abc\\'", r" abc\ ".strip()),
("str_quoted_trailing_esc_2", r"'abc\\\\'", r" abc\\ ".strip()),
("str_quoted_no_esc_single_1", r"'abc\def'", r"abc\def"),
("str_quoted_no_esc_single_2", r"'abc\\def'", r"abc\\def"),
("str_quoted_no_esc_single_3", r"'\\\abc\def'", r"\\\abc\def"),
("str_quoted_no_esc_dollar_single", r"'abc\\$$'", r"abc\\$$"),
("str_quoted_no_esc_double_1", r'"abc\def"', r"abc\def"),
("str_quoted_no_esc_double_2", r'"abc\\def"', r"abc\\def"),
("str_quoted_no_esc_double_3", r'"\\\abc\def"', r"\\\abc\def"),
("str_quoted_no_esc_dollar_double", r'"abc\\$$"', r"abc\\$$"),
("str_quoted_bad_1", r'"abc\"', GrammarParseError),
("str_quoted_bad_2", r'"abc\\\"', GrammarParseError),
("str_quoted_esc_quote_single_1", r"'abc\'def'", "abc'def"),
("str_quoted_esc_quote_single_2", r"'abc\\\'def'", r"abc\'def"),
("str_quoted_esc_quote_single_3", r"'abc\\\\\'def'", r"abc\\'def"),
("str_quoted_esc_quote_single_4", r"'a\'b\'cdef\\\''", r"a'b'cdef\'"),
("str_quoted_esc_quote_single_bad", r"'abc\\'def'", GrammarParseError),
("str_quoted_esc_quote_double_1", r'"abc\"def"', 'abc"def'),
("str_quoted_esc_quote_double_2", r'"abc\\\"def"', r"abc\"def"),
("str_quoted_esc_quote_double_3", r'"abc\\\\\"def"', r'abc\\"def'),
("str_quoted_esc_quote_double_4", r'"a\"b\"cdef\\\""', r'a"b"cdef\"'),
("str_quoted_esc_quote_double_bad", r'"abc\\"def"', GrammarParseError),
("str_quoted_empty", "''", ""),
("str_quoted_basic", "'a'", "a"),
("str_quoted_tmp_1", r"'\a'", r"\a"),
("str_quoted_tmp_2", r"'a\'", GrammarParseError),
("str_quoted_inside_quote_different", "'\"'", '"'),
("str_quoted_inside_quote_same", r"'\''", "'"),
("str_quoted_extra_quote", r"'c:\\''", GrammarParseError),
# Lists and dictionaries.
("list", "[0, 1]", [0, 1]),
(
"dict",
"{x: 1, a: b, y: 1e2, null2: 0.1, true3: false, inf4: true}",
{"x": 1, "a": "b", "y": 100.0, "null2": 0.1, "true3": False, "inf4": True},
),
(
"dict_unquoted_key",
fr"{{a0-null-1-3.14-NaN- {TAB}-true-False-{UNQUOTED_SPECIAL}\(\)\[\]\{{\}}\:\=\ \{TAB}\,:0}}",
{
fr"a0-null-1-3.14-NaN- {TAB}-true-False-{UNQUOTED_SPECIAL}()[]{{}}:= {TAB},": 0
},
),
(
"dict_quoted",
"{0: 1, 'a': 'b', 1.1: 1e2, null: 0.1, true: false, -inf: true}",
GrammarParseError,
),
(
"structured_mixed",
"[10,str,3.14,true,false,inf,[1,2,3], 'quoted', \"quoted\", 'a,b,c']",
[
10,
"str",
3.14,
True,
False,
math.inf,
[1, 2, 3],
"quoted",
"quoted",
"a,b,c",
],
),
("dict_int_key", "{0: 0}", {0: 0}),
("dict_float_key", "{1.1: 0}", {1.1: 0}),
("dict_null_key", "{null: 0}", {None: 0}),
("dict_nan_like_key", "{'nan': 0}", GrammarParseError),
("dict_list_as_key", "{[0]: 1}", GrammarParseError),
(
"dict_bool_key",
"{true: true, false: 'false'}",
{True: True, False: "false"},
),
("empty_dict", "{}", {}),
("empty_list", "[]", []),
(
"structured_deep",
"{null0: [0, 3.14, false], true1: {a: [0, 1, 2], b: {}}}",
{"null0": [0, 3.14, False], "true1": {"a": [0, 1, 2], "b": {}}},
),
]
# Parameters for tests of the "singleElement" rule when there are interpolations.
PARAMS_SINGLE_ELEMENT_WITH_INTERPOLATION = [
# Node interpolations.
("dict_access", "${dict.a}", 0),
("list_access", "${list.0}", -1),
("dict_access_getitem", "${dict[a]}", 0),
("list_access_getitem", "${list[0]}", -1),
("getitem_first_1", "${[dict].a}", 0),
("getitem_first_2", "${[list][0]}", -1),
("dict_access_deep_1", "${dict.b.c}", 1),
("dict_access_deep_2", "${dict[b].c}", 1),
("dict_access_deep_3", "${dict.b[c]}", 1),
("dict_access_deep_4", "${dict[b][c]}", 1),
("list_access_underscore", "${list.1_0}", 9),
("list_access_bad_negative", "${list.-1}", InterpolationKeyError),
("dict_access_list_like_1", "${0}", 0),
("dict_access_list_like_2", "${1.2}", 12),
("bool_like_keys", "${FalsE.TruE}", True),
("null_like_key_ok", "${None.null}", 1),
("null_like_key_bad_case", "${NoNe.null}", InterpolationKeyError),
("null_like_key_quoted_1", "${'None'.'null'}", GrammarParseError),
("null_like_key_quoted_2", "${'None.null'}", GrammarParseError),
("dotpath_bad_type", "${dict.${float}}", (None, InterpolationResolutionError)),
("at_in_key", "${x@y}", 123),
("dollar_in_key", "${$x$y$z$}", 456),
# Interpolations in dictionaries.
("dict_interpolation_value", "{hi: ${str}, int: ${int}}", {"hi": "hi", "int": 123}),
("dict_interpolation_key", "{${str}: 0, ${null}: 1", GrammarParseError),
# Interpolations in lists.
("list_interpolation", "[${str}, ${int}]", ["hi", 123]),
# Interpolations in unquoted strings.
("str_dollar_and_inter", "$$${str}", "$$hi"),
("str_inter", "hi_${str}", "hi_hi"),
("str_esc_illegal_3", r"\${foo\}", GrammarParseError),
# Interpolations in quoted strings.
("str_quoted_inter", "'${null}'", "None"),
("str_quoted_esc_single_1", r"'ab\'cd\'\'${str}'", "ab'cd''hi"),
("str_quoted_esc_single_2", r"""'\\\${foo}'""", r"\${foo}"),
("str_quoted_esc_single_3", r"""'\\a_${str}'""", r"\\a_hi"),
("str_quoted_esc_single_4", r"""'a_${str}\\'""", r" a_hi\ ".strip()),
("str_quoted_esc_double_1", r'"ab\"cd\"\"${str}"', 'ab"cd""hi'),
("str_quoted_esc_double_2", r'''"\\\${foo}"''', r"\${foo}"),
("str_quoted_esc_double_3", r'''"\\a_${str}"''', r"\\a_hi"),
("str_quoted_esc_double_4", r'''"a_${str}\\"''', r" a_hi\ ".strip()),
("str_quoted_other_quote_double", """'double"'""", 'double"'),
("str_quoted_other_quote_single", '''"single'"''', "single'"),
("str_quoted_concat_bad_1", '"Hi "${str}', GrammarParseError),
("str_quoted_nested", "'${test:\"b\"}'", "b"),
("str_quoted_nested_esc_quotes", "'${test:'b'}'", "b"),
("str_quoted_esc_inter", r"""'\${test:"b"}'""", '${test:"b"}'),
("str_quoted_esc_inter_and_quotes", r"'\${test:\'b\'}'", "${test:'b'}"),
("str_quoted_esc_inter_nested_single_1", r"""'${test:'\${str}'}'""", "${str}"),
("str_quoted_esc_inter_nested_single_2", r"""'${test:'\\${str}'}'""", r"\hi"),
("str_quoted_esc_inter_nested_single_3", r"""'${test:'\\\${str}'}'""", r"\${str}"),
("str_quoted_esc_inter_nested_double_1", r'''"${test:"\${str}"}"''', "${str}"),
("str_quoted_esc_inter_nested_double_2", r'''"${test:"\\${str}"}"''', r"\hi"),
("str_quoted_esc_inter_nested_double_3", r'''"${test:"\\\${str}"}"''', r"\${str}"),
("str_quoted_error_inside_quotes", "'${missing_brace'", GrammarParseError),
# Whitespaces.
("ws_inter_node_outer", "${ \tdict.a \t}", 0),
("ws_inter_node_around_dot", "${dict .\ta}", GrammarParseError),
("ws_inter_node_inside_id", "${d i c t.a}", GrammarParseError),
("ws_inter_res_outer", "${\t test:foo\t }", "foo"),
("ws_inter_res_around_colon", "${test\t : \tfoo}", "foo"),
("ws_inter_res_inside_id", "${te st:foo}", GrammarParseError),
("ws_inter_res_inside_args", "${test:f o o}", "f o o"),
("ws_inter_res_namespace", "${ns1 .\t ns2 . test:0}", GrammarParseError),
("ws_inter_res_no_args", "${test: \t}", []),
("ws_list", "${test:[\t a, b, ''\t ]}", ["a", "b", ""]),
("ws_dict", "${test:{\t a : 1\t , b: \t''}}", {"a": 1, "b": ""}),
("ws_quoted_single", "${test: \t'foo'\t }", "foo"),
("ws_quoted_double", '${test: \t"foo"\t }', "foo"),
# Nested interpolations.
("nested_simple", "${${ref_str}}", "hi"),
("nested_select", "${options.${choice}}", "A"),
("nested_select_getitem", "${options[${choice}]}", "A"),
("nested_relative", "${${rel_opt}.b}", "B"),
("str_quoted_nested_deep_single", r"'AB${test:'CD${test:'EF'}GH'}'", "ABCDEFGH"),
("str_quoted_nested_deep_double", r'"AB${test:"CD${test:"EF"}GH"}"', "ABCDEFGH"),
("str_quoted_nested_deep_mixed", r'''"AB${test:'CD${test:"EF"}GH'}"''', "ABCDEFGH"),
(
"str_quoted_issue_615",
r'${test:"The root drive is: \\${str}:\\"}',
r" The root drive is: \hi:\ ".strip(),
),
# Resolver interpolations.
("no_args", "${test:}", []),
("space_in_args", "${test:a, b c}", ["a", "b c"]),
("list_as_input", "${test:[a, b], 0, [1.1]}", [["a", "b"], 0, [1.1]]),
("dict_as_input", "${test:{a: 1.1, b: b}}", {"a": 1.1, "b": "b"}),
("dict_as_input_quotes", "${test:{'a': 1.1, b: b}}", GrammarParseError),
("dict_typo_colons", "${test:{a: 1.1, b:: b}}", {"a": 1.1, "b": ": b"}),
("missing_resolver", "${MiSsInG_ReSoLvEr:0}", UnsupportedInterpolationType),
("at_in_resolver", "${y@z:}", GrammarParseError),
("ns_resolver", "${ns1.ns2.test:123}", 123),
# Nested resolvers.
("nested_resolver", "${${str_test}:a, b, c}", ["a", "b", "c"]),
("nested_deep", "${test:${${test:${ref_str}}}}", "hi"),
(
"nested_resolver_combined_illegal",
"${some_${resolver}:a, b, c}",
GrammarParseError,
),
("nested_args", "${test:${str}, ${null}, ${int}}", ["hi", None, 123]),
# Invalid resolver names.
("int_resolver_quoted", "${'0':1,2,3}", GrammarParseError),
("int_resolver_noquote", "${0:1,2,3}", GrammarParseError),
("float_resolver_quoted", "${'1.1':1,2,3}", GrammarParseError),
("float_resolver_noquote", "${1.1:1,2,3}", GrammarParseError),
("float_resolver_exp", "${1e1:1,2,3}", GrammarParseError),
("inter_float_resolver", "${${float}:1,2,3}", (None, InterpolationResolutionError)),
# NaN as dictionary key (a resolver is used here to output only the key).
("dict_nan_key_1", "${first:{nan: 0}}", math.nan),
("dict_nan_key_2", "${first:{${test:nan}: 0}}", GrammarParseError),
]
# Parameters for tests of the "configValue" rule (may contain interpolations).
PARAMS_CONFIG_VALUE = [
# String interpolations (top-level).
("str_top_basic", "bonjour ${str}", "bonjour hi"),
("str_top_quotes_single_1", "'bonjour ${str}'", "'bonjour hi'"),
(
"str_top_quotes_single_2",
"'Bonjour ${str}', I said.",
"'Bonjour hi', I said.",
),
("str_top_quotes_double_1", '"bonjour ${str}"', '"bonjour hi"'),
(
"str_top_quotes_double_2",
'"Bonjour ${str}", I said.',
'"Bonjour hi", I said.',
),
("str_top_missing_end_quote_single", "'${str}", "'hi"),
("str_top_missing_end_quote_double", '"${str}', '"hi'),
("str_top_missing_start_quote_double", '${str}"', 'hi"'),
("str_top_missing_start_quote_single", "${str}'", "hi'"),
("str_top_middle_quote_single", "I'd like ${str}", "I'd like hi"),
("str_top_middle_quote_double", 'I"d like ${str}', 'I"d like hi'),
("str_top_middle_quotes_single", "I like '${str}'", "I like 'hi'"),
("str_top_middle_quotes_double", 'I like "${str}"', 'I like "hi"'),
(
"str_top_any_char",
r"${str} " + UNQUOTED_SPECIAL + r"^!#&})][({,;",
r"hi " + UNQUOTED_SPECIAL + r"^!#&})][({,;",
),
("str_top_esc_inter", r"Esc: \${str}", "Esc: ${str}"),
("str_top_esc_inter_wrong_1", r"Wrong: $\{str\}", r"Wrong: $\{str\}"),
("str_top_esc_inter_wrong_2", r"Wrong: \${str\}", r"Wrong: ${str\}"),
("str_top_esc_backslash_1", r"Esc: \\${str}", r"Esc: \hi"),
("str_top_esc_backslash_2", r"Esc: \\\\${str}", r"Esc: \\hi"),
("str_top_quoted_braces_wrong", r"Wrong: \{${str}\}", r"Wrong: \{hi\}"),
("str_top_leading_dollars", r"$$${str}", "$$hi"),
("str_top_trailing_dollars", r"${str}$$$$", "hi$$$$"),
("str_top_leading_escapes_1", r"\\\\\${str}", r"\\${str}"),
("str_top_leading_escapes_2", r"\\\\ \${str}", r"\\\\ ${str}"),
("str_top_middle_escapes_1", r"abc\\\\\${str}", r"abc\\${str}"),
("str_top_middle_escapes_2", r"abc\\\\ \${str}", r"abc\\\\ ${str}"),
("str_top_trailing_escapes", r" ${str}\\\ ".strip(), r" hi\\\ ".strip()),
("str_top_concat_interpolations", "${null}${float}", "None1.2"),
("str_top_issue_617", r""" ${test: "hi\\" }"} """, r" hi\"} "),
# Whitespaces.
("ws_toplevel", " \tab ${str} cd ${int}\t", " \tab hi cd 123\t"),
# Unmatched braces.
("missing_brace_1", "${test:${str}", GrammarParseError),
("missing_brace_2", "${${test:str}", GrammarParseError),
("extra_brace", "${str}}", "hi}"),
]
def parametrize_from(
data: List[Tuple[str, str, Any]]
) -> Callable[[Callable[..., Any]], Callable[..., Any]]:
"""Utility function to create PyTest parameters from the lists above"""
return mark.parametrize(
["definition", "expected"],
[param(definition, expected, id=key) for key, definition, expected in data],
)
class TestOmegaConfGrammar:
"""
Test most grammar constructs.
Each method in this class tests the validity of expressions in a specific
setting. For instance, `test_single_element_no_interpolation()` tests the
"singleElement" parsing rule on expressions that do not contain interpolations
(which allows for faster tests without using any config object).
Tests that actually need a config object all re-use the same `BASE_TEST_CFG`
config, to avoid creating a new config for each test.
"""
@parametrize_from(PARAMS_SINGLE_ELEMENT_NO_INTERPOLATION)
def test_single_element_no_interpolation(
self, definition: str, expected: Any
) -> None:
parse_tree, expected_visit = self._parse("singleElement", definition, expected)
if parse_tree is None:
return
# Since there are no interpolations here, we do not need to provide
# callbacks to resolve them, and the quoted string callback can simply
# be the identity.
visitor = grammar_visitor.GrammarVisitor(
node_interpolation_callback=None, # type: ignore
resolver_interpolation_callback=None, # type: ignore
memo=None,
)
self._visit(lambda: visitor.visit(parse_tree), expected_visit)
@parametrize_from(PARAMS_SINGLE_ELEMENT_WITH_INTERPOLATION)
def test_single_element_with_resolver(
self, restore_resolvers: Any, definition: str, expected: Any
) -> None:
parse_tree, expected_visit = self._parse("singleElement", definition, expected)
OmegaConf.register_new_resolver("test", self._resolver_test)
OmegaConf.register_new_resolver("first", self._resolver_first)
OmegaConf.register_new_resolver("ns1.ns2.test", self._resolver_test)
self._visit_with_config(parse_tree, expected_visit)
@parametrize_from(PARAMS_CONFIG_VALUE)
def test_config_value(
self, restore_resolvers: Any, definition: str, expected: Any
) -> None:
parse_tree, expected_visit = self._parse("configValue", definition, expected)
OmegaConf.register_new_resolver("test", self._resolver_test)
self._visit_with_config(parse_tree, expected_visit)
@parametrize_from(
[
("trailing_comma", "${test:a,b,}", ["a", "b", ""]),
("empty_middle", "${test:a,,b}", ["a", "", "b"]),
("empty_first", "${test:,a,b}", ["", "a", "b"]),
("single_comma", "${test:,}", ["", ""]),
(
"mixed_with_ws",
"${test: ,a,b,\t,,c, \t \t ,d,, \t}",
["", "a", "b", "", "", "c", "", "d", "", ""],
),
]
)
def test_deprecated_empty_args(
self, restore_resolvers: Any, definition: str, expected: Any
) -> None:
OmegaConf.register_new_resolver("test", self._resolver_test)
parse_tree, expected_visit = self._parse("singleElement", definition, expected)
with warns(
UserWarning, match=re.escape("https://github.com/omry/omegaconf/issues/572")
):
self._visit_with_config(parse_tree, expected_visit)
def _check_is_same_type(self, value: Any, expected: Any) -> None:
"""
        Helper function to validate that types of `value` and `expected` are the same.
This function assumes that `value == expected` holds, and performs a "deep"
comparison of types (= it goes into data structures like dictionaries, lists
and tuples).
Note that dictionaries being compared must have keys ordered the same way!
"""
assert type(value) is type(expected)
if isinstance(value, (str, int, float)):
pass
elif isinstance(value, (list, tuple, ListConfig)):
for vx, ex in zip(value, expected):
self._check_is_same_type(vx, ex)
elif isinstance(value, (dict, DictConfig)):
for (vk, vv), (ek, ev) in zip(value.items(), expected.items()):
assert vk == ek, "dictionaries are not ordered the same"
self._check_is_same_type(vk, ek)
self._check_is_same_type(vv, ev)
elif value is None:
assert expected is None
else:
raise NotImplementedError(type(value))
def _get_expected(self, expected: Any) -> Tuple[Any, Any]:
"""Obtain the expected result of the parse & visit steps"""
if isinstance(expected, tuple):
# Outcomes of both the parse and visit steps are provided.
assert len(expected) == 2
return expected[0], expected[1]
elif expected is GrammarParseError:
# If only a `GrammarParseError` is expected, assume it happens in parse step.
return expected, None
else:
# If anything else is provided, assume it is the outcome of the visit step.
return None, expected
def _get_lexer_mode(self, rule: str) -> str:
return {"configValue": "DEFAULT_MODE", "singleElement": "VALUE_MODE"}[rule]
def _parse(
self, rule: str, definition: str, expected: Any
) -> Tuple[Optional[antlr4.ParserRuleContext], Any]:
"""
Parse the expression given by `definition`.
Return both the parse tree and the expected result when visiting this tree.
"""
def get_tree() -> antlr4.ParserRuleContext:
return grammar_parser.parse(
value=definition,
parser_rule=rule,
lexer_mode=self._get_lexer_mode(rule),
)
expected_parse, expected_visit = self._get_expected(expected)
if expected_parse is None:
return get_tree(), expected_visit
else: # expected failure on the parse step
with raises(expected_parse):
get_tree()
return None, None
def _resolver_first(self, item: Any, *_: Any) -> Any:
"""Resolver that returns the first element of its first input"""
try:
return next(iter(item))
except StopIteration:
assert False # not supposed to happen in current tests
def _resolver_test(self, *args: Any) -> Any:
"""Resolver that returns the list of its inputs"""
return args[0] if len(args) == 1 else list(args)
def _visit(self, visit: Callable[[], Any], expected: Any) -> None:
"""Run the `visit()` function to visit the parse tree and validate the result"""
if isinstance(expected, type) and issubclass(expected, Exception):
with raises(expected):
visit()
else:
result = visit()
if expected is math.nan:
# Special case since nan != nan.
assert math.isnan(result)
else:
assert result == expected
# We also check types in particular because instances of `Node` are very
# good at mimicking their underlying type's behavior, and it is easy to
# fail to notice that the result contains nodes when it should not.
self._check_is_same_type(result, expected)
def _visit_with_config(
self, parse_tree: antlr4.ParserRuleContext, expected: Any
) -> None:
"""Visit the tree using the default config `BASE_TEST_CFG`"""
if parse_tree is None:
return
cfg = BASE_TEST_CFG
def visit() -> Any:
return _utils._get_value(
cfg.resolve_parse_tree(
parse_tree,
# Create a dummy `AnyNode` (it should not actually be used in these
                    # grammar tests, but `resolve_parse_tree()` requires it).
node=AnyNode(None, parent=cfg),
key=None,
parent=cfg,
)
)
self._visit(visit, expected)
@mark.parametrize(
"expression",
[
"${foo}",
"${foo.bar}",
"${a_b.c123}",
"${ foo \t}",
"x ${ab.cd.ef.gh} y",
"$ ${foo} ${bar} ${boz} $",
"${foo:bar}",
"${foo : bar, baz, boz}",
"${foo:bar,0,a-b+c*d/$.%@?|}",
r"\${foo}",
"${foo.bar:boz}",
"${$foo.bar$.x$y}",
"${$0.1.2$}",
"${0foo}",
# getitem syntax
"${foo[bar]}",
"${foo.bar[baz]}",
"${foo[bar].baz}",
"${foo[bar].baz[boz]}",
"${[foo]}",
"${[foo].bar}",
"${[foo][bar]}",
# relative interpolations
"${..foo}",
"${..foo.bar}",
"${..foo[bar]}",
"${..[foo].bar}",
],
)
class TestMatchSimpleInterpolationPattern:
def test_regex(self, expression: str) -> None:
assert grammar_parser.SIMPLE_INTERPOLATION_PATTERN.match(expression) is not None
def test_grammar_consistency(self, expression: str) -> None:
# The expression should be valid according to the grammar.
grammar_parser.parse(
value=expression,
parser_rule="configValue",
lexer_mode="DEFAULT_MODE",
)
@mark.parametrize(
("expression", "is_valid_grammar"),
[
# Also invalid according to the grammar.
("${.}", False),
("${..}", False),
("${}", False),
("${foo", False),
("${0foo:bar}", False),
("${foo . bar}", False),
("${ns . f:var}", False),
("${$foo:bar}", False),
("${.foo:bar}", False),
(r"${foo:\}", False),
# Valid according to the grammar but not matched by the regex.
("${foo.${bar}}", True),
("${foo:${bar}}", True),
("${foo:'hello'}", True),
(r"\${foo", True),
],
)
class TestDoNotMatchSimpleInterpolationPattern:
def test_regex(self, expression: str, is_valid_grammar: bool) -> None:
assert grammar_parser.SIMPLE_INTERPOLATION_PATTERN.match(expression) is None
def test_grammar_consistency(self, expression: str, is_valid_grammar: bool) -> None:
ctx: Any = nullcontext() if is_valid_grammar else raises(GrammarParseError)
with ctx:
grammar_parser.parse(
value=expression,
parser_rule="configValue",
lexer_mode="DEFAULT_MODE",
)
def test_empty_stack() -> None:
"""
Check that an empty stack during ANTLR parsing raises a `GrammarParseError`.
"""
with raises(GrammarParseError):
grammar_parser.parse("ab}", lexer_mode="VALUE_MODE")
@mark.parametrize(
("inter", "key", "expected"),
[
# config root
# simple
param("${dict.bar}", "", 20, id="dict_value"),
param("${dict}", "", {"bar": 20}, id="dict_node"),
param("${list}", "", [1, 2], id="list_node"),
param("${list.0}", "", 1, id="list_value"),
# relative
param(
"${..list}",
"dict",
[1, 2],
id="relative:list_from_dict",
),
param("${..list.1}", "dict", 2, id="up_down"),
param("${..[list][1]}", "dict", 2, id="up_down_getitem"),
],
)
def test_parse_interpolation(inter: Any, key: Any, expected: Any) -> None:
cfg = OmegaConf.create(
{
"dict": {"bar": 20},
"list": [1, 2],
},
)
root = OmegaConf.select(cfg, key)
tree = grammar_parser.parse(
parser_rule="singleElement",
value=inter,
lexer_mode="VALUE_MODE",
)
def callback(inter_key: Any, memo: Optional[Set[int]]) -> Any:
assert isinstance(root, Container)
ret = root._resolve_node_interpolation(inter_key=inter_key, memo=memo)
return ret
visitor = grammar_visitor.GrammarVisitor(
node_interpolation_callback=callback,
resolver_interpolation_callback=None, # type: ignore
memo=None,
)
ret = visitor.visit(tree)
assert ret == expected
def test_custom_resolver_param_supported_chars() -> None:
supported_chars = r"abc123_:" + UNQUOTED_SPECIAL
c = OmegaConf.create({"dir1": "${copy:" + supported_chars + "}"})
OmegaConf.register_new_resolver("copy", lambda x: x)
assert c.dir1 == supported_chars
def test_valid_chars_in_interpolation() -> None:
valid_chars = "".join(
chr(i) for i in range(33, 128) if chr(i) not in INVALID_CHARS_IN_KEY_NAMES
)
cfg_dict = {valid_chars: 123, "inter": f"${{{valid_chars}}}"}
cfg = OmegaConf.create(cfg_dict)
# Test that we can access the node made of all valid characters, both
# directly and through interpolations.
assert cfg[valid_chars] == 123
assert cfg.inter == 123
@mark.parametrize("c", list(INVALID_CHARS_IN_KEY_NAMES))
def test_invalid_chars_in_interpolation(c: str) -> None:
def create() -> DictConfig:
return OmegaConf.create({"invalid": f"${{ab{c}de}}"})
# Test that all invalid characters trigger errors in interpolations.
if c in [".", "}"]:
# With '.', we try to access `${ab.de}`.
# With '}', we try to access `${ab}`.
cfg = create()
with raises(InterpolationKeyError):
cfg.invalid
elif c == ":":
# With ':', we try to run a resolver `${ab:de}`
cfg = create()
with raises(UnsupportedInterpolationType):
cfg.invalid
else:
# Other invalid characters should be detected at creation time.
with raises(GrammarParseError):
create()
def test_grammar_cache_is_thread_safe() -> None:
"""
This test ensures that we can parse strings across multiple threads in parallel.
Besides ensuring that the parsing does not hang nor crash, we also verify that
the lexer used in each thread is different.
"""
n_threads = 10
lexer_ids = []
stop = threading.Event()
def check_cache_lexer_id() -> None:
# Parse a dummy string to make sure the grammar cache is populated
# (this also checks that multiple threads can parse in parallel).
grammar_parser.parse("foo")
# Keep track of the ID of the cached lexer.
lexer_ids.append(id(grammar_parser._grammar_cache.data[0]))
# Wait until we are done.
while not stop.is_set():
time.sleep(0.1)
# Launch threads.
threads = []
for i in range(n_threads):
threads.append(threading.Thread(target=check_cache_lexer_id))
threads[-1].start()
# Wait until all threads have reported their lexer ID.
while len(lexer_ids) < n_threads:
time.sleep(0.1)
# Terminate threads.
stop.set()
for thread in threads:
thread.join()
# Check that each thread used a unique lexer.
assert len(set(lexer_ids)) == n_threads
|
py | 1a52f41a78e1fb856c7c3c881e12e797772d7910 | from flask import Flask
from flask_restful import Api
from resources.getprojectlist import GetProjectlist
from resources.getjoblist import GetJoblist
from resources.runningjob import RunningJob
from resources.jobres import JobRes
app = Flask(__name__)
app.config.from_object('config')
api = Api(app)
api.add_resource(GetProjectlist, '/projects')
api.add_resource(GetJoblist, '/project/<string:projectname>')
api.add_resource(RunningJob, '/job/<string:jobid>')
api.add_resource(JobRes, '/job/<string:jobiid>')
|
py | 1a52f424989c72e33b359b4b87374d9cf76ad836 | from django.conf.urls import include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = [
# Examples:
# url(r'^$', 'apply.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^accounts/login/$', 'django.contrib.auth.views.login'),
url(r'^accounts/logout/$', 'django.contrib.auth.views.logout', {'next_page': '/'}),
url(r'^', include('yard.urls')),
url(r'^admin/', include(admin.site.urls)),
]
|
py | 1a52f7291b8c1b88f48a06f9908d4223370964a4 | """
Utilities for gathering information on the host machine for inclusion with
results.
"""
import re
from lnt.testing.util.commands import capture, fatal
# All the things we care to probe about the system, and whether to track with
# the machine or run. This is a list of (sysctl, kind) where kind is one of:
# machine - key should always be part of machine
# machdep - key should be part of machine, unless --no-machdep-info is set
# run - key should always be part of run
sysctl_info_table = [
('hw.activecpu', 'machine'),
('hw.availcpu', 'machine'),
('hw.busfrequency', 'machine'),
('hw.busfrequency_max', 'machine'),
('hw.busfrequency_min', 'machine'),
('hw.byteorder', 'machine'),
('hw.cacheconfig', 'machine'),
('hw.cachelinesize', 'machine'),
('hw.cachesize', 'machine'),
('hw.cpu64bit_capable', 'machine'),
('hw.cpufamily', 'machine'),
('hw.cpufrequency', 'machine'),
('hw.cpufrequency_max', 'machine'),
('hw.cpufrequency_min', 'machine'),
('hw.cpusubtype', 'machine'),
('hw.cputype', 'machine'),
('hw.epoch', 'machine'),
('hw.l1dcachesize', 'machine'),
('hw.l1icachesize', 'machine'),
('hw.l2cachesize', 'machine'),
('hw.l2settings', 'machine'),
('hw.logicalcpu', 'machine'),
('hw.logicalcpu_max', 'machine'),
('hw.machine', 'machine'),
('hw.memsize', 'machine'),
('hw.model', 'machine'),
('hw.ncpu', 'machine'),
('hw.optional.floatingpoint', 'machine'),
('hw.optional.mmx', 'machine'),
('hw.optional.sse', 'machine'),
('hw.optional.sse2', 'machine'),
('hw.optional.sse3', 'machine'),
('hw.optional.sse4_1', 'machine'),
('hw.optional.sse4_2', 'machine'),
('hw.optional.supplementalsse3', 'machine'),
('hw.optional.x86_64', 'machine'),
('hw.packages', 'machine'),
('hw.pagesize', 'machine'),
('hw.physicalcpu', 'machine'),
('hw.physicalcpu_max', 'machine'),
('hw.physmem', 'machine'),
('hw.tbfrequency', 'machine'),
('hw.usermem', 'run'),
('hw.vectorunit', 'machine'),
('kern.aiomax', 'machine'),
('kern.aioprocmax', 'machine'),
('kern.aiothreads', 'machine'),
('kern.argmax', 'machine'),
('kern.boottime', 'run'),
('kern.clockrate: hz', 'machine'),
('kern.coredump', 'machine'),
('kern.corefile', 'machine'),
('kern.delayterm', 'machine'),
('kern.hostid', 'machine'),
('kern.hostname', 'machdep'),
('kern.job_control', 'machine'),
('kern.maxfiles', 'machine'),
('kern.maxfilesperproc', 'machine'),
('kern.maxproc', 'machine'),
('kern.maxprocperuid', 'machine'),
('kern.maxvnodes', 'machine'),
('kern.netboot', 'machine'),
('kern.ngroups', 'machine'),
('kern.nisdomainname', 'machine'),
('kern.nx', 'machine'),
('kern.osrelease', 'machine'),
('kern.osrevision', 'machine'),
('kern.ostype', 'machine'),
('kern.osversion', 'machine'),
('kern.posix1version', 'machine'),
('kern.procname', 'machine'),
('kern.rage_vnode', 'machine'),
('kern.safeboot', 'machine'),
('kern.saved_ids', 'machine'),
('kern.securelevel', 'machine'),
('kern.shreg_private', 'machine'),
('kern.speculative_reads_disabled', 'machine'),
('kern.sugid_coredump', 'machine'),
('kern.thread_name', 'machine'),
('kern.usrstack', 'run'),
('kern.usrstack64', 'run'),
('kern.version', 'machine'),
('machdep.cpu.address_bits.physical', 'machine'),
('machdep.cpu.address_bits.virtual', 'machine'),
('machdep.cpu.arch_perf.events', 'machine'),
('machdep.cpu.arch_perf.events_number', 'machine'),
('machdep.cpu.arch_perf.fixed_number', 'machine'),
('machdep.cpu.arch_perf.fixed_width', 'machine'),
('machdep.cpu.arch_perf.number', 'machine'),
('machdep.cpu.arch_perf.version', 'machine'),
('machdep.cpu.arch_perf.width', 'machine'),
('machdep.cpu.brand', 'machine'),
('machdep.cpu.brand_string', 'machine'),
('machdep.cpu.cache.L2_associativity', 'machine'),
('machdep.cpu.cache.linesize', 'machine'),
('machdep.cpu.cache.size', 'machine'),
('machdep.cpu.core_count', 'machine'),
('machdep.cpu.cores_per_package', 'machine'),
('machdep.cpu.extfamily', 'machine'),
('machdep.cpu.extfeature_bits', 'machine'),
('machdep.cpu.extfeatures', 'machine'),
('machdep.cpu.extmodel', 'machine'),
('machdep.cpu.family', 'machine'),
('machdep.cpu.feature_bits', 'machine'),
('machdep.cpu.features', 'machine'),
('machdep.cpu.logical_per_package', 'machine'),
('machdep.cpu.max_basic', 'machine'),
('machdep.cpu.max_ext', 'machine'),
('machdep.cpu.microcode_version', 'machine'),
('machdep.cpu.model', 'machine'),
('machdep.cpu.mwait.extensions', 'machine'),
('machdep.cpu.mwait.linesize_max', 'machine'),
('machdep.cpu.mwait.linesize_min', 'machine'),
('machdep.cpu.mwait.sub_Cstates', 'machine'),
('machdep.cpu.signature', 'machine'),
('machdep.cpu.stepping', 'machine'),
('machdep.cpu.thermal.ACNT_MCNT', 'machine'),
('machdep.cpu.thermal.dynamic_acceleration', 'machine'),
('machdep.cpu.thermal.sensor', 'machine'),
('machdep.cpu.thermal.thresholds', 'machine'),
('machdep.cpu.thread_count', 'machine'),
('machdep.cpu.tlb.data.large', 'machine'),
('machdep.cpu.tlb.data.large_level1', 'machine'),
('machdep.cpu.tlb.data.small', 'machine'),
('machdep.cpu.tlb.data.small_level1', 'machine'),
('machdep.cpu.tlb.inst.large', 'machine'),
('machdep.cpu.tlb.inst.small', 'machine'),
('machdep.cpu.vendor', 'machine'),
]
def _get_mac_addresses():
lines = capture(['ifconfig']).strip()
current_ifc = None
for ln in lines.split('\n'):
if ln.startswith('\t'):
if current_ifc is None:
fatal('unexpected ifconfig output')
if ln.startswith('\tether '):
yield current_ifc, ln[len('\tether '):].strip()
else:
current_ifc, = re.match(r'([A-Za-z0-9]*): .*', ln).groups()
def get_machine_information(use_machine_dependent_info=False):
machine_info = {}
run_info = {}
info_targets = {
'machdep': (run_info, machine_info)[use_machine_dependent_info],
'machine': machine_info,
'run': run_info,
}
for name, target in sysctl_info_table:
info_targets[target][name] = capture(['sysctl', '-n', name],
include_stderr=True).strip()
for ifc, addr in _get_mac_addresses():
# Ignore virtual machine mac addresses.
if ifc.startswith('vmnet'):
continue
info_targets['machdep']['mac_addr.%s' % ifc] = addr
return machine_info, run_info
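# Illustrative usage sketch (comment only, not part of the original module):
#
#     machine_info, run_info = get_machine_information(use_machine_dependent_info=True)
#     # 'machine' keys (e.g. 'hw.ncpu') land in machine_info, 'run' keys
#     # (e.g. 'kern.boottime') in run_info, and 'machdep' keys (hostname plus
#     # non-VM MAC addresses) go to machine_info only when the flag is True.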
|
py | 1a52f776715fd22439f73e8b9bf4ab3a4a4823eb | """
@author: Jonatan González Rodríguez <[email protected]>
"""
import re
import csv, datetime
import pysam
def reformat_nanomonsv(inp, out):
vcf = open(inp, 'r')
filtered_vcf = open(out, 'w')
for line in vcf:
if line.startswith('##') and 'ID=TR' in line:
filtered_vcf.write('##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\n')
new_DR = line.replace(
'ID=TR,Number=1,Type=Integer,Description="The number of reads around the breakpoints"',
'ID=DR,Number=1,Type=Integer,Description="# of reads supporting the reference allele."',
)
filtered_vcf.write(new_DR)
elif line.startswith('##') and 'ID=VR' in line:
new_DV = line.replace(
'ID=VR,Number=1,Type=Integer,Description="The number of variant supporting reads determined in the validation realignment step"',
'ID=DV,Number=1,Type=Integer,Description="# of reads supporting the variant allele."',
)
filtered_vcf.write(new_DV)
elif line.startswith('#CHROM'):
headers = (
line.strip()
.replace('TUMOR', 'NANOMON_Tumor')
.replace('CONTROL', 'NANOMON_Normal')
.split('\t')
)
filtered_vcf.write(
line.replace('TUMOR', 'NANOMON_Tumor').replace('CONTROL', 'NANOMON_Normal')
)
elif not line.startswith('#'):
columns = line.strip().split('\t')
if columns[headers.index('FILTER')] == 'PASS':
columns[headers.index('REF')] = 'N'
Format = columns[headers.index('FORMAT')].replace('TR', 'DR').replace('VR', 'DV')
Format = 'GT:' + Format
Normal_Format = columns[headers.index('NANOMON_Normal')].split(':')
Normal_Format[0] = str(int(Normal_Format[0]) - int(Normal_Format[1]))
Tumor_Format = columns[headers.index('NANOMON_Tumor')].split(':')
Tumor_Format[0] = str(int(Tumor_Format[0]) - int(Tumor_Format[1]))
Normal = './.:' + ':'.join(Normal_Format)
Tumor = './.:' + ':'.join(Tumor_Format)
filtered_vcf.write(
'{}\t{}\t{}\t{}\n'.format('\t'.join(columns[0:8]), Format, Tumor, Normal)
)
else:
filtered_vcf.write(line)
vcf.close()
filtered_vcf.close()
def reformat_svim(inp, out, columnid, qual):
vcf = open(inp, 'r')
filtered_vcf = open(out, 'w')
for line in vcf:
if line.startswith('##') and 'ID=DP' in line:
new_DR = line.replace(
'ID=DP,Number=1,Type=Integer,Description="Read depth"',
'ID=DR,Number=1,Type=Integer,Description="# reads supporting the reference allele."',
)
filtered_vcf.write(new_DR)
elif line.startswith('##') and 'ID=AD' in line:
new_DV = line.replace(
'ID=AD,Number=R,Type=Integer,Description="Read depth for each allele"',
'ID=DV,Number=1,Type=Integer,Description="# of reads supporting the variant allele."',
)
filtered_vcf.write(new_DV)
elif line.startswith('#CHROM'):
headers = line.strip().split('\t')
filtered_vcf.write(line)
elif not line.startswith('#'):
columns = line.strip().split('\t')
if int(columns[headers.index('QUAL')]) >= qual:
if 'DUP:TANDEM' in columns[headers.index('ALT')]:
columns[headers.index('ALT')] = '<DUP>'
Format = (
columns[headers.index('FORMAT')].replace('DP', 'DR').replace('AD', 'DV')
)
Format = re.split(':', Format)
del Format[1]
Format_info = re.split(':|,', columns[headers.index(columnid)])
del Format_info[1:3]
filtered_vcf.write(
'{}\t{}\t{}\n'.format(
'\t'.join(columns[0:8]), ':'.join(Format), ':'.join(Format_info)
)
)
elif 'DUP:INT' in columns[headers.index('ALT')]:
columns[headers.index('ALT')] = '<DUP>'
Format = (
columns[headers.index('FORMAT')].replace('DP', 'DR').replace('AD', 'DV')
)
Format_info = re.split(':|,', columns[headers.index(columnid)])
del Format_info[1]
filtered_vcf.write(
'{}\t{}\t{}\n'.format(
'\t'.join(columns[0:8]), Format, ':'.join(Format_info)
)
)
else:
if 'DEL' in columns[headers.index('ALT')]:
columns[headers.index('POS')] = str(int(columns[headers.index('POS')]) + 1)
Format = (
columns[headers.index('FORMAT')].replace('DP', 'DR').replace('AD', 'DV')
)
Format_info = re.split(':|,', columns[headers.index(columnid)])
del Format_info[1]
filtered_vcf.write(
'{}\t{}\t{}\n'.format(
'\t'.join(columns[0:8]), Format, ':'.join(Format_info)
)
)
else:
filtered_vcf.write(line)
vcf.close()
filtered_vcf.close()
def reformat_sniffles(inp, out):
print(inp, out)
vcf = open(inp, 'r')
filtered_vcf = open(out, 'w')
for line in vcf:
if line.startswith('#CHROM'):
headers = line.strip().split('\t')
new_SEQ = '##INFO=<ID=SVINSSEQ,Number=1,Type=String,Description="Sequence of insertion">\n'
filtered_vcf.write(new_SEQ)
filtered_vcf.write(line)
elif not line.startswith('#'):
columns = line.strip().split('\t')
if 'DEL' in columns[headers.index('INFO')]:
columns[headers.index('REF')] = 'N'
columns[headers.index('ALT')] = '<DEL>'
filtered_vcf.write('\t'.join(columns) + '\n')
elif 'INS' in columns[headers.index('INFO')]:
columns[headers.index('POS')] = str(int(columns[headers.index('POS')]) - 1)
INFO = columns[headers.index('INFO')].split(';')
pos_idx = [i for i, x in enumerate(INFO) if x.startswith('END')][0]
INFO[pos_idx] = 'END=' + str(int(INFO[pos_idx].split('=')[1]) - 1)
columns[headers.index('INFO')] = ';'.join(INFO)
columns[headers.index('INFO')] += ';SVINSSEQ={}'.format(
columns[headers.index('ALT')]
)
columns[headers.index('ALT')] = '<INS>'
filtered_vcf.write('\t'.join(columns) + '\n')
else:
filtered_vcf.write(line)
else:
filtered_vcf.write(line)
def reformat_cutesv(inp, out):
vcf = open(inp, 'r')
filtered_vcf = open(out, 'w')
for line in vcf:
if line.startswith('#CHROM'):
headers = line.strip().split('\t')
filtered_vcf.write(
'##INFO=<ID=SVINSSEQ,Number=1,Type=String,Description="Sequence of insertion">\n'
)
filtered_vcf.write(line)
elif not line.startswith('#'):
columns = line.strip().split('\t')
if columns[headers.index('QUAL')] != '.':
if 'DEL' in columns[headers.index('INFO')]:
columns[headers.index('REF')] = 'N'
columns[headers.index('ALT')] = '<DEL>'
columns[headers.index('POS')] = str(int(columns[headers.index('POS')]) + 1)
filtered_vcf.write('\t'.join(columns) + '\n')
elif 'INS' in columns[headers.index('INFO')]:
columns[headers.index('POS')] = str(int(columns[headers.index('POS')]) - 1)
columns[headers.index('REF')] = 'N'
columns[headers.index('INFO')] += ';SVINSSEQ={}'.format(
columns[headers.index('ALT')]
)
columns[headers.index('ALT')] = '<INS>'
filtered_vcf.write('\t'.join(columns) + '\n')
else:
columns[headers.index('REF')] = 'N'
filtered_vcf.write('\t'.join(columns) + '\n')
else:
filtered_vcf.write(line)
def reverse_complement(seq):
complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A',
'W': 'W', 'S': 'S', 'M': 'K', 'K': 'M', 'R': 'Y', 'Y': 'R',
'B': 'V', 'V': 'B', 'D': 'H', 'H': 'D', 'N': 'N'}
return("".join(complement.get(base, base) for base in reversed(seq)))
def genomesv2vcf_convert(result_file, output_vcf, reference):
today_str = datetime.datetime.today().strftime("%Y%m%d")
header = '##fileformat=VCFv4.3\n'\
f'##fileDate={today_str}\n'\
f'##reference={reference}'
ref_tb = pysam.FastaFile(reference)
for (tchr, tlen) in zip(ref_tb.references, ref_tb.lengths):
header = header + '\n' + f"##contig=<ID={tchr},length={tlen}>"
header = header + '\n' + \
'##FILTER=<ID=Duplicate_with_close_SV,Description="When multiple SVs that share breakpoints in close proximity are detected, all but one SVs are filtered.">\n'\
'##FILTER=<ID=Duplicate_with_insertion,Description="Breakend SVs that are inferred to be the same as any of detected insertions">\n'\
'##FILTER=<ID=Duplicate_with_close_insertion,Description="When multiple insertions in close proximity are detected, all but one insertions are filtered.">\n'\
'##FILTER=<ID=SV_with_decoy,Description="SVs involving decoy contigs">\n'\
        '##FILTER=<ID=Too_small_size,Description="Insertions whose size is below the threshold (currently 100bp)">\n'\
'##FILTER=<ID=Too_low_VAF,Description="SVs whose variant allele frequencies are inferred to be low">\n'\
'##INFO=<ID=SVTYPE,Number=1,Type=String,Description="Type of structural variant">\n'\
'##INFO=<ID=SVLEN,Number=1,Type=Integer,Description="Difference in length between REF and ALT alleles">\n'\
'##INFO=<ID=END,Number=1,Type=Integer,Description="End position of the variant described in this record">\n'\
'##INFO=<ID=MATEID,Number=1,Type=String,Description="ID of mate breakend">\n'\
'##INFO=<ID=SVINSLEN,Number=1,Type=Integer,Description="Length of insertion">\n'\
'##INFO=<ID=SVINSSEQ,Number=1,Type=String,Description="Sequence of insertion">\n'\
'##ALT=<ID=DEL,Description="Deletion">\n'\
'##ALT=<ID=INS,Description="Insertion">\n'\
'##ALT=<ID=DUP,Description="Duplication">\n'\
'##ALT=<ID=INV,Description="Inversion">\n'\
'##FORMAT=<ID=TR,Number=1,Type=Integer,Description="The number of reads around the breakpoints">\n'\
'##FORMAT=<ID=VR,Number=1,Type=Integer,Description="The number of variant supporting reads determined in the validation realignment step">'
with open(result_file, 'r') as hin, open(output_vcf, 'w') as hout:
dreader = csv.DictReader(hin, delimiter = '\t')
fieldname_list = dreader.fieldnames
is_control = True if "Checked_Read_Num_Control" in fieldname_list and "Supporting_Read_Num_Control" in fieldname_list else False
if is_control:
header = header + '\n' + "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tTUMOR\tCONTROL"
else:
header = header + '\n' + "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tTUMOR"
print(header, file = hout)
for F in dreader:
tchrom = F["Chr_1"]
tid = F["SV_ID"]
tqual = '.'
tfilter = F["Is_Filter"]
if F["Inserted_Seq"] != "---":
tsvinsseq = F["Inserted_Seq"]
tsvinslen = len(F["Inserted_Seq"])
else:
tsvinsseq = ''
tsvinslen = 0
tformat_sample = f'TR:VR\t{F["Checked_Read_Num_Tumor"]}:{F["Supporting_Read_Num_Tumor"]}'
if is_control:
tformat_sample = tformat_sample + f'\t{F["Checked_Read_Num_Control"]}:{F["Supporting_Read_Num_Control"]}'
if F["Chr_1"] == F["Chr_2"] and F["Dir_1"] == '+' and F["Dir_2"] == '-':
tpos = int(F["Pos_1"])
tref = ref_tb.fetch(tchrom, tpos - 1, tpos)
if tref == '' or tref is None: continue
tsvlen = int(F["Pos_2"]) - int(F["Pos_1"]) - 1
tend = int(F["Pos_2"]) - 1
# Deletion
if tsvlen > tsvinslen:
talt = "<DEL>"
tsvlen = int(F["Pos_2"]) - int(F["Pos_1"]) - 1
tinfo = f"END={tend};SVTYPE=DEL;SVLEN=-{tsvlen}"
if tsvinslen != 0:
tinfo = tinfo + f";SVINSLEN={tsvinslen};SVINSSEQ={tsvinsseq}"
# Insertion
elif tsvlen >= 0:
talt = "<INS>"
tinfo = f"END={tend};SVTYPE=INS;SVINSLEN={tsvinslen};SVINSSEQ={tsvinsseq}"
else:
continue
print(f"{tchrom}\t{tpos}\t{tid}\t{tref}\t{talt}\t{tqual}\t{tfilter}\t{tinfo}\t{tformat_sample}", file = hout)
# Duplication
elif F["Chr_1"] == F["Chr_2"] and F["Dir_1"] == '-' and F["Dir_2"] == '+' and F["Pos_1"] != '1':
tpos = int(F["Pos_1"])
tref = ref_tb.fetch(tchrom, tpos - 1, tpos)
if tref == '' or tref is None: continue
talt = "<DUP>"
tend = int(F["Pos_2"])
tsvlen = int(F["Pos_2"]) - int(F["Pos_1"]) + 1
tinfo = f"END={tend};SVTYPE=DUP;SVLEN={tsvlen}"
if tsvinslen != 0:
tinfo = tinfo + f";SVINSLEN={tsvinslen};SVINSSEQ={tsvinsseq}"
print(f"{tchrom}\t{tpos}\t{tid}\t{tref}\t{talt}\t{tqual}\t{tfilter}\t{tinfo}\t{tformat_sample}", file = hout)
# Breakend
elif F["Chr_1"] != F["Chr_2"]:
tchrom1 = F["Chr_1"]
tpos1 = int(F["Pos_1"])
tref1 = ref_tb.fetch(tchrom1, tpos1 - 1, tpos1)
if tref1 == '' or tref1 is None: continue
tchrom2 = F["Chr_2"]
tpos2 = int(F["Pos_2"])
tref2 = ref_tb.fetch(tchrom2, tpos2 - 1, tpos2)
if tref2 == '' or tref2 is None: continue
tbracket = ']' if F["Dir_2"] == '+' else '['
if F["Dir_1"] == '+':
talt1 = f'{tref1}{tsvinsseq}{tbracket}{tchrom2}:{tpos2}{tbracket}'
else:
                    talt1 = f'{tbracket}{tchrom2}:{tpos2}{tbracket}{tsvinsseq}{tref1}'
tinfo1 = f"SVTYPE=BND;MATEID={tid}_1"
if tsvinslen != 0: tinfo1 = tinfo1 + f";SVINSLEN={tsvinslen};SVINSSEQ={tsvinsseq}"
print(f"{tchrom1}\t{tpos1}\t{tid}_0\t{tref1}\t{talt1}\t{tqual}\t{tfilter}\t{tinfo1}\t{tformat_sample}", file = hout)
# tchrom2 = F["Chr_2"]
# tpos = int(F["Pos_2"])
# tref = ref_tb.fetch(tchrom2, tpos - 1, tpos)
# if tref == '' or tref is None: continue
tbracket = ']' if F["Dir_1"] == '+' else '['
tsvinsseq = reverse_complement(tsvinsseq)
if F["Dir_2"] == '+':
talt2 = f'{tref2}{tsvinsseq}{tbracket}{tchrom1}:{tpos1}{tbracket}'
else:
talt2 = f'{tbracket}{tchrom1}:{tpos1}{tbracket}{tsvinsseq}{tref2}'
tinfo2 = f"SVTYPE=BND;MATEID={tid}_0"
if tsvinslen != 0: tinfo2 = tinfo2 + f";SVINSLEN={tsvinslen};SVINSSEQ={tsvinsseq}"
print(f"{tchrom2}\t{tpos2}\t{tid}_1\t{tref2}\t{talt2}\t{tqual}\t{tfilter}\t{tinfo2}\t{tformat_sample}", file = hout)
else:
tpos = int(F["Pos_1"])
tref = ref_tb.fetch(tchrom, tpos - 1, tpos)
if tref == '' or tref is None: continue
talt = "<INV>"
tend = int(F["Pos_2"])
tsvlen = int(F["Pos_2"]) - int(F["Pos_1"]) + 1
tinfo = f"END={tend};SVTYPE=INV;SVLEN={tsvlen}"
if tsvinslen != 0:
tinfo = tinfo + f";SVINSLEN={tsvinslen};SVINSSEQ={tsvinsseq}"
print(f"{tchrom}\t{tpos}\t{tid}\t{tref}\t{talt}\t{tqual}\t{tfilter}\t{tinfo}\t{tformat_sample}", file = hout) |
py | 1a52f7ecba0c98fa4e5a298a8c03746323af6091 | # -*- coding: utf-8 -*-
##############################################################
# util.py
# Copyright (C) 2018 Tsubasa Hirakawa. All rights reserved.
##############################################################
def erase_new_line(s):
return s.strip()
def read_text(filename):
with open(filename, 'r') as f:
lines = map(erase_new_line, f.readlines())
return lines
def chunk_list(input_list, n):
return [input_list[x:x + n] for x in range(0, len(input_list), n)]
|
py | 1a52f8c27c916f527045af218c314726352c92de | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
'''
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
from ..util.sampledata import external_csv
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'data',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
data = external_csv('glucose', 'CGM.csv', sep=',', parse_dates=[1], index_col=1)
|
py | 1a52fb16a052334230795cfb3a26251f8c6ba294 | # -*- coding: utf-8 -*-
"""
Created on Sat Oct 27 17:50:46 2018
@author: Priya
"""
import datetime
import hashlib
import json
from flask import Flask, jsonify, request
import requests
from uuid import uuid4
from urllib.parse import urlparse
ui = {"message":"priya"}
pass=""
if "message"in ui:
pass=ui["message"]
print(pass)
app.run(host = '0.0.0.0', port = 5008) |
py | 1a52fb1e27c6c41d7ceb28922119683560bb8220 | import pytest
from vnep_approx import treewidth_model
import numpy as np
from alib import mip
from alib import datamodel as dm
from alib import util
from .test_data.request_test_data import create_test_request
from .test_data.substrate_test_data import create_test_substrate_topology_zoo
import random
import time
import logging
import itertools
logger = util.get_logger(__name__, make_file=False, propagate=True)
random.seed(0)
# TEST Valid Shortest Path Computer
@pytest.mark.parametrize("substrate_id", ["BtAsiaPac", "DeutscheTelekom", "Geant2012", "Surfnet", "Dfn"])
@pytest.mark.parametrize("cost_spread", [-1, 0.5, 1.0, 2.0, 4.0, 8.0]) #cost spread of -1 will test uniform costs
def test_shortest_valid_paths_computer_no_latencies(substrate_id, cost_spread):
req = create_test_request("single edge", set_allowed_nodes=False)
sub = create_test_substrate_topology_zoo(substrate_id, include_latencies=False)
vmrc = treewidth_model.ValidMappingRestrictionComputer(sub, req)
vmrc.compute()
if cost_spread == -1:
# uniform edge costs
edge_costs = {sedge: 1.0 for sedge in sub.edges}
svpc_dijkstra = treewidth_model.ShortestValidPathsComputer.createSVPC(
treewidth_model.ShortestValidPathsComputer.Approx_NoLatencies, sub, vmrc, edge_costs)
svpc_dijkstra.compute()
for reqedge in req.edges:
for snode_source in sub.nodes:
for snode_target in sub.nodes:
if snode_source == snode_target:
assert svpc_dijkstra.valid_sedge_costs[reqedge][(snode_source, snode_target)] == 0
else:
assert svpc_dijkstra.valid_sedge_costs[reqedge][(snode_source, snode_target)] >= 1
else:
# random edge costs
edge_costs = {sedge: max(1, 1000.0 * random.random()) for sedge in sub.edges}
for sedge in sub.edges:
sub.edge[sedge]['cost'] = edge_costs[sedge]
bellman_ford_time = time.time()
sub.initialize_shortest_paths_costs()
bellman_ford_time = time.time() - bellman_ford_time
svpc_dijkstra = treewidth_model.ShortestValidPathsComputer.createSVPC(
treewidth_model.ShortestValidPathsComputer.Approx_NoLatencies, sub, vmrc, edge_costs)
dijkstra_time = time.time()
svpc_dijkstra.compute()
dijkstra_time = time.time() - dijkstra_time
for reqedge in req.edges:
for snode_source in sub.nodes:
for snode_target in sub.nodes:
# print svpc.valid_sedge_costs[reqedge][(snode_source, snode_target)]
# print sub.get_shortest_paths_cost(snode_source, snode_target)
assert svpc_dijkstra.valid_sedge_costs[reqedge][(snode_source, snode_target)] == pytest.approx(
sub.get_shortest_paths_cost(snode_source, snode_target))
logger.info(
"\nComputation times were:\n\tBellman-Ford: {:2.4f}\n"
"\tDijkstra: {:2.4f}\n"
"\tSpeedup by using Dijkstra over Bellman: {:2.2f} (<1 is bad)\n".format(
bellman_ford_time, dijkstra_time, (bellman_ford_time / dijkstra_time)))
@pytest.mark.parametrize("substrate_id", ["BtAsiaPac"])#, "DeutscheTelekom", "Geant2012", "Surfnet", "Dfn"])
@pytest.mark.parametrize("cost_spread", [0.5, 1.0, 2.0, 4.0, 8.0])
@pytest.mark.parametrize("epsilon", [1.0, 0.5, 0.1, 0.01])
@pytest.mark.parametrize("limit_factor", [8.0, 4.0, 2.0, 1.0, 0.5])
def test_shortest_valid_paths_with_latencies(substrate_id, cost_spread, epsilon, limit_factor):
req = create_test_request("single edge", set_allowed_nodes=False)
sub = create_test_substrate_topology_zoo(substrate_id, include_latencies=True)
vmrc = treewidth_model.ValidMappingRestrictionComputer(sub, req)
vmrc.compute()
edge_costs = {sedge: cost_spread*random.random()+1.0 for sedge in sub.edges}
for sedge in sub.edges:
sub.edge[sedge]['cost'] = edge_costs[sedge]
maximal_latency_upper_bound = sum([sub.edge[sedge]["latency"] for sedge in sub.edges])
minimum_edge_cost = min([sub.edge[sedge]["cost"] for sedge in sub.edges])
average_latency = maximal_latency_upper_bound / len(sub.edges)
edge_costs = {sedge: sub.edge[sedge]['cost'] for sedge in sub.edges}
edge_latencies = {sedge: sub.edge[sedge]['latency'] for sedge in sub.edges}
limit = average_latency * limit_factor
runtime_exact_mip = 0.0
runtime_approx_mip = 0.0
runtime_strict = 0.0
runtime_flex = 0.0
def time_computation(spvc):
start_time = time.time()
spvc.compute()
return time.time() - start_time
def compute_latency_of_path(sedge_path):
if sedge_path is None:
return 0.0
return sum([sub.edge[sedge]["latency"] for sedge in sedge_path])
def nan_to_negative_value(value):
if np.isnan(value):
# this guarantees that this cost is ..
# a) negative and
# b) the absolute value of the returned cost is smaller than the minimum cost value
return -minimum_edge_cost / (10*max(epsilon, 1/epsilon))
return value
svpc_exact_mip = treewidth_model.ShortestValidPathsComputer.createSVPC(
treewidth_model.ShortestValidPathsComputer.Approx_Exact_MIP,
sub,
vmrc,
edge_costs,
edge_latencies=edge_latencies,
limit=limit,
epsilon=0.0)
svpc_approximate_mip = treewidth_model.ShortestValidPathsComputer.createSVPC(
treewidth_model.ShortestValidPathsComputer.Approx_Exact_MIP,
sub, vmrc, edge_costs,
edge_latencies=edge_latencies,
limit=limit,
epsilon=epsilon)
svpc_strict = treewidth_model.ShortestValidPathsComputer.createSVPC(
treewidth_model.ShortestValidPathsComputer.Approx_Strict, sub, vmrc, edge_costs,
edge_latencies=edge_latencies,
limit=limit,
epsilon=epsilon)
svpc_flex = treewidth_model.ShortestValidPathsComputer.createSVPC(
treewidth_model.ShortestValidPathsComputer.Approx_Flex, sub, vmrc, edge_costs,
edge_latencies=edge_latencies,
limit=limit,
epsilon=epsilon)
logger.info("\n\n========================================================================================================\n\n"
"Considering now a latency limit of {} (average latency is {}), an epsilon of {} and a cost spread of {}\n\n"
"========================================================================================================\n\n".format(limit, average_latency, epsilon, cost_spread))
logger.info("\n\nStarting exact MIP...\n\n")
runtime_exact_mip += time_computation(svpc_exact_mip)
logger.info("\n\nStarting approximate MIP...\n\n")
runtime_approx_mip += time_computation(svpc_approximate_mip)
logger.info("\n\nStarting strict...\n\n")
runtime_strict += time_computation(svpc_strict)
logger.info("\n\nStarting flex ...\n\n")
runtime_flex += time_computation(svpc_flex)
logger.info(
"\t{:^6s} | {:^6s} || {:^15s} | {:^15s} | {:^15s} | {:^15s} || {:^15s} | {:^15s} || {:^15s} | {:^15s} | {:^15s} | {:^15s}".format("Source", "Target", "c(Flex)", "c(Exact-MIP)",
"c(Approx-MIP)", "c(Strict)", "epsilon", "latency_bound", "l(Flex)", "l(Exact-MIP)",
"l(Approx-MIP)", "l(Strict)"))
failure_counts = {alg: {"cost": 0, "lat": 0} for alg in ["exact_mip", "approx_mip", "flex", "strict"]}
for reqedge in req.edges:
for snode_source in sub.nodes:
for snode_target in sub.nodes:
if snode_source == snode_target:
assert svpc_exact_mip.get_valid_sedge_costs_for_reqedge(reqedge,
(snode_source, snode_target)) == 0.0
assert svpc_approximate_mip.get_valid_sedge_costs_for_reqedge(reqedge,
(snode_source, snode_target)) == 0.0
assert svpc_strict.get_valid_sedge_costs_for_reqedge(reqedge,
(snode_source, snode_target)) == 0.0
assert svpc_flex.get_valid_sedge_costs_for_reqedge(reqedge,
(snode_source, snode_target)) == 0.0
else:
cost_flex = nan_to_negative_value(svpc_flex.get_valid_sedge_costs_for_reqedge(reqedge, (snode_source, snode_target)))
cost_exact_mip = nan_to_negative_value(svpc_exact_mip.get_valid_sedge_costs_for_reqedge(reqedge, (snode_source, snode_target)))
cost_approx_mip = nan_to_negative_value(svpc_approximate_mip.get_valid_sedge_costs_for_reqedge(reqedge, (snode_source, snode_target)))
cost_strict = nan_to_negative_value(svpc_strict.get_valid_sedge_costs_for_reqedge(reqedge, (snode_source, snode_target)))
path_flex = svpc_flex.get_valid_sedge_path(reqedge, snode_source, snode_target)
path_exact_mip = svpc_exact_mip.get_valid_sedge_path(reqedge, snode_source, snode_target)
path_approx_mip = svpc_approximate_mip.get_valid_sedge_path(reqedge, snode_source, snode_target)
path_strict = svpc_strict.get_valid_sedge_path(reqedge, snode_source, snode_target)
lat_flex = compute_latency_of_path(path_flex)
lat_exact_mip = compute_latency_of_path(path_exact_mip)
lat_approx_mip = compute_latency_of_path(path_approx_mip)
lat_strict = compute_latency_of_path(path_strict)
failure_dict = {alg : {"cost": False, "lat": False} for alg in ["exact_mip", "approx_mip", "flex", "strict"]}
def value_lies_outside_of_range(value, reference_value, lower_factor, upper_factor):
result = False
result |= (abs(value) < abs(reference_value) * lower_factor)
result |= (abs(value) > abs(reference_value) * upper_factor)
return result
def bool_to_failure_output(boolean_value):
if boolean_value:
return "FAILED"
else:
return "PASSED"
failure_dict["approx_mip"]["cost"] |= value_lies_outside_of_range(cost_approx_mip, cost_exact_mip, 0.999, 1.001 + epsilon)
failure_dict["strict"]["cost"] |= value_lies_outside_of_range(cost_strict, cost_exact_mip, 0.999, 1.001 + epsilon)
failure_dict["flex"]["cost"] |= value_lies_outside_of_range(cost_flex, cost_exact_mip, 0.0, 1.001)
failure_dict["exact_mip"]["lat"] |= value_lies_outside_of_range(lat_exact_mip, limit, 0.0, 1.001)
failure_dict["approx_mip"]["lat"] |= value_lies_outside_of_range(lat_approx_mip, limit, 0.0, 1.001)
failure_dict["strict"]["lat"] |= value_lies_outside_of_range(lat_strict, limit, 0.0, 1.001)
failure_dict["flex"]["lat"] |= value_lies_outside_of_range(lat_exact_mip, limit, 0.0, 1.001 + epsilon)
failure_found = any([failure_dict[alg][type] for alg in failure_dict for type in failure_dict[alg]])
failure_message = None
output_message = "\t{:^6s} | {:^6s} || {:^15.4f} | {:^15.4f} | {:^15.4f} | {:^15.4f} || {:^15.4f} | {:^15.4f} || {:^15.4f} | {:^15.4f} | {:^15.4f} | {:^15.4f} ".format(
snode_source,
snode_target,
cost_flex,
cost_exact_mip,
cost_approx_mip,
cost_strict,
epsilon,
limit,
lat_flex,
lat_exact_mip,
lat_approx_mip,
lat_strict
)
if failure_found:
failure_message = "\t{:^6s} | {:^6s} || {:^15s} | {:^15s} | {:^15s} | {:^15s} || {:^15.4f} | {:^15.4f} || {:^15s} | {:^15s} | {:^15s} | {:^15s} ".format(
snode_source,
snode_target,
bool_to_failure_output(failure_dict["flex"]["cost"]),
bool_to_failure_output(failure_dict["exact_mip"]["cost"]),
bool_to_failure_output(failure_dict["approx_mip"]["cost"]),
bool_to_failure_output(failure_dict["strict"]["cost"]),
epsilon,
limit,
bool_to_failure_output(failure_dict["flex"]["lat"]),
bool_to_failure_output(failure_dict["exact_mip"]["lat"]),
bool_to_failure_output(failure_dict["approx_mip"]["lat"]),
bool_to_failure_output(failure_dict["strict"]["lat"])
)
if failure_found:
logger.error(output_message)
logger.error(failure_message)
else:
logger.debug(output_message)
for alg in failure_dict:
for type in failure_dict[alg]:
if failure_dict[alg][type]:
failure_counts[alg][type] += 1
logger.info("Runtimes are \n"
"\tExact-MIP: {:10.4f}\n"
"\tApprox-MIP: {:10.4f}\n"
"\tStrict: {:10.4f}\n"
"\tFlex: {:10.4f}\n\n\n".format(runtime_exact_mip,
runtime_approx_mip,
runtime_strict,
runtime_flex))
number_of_failed_tests = sum([failure_counts[alg][type] for alg in failure_counts for type in failure_counts[alg]])
logger.info("Total number of failures: {}\n".format(number_of_failed_tests))
number_of_node_combinations = len(sub.nodes) * len(sub.nodes)
for alg in failure_counts:
for type in failure_counts[alg]:
if failure_counts[alg][type] > 0:
logger.error("\tSummary\t{:^15s} {:^15s}: {:4d} failed of {:4d} ({:6.3f}%)".format(alg, type, failure_counts[alg][type], number_of_node_combinations, 100.0*failure_counts[alg][type]/float(number_of_node_combinations)))
else:
logger.info(
"\t\Summary\t{:^15s} {:^15s}: {:4d} failed of {:4d} ({:6.3f}%)".format(alg, type, failure_counts[alg][type],
number_of_node_combinations,
100.0 * failure_counts[alg][type] / float(
number_of_node_combinations)))
assert number_of_failed_tests == 0 |
py | 1a52fc3f3efb5b79fc976e8824dc0e60fc85a681 | def get_file_lines(filename='/usr/share/dict/words'):
"""Return a list of strings on separate lines in the given text file with
any leading and trailing whitespace characters removed from each line."""
# Open file and remove whitespace from each line
with open(filename) as file:
lines = [line.strip() for line in file]
return lines
def solve_word_jumble(words, circles, final):
"""Solve a word jumble by unscrambling four jumbles, then a final jumble.
Parameters:
- words: list of strings, each is the scrambled letters for a single word
- circles: list of strings, each marks whether the letter at that position
in the solved anagram word will be used to solve the final jumble.
This string contains only two different characters:
1. O (letter "oh") = the letter is in the final jumble
2. _ (underscore) = the letter is not in the final jumble
- final: list of strings in the same format as circles parameter that shows
how the final jumble's letters are arranged into a word or phrase."""
# Get all English words in the built-in dictionary
all_words = get_file_lines()
# TODO: Solve this word jumble with data structures and algorithms
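    # A minimal sketch toward the TODO above (illustrative only, not the full
    # solver): unscramble each jumble by sorted-letter anagram lookup in the
    # dictionary, then collect the circled letters that feed the final jumble.
    anagrams = {}
    for word in all_words:
        anagrams.setdefault(''.join(sorted(word.lower())), []).append(word)
    circled_letters = []
    for scrambled, circle in zip(words, circles):
        matches = anagrams.get(''.join(sorted(scrambled.lower())), [])
        if not matches:
            continue  # no dictionary anagram found for this jumble
        unscrambled = matches[0].upper()
        print('{} -> {}'.format(scrambled, unscrambled))
        circled_letters.extend(letter for letter, mark in zip(unscrambled, circle)
                               if mark == 'O')
    print('Circled letters for the final jumble: {}'.format(''.join(circled_letters)))
    print('Final answer is arranged into lengths: {}'.format([len(group) for group in final]))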
def main():
# Word Jumble 1. Cartoon prompt for final jumble:
# "Farley rolled on the barn floor because of his ___."
words1 = ['TEFON', 'SOKIK', 'NIUMEM', 'SICONU']
circles1 = ['__O_O', 'OO_O_', '____O_', '___OO_']
final1 = ['OO', 'OOOOOO']
solve_word_jumble(words1, circles1, final1)
# Word Jumble 2. Cartoon prompt for final jumble: "What a dog house is."
words2 = ['TARFD', 'JOBUM', 'TENJUK', 'LETHEM']
circles2 = ['____O', '_OO__', '_O___O', 'O____O']
final2 = ['OOOO', 'OOO']
solve_word_jumble(words2, circles2, final2)
if __name__ == '__main__':
main()
# def all_perms(elements):
# if len(elements) <=1:
# return elements
# else:
# tmp = []
# for perm in all_perms(elements[1:]):
# for i in range(len(elements)):
# tmp.append(perm[:i] + elements[0:1] + perm[i:])
# print(tmp)
#
#
# all_perms("dog")
|
py | 1a52fc6c168b2da253d61faedf522446cfd50bb0 | import abc
from typing import Any, Dict, List, Optional
import tqdm
from openforcefield.topology import Molecule
from openforcefield.utils.toolkits import OpenEyeToolkitWrapper, RDKitToolkitWrapper
from pydantic import BaseModel, Field, validator
from pydantic.main import ModelMetaclass
from qcelemental.util import which_import
from ..common_structures import ComponentProperties
from ..datasets import ComponentResult
class InheritSlots(ModelMetaclass):
    # This allows subclasses of CustomWorkflowComponent to inherit __slots__
def __new__(mcs, name, bases, namespace):
slots = set(namespace.pop("__slots__", tuple()))
for base in bases:
if hasattr(base, "__slots__"):
slots.update(base.__slots__)
if "__dict__" in slots:
slots.remove("__dict__")
namespace["__slots__"] = tuple(slots)
return ModelMetaclass.__new__(mcs, name, bases, namespace)
class CustomWorkflowComponent(BaseModel, abc.ABC, metaclass=InheritSlots):
"""
    This is an abstract base class which should be used to create all workflow components; following the design of this
    class should allow users to easily create new workflow components without needing to change much of the dataset
    factory code.
"""
component_name: str = Field(
..., description="The name of the component which should match the class name."
)
component_description: str = Field(
...,
description="A short description of what the component will do to the molecules.",
)
component_fail_message: str = Field(
...,
description="A short description with hints on why the molecule may have caused an error in this workflow component.",
)
_properties: ComponentProperties = Field(
...,
description="The internal runtime properties of the component which can not be changed, these indecate if the component can be ran in parallel and if it may produce duplicate molecules.",
)
_cache: Dict
class Config:
validate_assignment = True
arbitrary_types_allowed = True
# this is a pydantic workaround to add private variables taken from
# https://github.com/samuelcolvin/pydantic/issues/655
__slots__ = [
"_cache",
]
def __init__(self, *args, **kwargs):
super(CustomWorkflowComponent, self).__init__(*args, **kwargs)
self._cache = {}
def __setattr__(self, attr: str, value: Any) -> None:
"""
Overwrite the Pydantic setattr to configure the handling of our __slots___
"""
if attr in self.__slots__:
object.__setattr__(self, attr, value)
else:
super(CustomWorkflowComponent, self).__setattr__(attr, value)
# getstate and setstate are needed since private instance members (_*)
# are not included in pickles by Pydantic at the current moment. Force them
# to be added here. This is needed for multiprocessing support.
def __getstate__(self):
return (
super().__getstate__(),
{slot: getattr(self, slot) for slot in self.__slots__},
)
def __setstate__(self, state):
super().__setstate__(state[0])
d = state[1]
for slot in d:
setattr(self, slot, d[slot])
@classmethod
@abc.abstractmethod
def is_available(cls) -> bool:
"""
This method should identify if the component can be used by checking if the requirements are available.
Returns:
`True` if the component can be used else `False`
"""
...
@abc.abstractmethod
def _apply(self, molecules: List[Molecule]) -> ComponentResult:
"""
This is the main feature of the workflow component which should accept a molecule, perform the component action
and then return the result.
Parameters:
molecules: The list of molecules to be processed by this component.
Returns:
An instance of the [ComponentResult][qcsubmit.datasets.ComponentResult]
class which handles collecting together molecules that pass and fail
the component
"""
...
def _apply_init(self, result: ComponentResult) -> None:
"""
        Any actions that should be performed before running the main apply method go here, such as setting up the _cache for multiprocessing.
Here we clear out the _cache in case something has been set.
"""
self._cache.clear()
def _apply_finalize(self, result: ComponentResult) -> None:
"""
        Any clean-up actions should be added here; by default the _cache is cleared.
"""
self._cache.clear()
def apply(
self,
molecules: List[Molecule],
processors: Optional[int] = None,
verbose: bool = True,
) -> ComponentResult:
"""
This is the main feature of the workflow component which should accept a molecule, perform the component action
        and then return the result.
Parameters:
molecules: The list of molecules to be processed by this component.
            processors: The number of processors the component can use to run the job in parallel across molecules; None will default to all cores.
verbose: If true a progress bar will be shown on screen.
Returns:
An instance of the [ComponentResult][qcsubmit.datasets.ComponentResult]
class which handles collecting together molecules that pass and fail
the component
"""
result: ComponentResult = self._create_result()
self._apply_init(result)
# Use a Pool to get around the GIL. As long as self does not contain
# too much data, this should be efficient.
if (processors is None or processors > 1) and self._properties.process_parallel:
from multiprocessing.pool import Pool
with Pool(processes=processors) as pool:
# Assumes to process in batches of 1 for now
work_list = [
pool.apply_async(self._apply, ([molecule],))
for molecule in molecules
]
for work in tqdm.tqdm(
work_list,
total=len(work_list),
ncols=80,
desc="{:30s}".format(self.component_name),
disable=not verbose,
):
work = work.get()
for success in work.molecules:
result.add_molecule(success)
for fail in work.filtered:
result.filter_molecule(fail)
else:
for molecule in tqdm.tqdm(
molecules,
total=len(molecules),
ncols=80,
desc="{:30s}".format(self.component_name),
disable=not verbose,
):
work = self._apply([molecule])
for success in work.molecules:
result.add_molecule(success)
for fail in work.filtered:
result.filter_molecule(fail)
self._apply_finalize(result)
return result
@abc.abstractmethod
def provenance(self) -> Dict:
"""
This function should detail the programs with version information and procedures called during activation
of the workflow component.
Returns:
A dictionary containing the information about the component and the functions called.
"""
...
def _create_result(self, **kwargs) -> ComponentResult:
"""
        A helper method to create the component result with the required information.
Returns:
A [ComponentResult][qcsubmit.datasets.ComponentResult] instantiated with the required information.
"""
result = ComponentResult(
component_name=self.component_name,
component_description=self.dict(),
component_provenance=self.provenance(),
skip_unique_check=not self._properties.produces_duplicates,
**kwargs,
)
return result
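# Illustrative sketch (comment only, not part of the original module): a concrete
# component is typically built by combining a settings mixin defined below with
# this base class; the class name and attribute values here are hypothetical.
#
#     class RemoveLargeMolecules(BasicSettings, CustomWorkflowComponent):
#         component_name = "RemoveLargeMolecules"
#         component_description = "Filter molecules above a heavy-atom cutoff."
#         component_fail_message = "The molecule has too many heavy atoms."
#         _properties = ComponentProperties(process_parallel=True, produces_duplicates=False)
#         cutoff: int = 40
#
#         def _apply(self, molecules):
#             result = self._create_result()
#             for molecule in molecules:
#                 if molecule.n_atoms <= self.cutoff:
#                     result.add_molecule(molecule)
#                 else:
#                     result.filter_molecule(molecule)
#             return result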
class ToolkitValidator(BaseModel):
"""
A pydantic mixin class that adds toolkit settings and validation along with provenance information.
Note:
The provenance information and toolkit settings are handled by the
[ToolkitValidator][qcsubmit.workflow_components.base_component.ToolkitValidator] mixin.
"""
toolkit: str = Field(
"openeye",
description="The name of the toolkit which should be used in this component.",
)
_toolkits: Dict = {"rdkit": RDKitToolkitWrapper, "openeye": OpenEyeToolkitWrapper}
@validator("toolkit")
def _check_toolkit(cls, toolkit):
"""
Make sure that toolkit is one of the supported types in the OFFTK.
"""
if toolkit not in cls._toolkits.keys():
raise ValueError(
f"The requested toolkit ({toolkit}) is not support by the OFFTK. "
f"Please chose from {cls._toolkits.keys()}."
)
else:
return toolkit
def provenance(self) -> Dict:
"""
This component calls the OFFTK to perform the task and logs information on the backend toolkit used.
Returns:
A dictionary containing the version information about the backend toolkit called to perform the task.
"""
import openforcefield
import qcsubmit
provenance = {
"OpenforcefieldToolkit": openforcefield.__version__,
"QCSubmit": qcsubmit.__version__,
}
if self.toolkit == "rdkit":
import rdkit
provenance["rdkit"] = rdkit.__version__
elif self.toolkit == "openeye":
import openeye
provenance["openeye"] = openeye.__version__
return provenance
@classmethod
def is_available(cls) -> bool:
"""
Check if any of the requested backend toolkits can be used.
"""
if len(cls._toolkits) == 1:
# the package needs a specific toolkit so raise the error
raise_error = True
else:
raise_error = False
for toolkit in cls._toolkits:
if toolkit == "openeye":
oe = which_import(
".oechem",
package="openeye",
return_bool=True,
raise_error=raise_error,
raise_msg="Please install via `conda install openeye-toolkits -c openeye`.",
)
if oe:
return True
elif toolkit == "rdkit":
rdkit = which_import(
"rdkit",
return_bool=True,
raise_error=raise_error,
raise_msg="Please install via `conda install rdkit -c conda-forge`.",
)
if rdkit:
return True
# if we are here both toolkits are missing
raise ModuleNotFoundError(
f"Openeye or RDKit is required to use this component please install via `conda install openeye-toolkits -c openeye` or `conda install rdkit -c conda-forge`."
)
class BasicSettings(BaseModel):
"""
This mixin identifies the class as being basic and always being available as it only requires basic packages.
"""
@classmethod
def is_available(cls) -> bool:
"""
This component is basic if it requires no extra dependencies.
"""
return True
def provenance(self) -> Dict:
"""
The basic settings provenance generator.
"""
import openforcefield
import qcsubmit
provenance = {
"OpenforcefieldToolkit": openforcefield.__version__,
"QCSubmit": qcsubmit.__version__,
}
return provenance
|
py | 1a52fe02f29412e7e684bfaee304a56f09b413d5 | from pathlib import Path
from setuptools import find_packages, setup
module_dir = Path(__file__).resolve().parent
with open(module_dir / "README.md") as f:
long_description = f.read()
if __name__ == "__main__":
setup(
name="atomate2",
setup_requires=["setuptools_scm"],
use_scm_version={"version_scheme": "python-simplified-semver"},
description="atomate2 is a library of materials science workflows",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/hackingmaterials/atomate2",
author="Alex Ganose",
author_email="[email protected]",
license="modified BSD",
keywords="high-throughput automated workflow dft vasp",
package_dir={"": "src"},
package_data={"atomate2": ["py.typed"]},
packages=find_packages("src"),
data_files=["LICENSE"],
zip_safe=False,
include_package_data=True,
install_requires=[
"pymatgen>=2019.11.11",
"custodian>=2019.8.24",
"pydantic",
"monty",
"jobflow>=0.1.5",
"PyYAML",
"numpy",
"click",
],
extras_require={
"amset": ["amset>=0.4.15", "pydash"],
"cclib": ["cclib"],
"docs": [
"sphinx==4.5.0",
"numpydoc==1.2.1",
"mistune==0.8.4",
"ipython==8.2.0",
"FireWorks==2.0.2",
"pydata-sphinx-theme==0.8.1",
"autodoc_pydantic==1.6.1",
"sphinx_panels==0.6.0",
"myst-parser==0.17.0",
],
"tests": [
"pytest==7.1.1",
"pytest-cov==3.0.0",
"FireWorks==2.0.2",
# "amset==0.4.15",
],
"dev": ["pre-commit>=2.12.1"],
"phonons": ["phonopy>=1.10.8"],
},
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"Intended Audience :: System Administrators",
"Intended Audience :: Information Technology",
"Operating System :: OS Independent",
"Topic :: Other/Nonlisted Topic",
"Topic :: Scientific/Engineering",
],
python_requires=">=3.8",
tests_require=["pytest"],
entry_points={
"console_scripts": [
"atm = atomate2.cli:cli",
]
},
)
|
py | 1a52ffcc8df7956e049de5c5f587624b9774e0a4 |
class BaseCAM(object):
"""
Base class for Class Activation Mapping.
"""
def __init__(self, model_dict):
"""Init
# Arguments
model_dict: dict. A dict with format model_dict = dict(arch=self.model, layer_name=target_layer_name).
"""
layer_name = model_dict['layer_name']
self.model_arch = model_dict['arch']
self.model_arch.eval()
self.gradients = dict()
self.activations = dict()
# save gradient
def backward_hook(module, grad_input, grad_output):
self.gradients['value'] = grad_output[0]
return None
# save activation map
def forward_hook(module, input, output):
self.activations['value'] = output
return None
target_layer = self.find_layer(self.model_arch, layer_name)
target_layer.register_forward_hook(forward_hook)
target_layer.register_backward_hook(backward_hook)
def find_layer(self, arch, target_layer_name):
if target_layer_name is None:
if 'resnet' in str(type(arch)):
target_layer_name = 'layer4'
elif 'alexnet' in str(type(arch)) or 'vgg' in str(type(arch)) or 'squeezenet' in str(type(arch)) or 'densenet' in str(type(arch)):
target_layer_name = 'features'
else:
raise Exception('Invalid layer name! Please specify layer name.', target_layer_name)
hierarchy = target_layer_name.split('_')
if hierarchy[0] not in arch._modules.keys():
raise Exception('Invalid layer name!', target_layer_name)
target_layer = arch._modules[hierarchy[0]]
if len(hierarchy) >= 2:
if hierarchy[1] not in target_layer._modules.keys():
raise Exception('Invalid layer name!', target_layer_name)
target_layer = target_layer._modules[hierarchy[1]]
if len(hierarchy) >= 3:
if hierarchy[2] not in target_layer._modules.keys():
raise Exception('Invalid layer name!', target_layer_name)
target_layer = target_layer._modules[hierarchy[2]]
if len(hierarchy) >= 4:
if hierarchy[3] not in target_layer._modules.keys():
raise Exception('Invalid layer name!', target_layer_name)
target_layer = target_layer._modules[hierarchy[3]]
return target_layer
def forward(self, input_, class_idx=None, retain_graph=False):
return None
def __call__(self, input_, class_idx=None, retain_graph=False):
return self.forward(input_, class_idx, retain_graph)
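# Illustrative usage sketch (comment only; the torchvision model and layer name
# are example assumptions, not part of this module):
#
#     from torchvision import models
#     model = models.resnet18(pretrained=True)
#     cam = BaseCAM(dict(arch=model, layer_name='layer4'))
#     # Subclasses override `forward` to turn the hooked activations/gradients
#     # into an actual class activation map.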
|
py | 1a5300756e1f7a423dfa961fe98e0566dd1a7f17 |
# Copyright (C) 2020 Intel Corporation
#
# SPDX-License-Identifier: MIT
class _GitImportFix:
import sys
former_path = sys.path[:]
@classmethod
def apply(cls):
# HACK: fix application and module name clash
# 'git' app is found earlier than a library in the path.
# The clash is introduced by unittest discover
import sys
print('apply')
apps_dir = __file__[:__file__.rfind('/dataset_manager/')]
assert 'apps' in apps_dir
try:
sys.path.remove(apps_dir)
except ValueError:
pass
for name in list(sys.modules):
if name.startswith('git.') or name == 'git':
m = sys.modules.pop(name, None)
del m
import git
assert apps_dir not in git.__file__
@classmethod
def restore(cls):
import sys
print('restore')
for name in list(sys.modules):
if name.startswith('git.') or name == 'git':
m = sys.modules.pop(name)
del m
sys.path.insert(0, __file__[:__file__.rfind('/dataset_manager/')])
import importlib
importlib.invalidate_caches()
def _setUpModule():
_GitImportFix.apply()
import cvat.apps.dataset_manager.task as dm
from cvat.apps.engine.models import Task
globals()['dm'] = dm
globals()['Task'] = Task
import sys
sys.path.insert(0, __file__[:__file__.rfind('/dataset_manager/')])
def tearDownModule():
_GitImportFix.restore()
from io import BytesIO
import os
import random
import tempfile
from PIL import Image
from django.contrib.auth.models import User, Group
from rest_framework.test import APITestCase, APIClient
from rest_framework import status
_setUpModule()
def generate_image_file(filename):
f = BytesIO()
width = random.randint(10, 200)
height = random.randint(10, 200)
image = Image.new('RGB', size=(width, height))
image.save(f, 'jpeg')
f.name = filename
f.seek(0)
return f
def create_db_users(cls):
group_user, _ = Group.objects.get_or_create(name="user")
user_dummy = User.objects.create_superuser(username="test", password="test", email="")
user_dummy.groups.add(group_user)
cls.user = user_dummy
class ForceLogin:
def __init__(self, user, client):
self.user = user
self.client = client
def __enter__(self):
if self.user:
self.client.force_login(self.user,
backend='django.contrib.auth.backends.ModelBackend')
return self
def __exit__(self, exception_type, exception_value, traceback):
if self.user:
self.client.logout()
class TaskExportTest(APITestCase):
def setUp(self):
self.client = APIClient()
@classmethod
def setUpTestData(cls):
create_db_users(cls)
def _generate_task(self):
task = {
"name": "my task #1",
"owner": '',
"assignee": '',
"overlap": 0,
"segment_size": 100,
"z_order": False,
"labels": [
{
"name": "car",
"attributes": [
{
"name": "model",
"mutable": False,
"input_type": "select",
"default_value": "mazda",
"values": ["bmw", "mazda", "renault"]
},
{
"name": "parked",
"mutable": True,
"input_type": "checkbox",
"default_value": False
},
]
},
{"name": "person"},
]
}
task = self._create_task(task, 3)
annotations = {
"version": 0,
"tags": [
{
"frame": 0,
"label_id": task["labels"][0]["id"],
"group": None,
"attributes": []
}
],
"shapes": [
{
"frame": 0,
"label_id": task["labels"][0]["id"],
"group": None,
"attributes": [
{
"spec_id": task["labels"][0]["attributes"][0]["id"],
"value": task["labels"][0]["attributes"][0]["values"][0]
},
{
"spec_id": task["labels"][0]["attributes"][1]["id"],
"value": task["labels"][0]["attributes"][0]["default_value"]
}
],
"points": [1.0, 2.1, 100, 300.222],
"type": "rectangle",
"occluded": False
},
{
"frame": 1,
"label_id": task["labels"][1]["id"],
"group": None,
"attributes": [],
"points": [2.0, 2.1, 100, 300.222, 400, 500, 1, 3],
"type": "polygon",
"occluded": False
},
],
"tracks": [
{
"frame": 0,
"label_id": task["labels"][0]["id"],
"group": None,
"attributes": [
{
"spec_id": task["labels"][0]["attributes"][0]["id"],
"value": task["labels"][0]["attributes"][0]["values"][0]
},
],
"shapes": [
{
"frame": 0,
"points": [1.0, 2.1, 100, 300.222],
"type": "rectangle",
"occluded": False,
"outside": False,
"attributes": [
{
"spec_id": task["labels"][0]["attributes"][1]["id"],
"value": task["labels"][0]["attributes"][1]["default_value"]
}
]
},
{
"frame": 1,
"attributes": [],
"points": [2.0, 2.1, 100, 300.222],
"type": "rectangle",
"occluded": True,
"outside": True
},
]
},
{
"frame": 1,
"label_id": task["labels"][1]["id"],
"group": None,
"attributes": [],
"shapes": [
{
"frame": 1,
"attributes": [],
"points": [1.0, 2.1, 100, 300.222],
"type": "rectangle",
"occluded": False,
"outside": False
}
]
},
]
}
self._put_api_v1_task_id_annotations(task["id"], annotations)
return task, annotations
def _create_task(self, data, size):
with ForceLogin(self.user, self.client):
response = self.client.post('/api/v1/tasks', data=data, format="json")
assert response.status_code == status.HTTP_201_CREATED, response.status_code
tid = response.data["id"]
images = {
"client_files[%d]" % i: generate_image_file("image_%d.jpg" % i)
for i in range(size)
}
images["image_quality"] = 75
response = self.client.post("/api/v1/tasks/{}/data".format(tid), data=images)
assert response.status_code == status.HTTP_202_ACCEPTED, response.status_code
response = self.client.get("/api/v1/tasks/{}".format(tid))
task = response.data
return task
def _put_api_v1_task_id_annotations(self, tid, data):
with ForceLogin(self.user, self.client):
response = self.client.put("/api/v1/tasks/{}/annotations".format(tid),
data=data, format="json")
return response
def _test_export(self, format_name, save_images=False):
self.assertTrue(format_name in [f['tag'] for f in dm.EXPORT_FORMATS])
task, _ = self._generate_task()
project = dm.TaskProject.from_task(
Task.objects.get(pk=task["id"]), self.user.username)
with tempfile.TemporaryDirectory() as test_dir:
project.export(format_name, test_dir, save_images=save_images)
self.assertTrue(os.listdir(test_dir))
def test_datumaro(self):
self._test_export(dm.EXPORT_FORMAT_DATUMARO_PROJECT, save_images=False)
def test_coco(self):
self._test_export('cvat_coco', save_images=True)
def test_voc(self):
self._test_export('cvat_voc', save_images=True)
def test_tf_detection_api(self):
self._test_export('cvat_tfrecord', save_images=True)
def test_yolo(self):
self._test_export('cvat_yolo', save_images=True)
def test_mot(self):
self._test_export('cvat_mot', save_images=True)
def test_labelme(self):
self._test_export('cvat_label_me', save_images=True)
def test_formats_query(self):
formats = dm.get_export_formats()
expected = set(f['tag'] for f in dm.EXPORT_FORMATS)
actual = set(f['tag'] for f in formats)
self.assertSetEqual(expected, actual)
|
py | 1a530101f569b24cb27c39c8752d0d27fc3df578 | #!/home/kyy/lecture/search-restaurants/venv/bin/python3.5
"""PILdriver, an image-processing calculator using PIL.
An instance of class PILDriver is essentially a software stack machine
(Polish-notation interpreter) for sequencing PIL image
transformations. The state of the instance is the interpreter stack.
The only method one will normally invoke after initialization is the
`execute' method. This takes an argument list of tokens, pushes them
onto the instance's stack, and then tries to clear the stack by
successive evaluation of PILdriver operators. Any part of the stack
not cleaned off persists and is part of the evaluation context for
the next call of the execute method.
PILDriver doesn't catch any exceptions, on the theory that these
are actually diagnostic information that should be interpreted by
the calling code.
When called as a script, the command-line arguments are passed to
a PILDriver instance. If there are no command-line arguments, the
module runs an interactive interpreter, each line of which is split into
space-separated tokens and passed to the execute method.
In the method descriptions below, a first line beginning with the string
`usage:' means this method can be invoked with the token that follows
it. Following <>-enclosed arguments describe how the method interprets
the entries on the stack. Each argument specification begins with a
type specification: either `int', `float', `string', or `image'.
All operations consume their arguments off the stack (use `dup' to
keep copies around). Use `verbose 1' to see the stack state displayed
before each operation.
Usage examples:
`show crop 0 0 200 300 open test.png' loads test.png, crops out a portion
of its upper-left-hand corner and displays the cropped portion.
`save rotated.png rotate 30 open test.tiff' loads test.tiff, rotates it
30 degrees, and saves the result as rotated.png (in PNG format).
"""
# by Eric S. Raymond <[email protected]>
# $Id$
# TO DO:
# 1. Add PILFont capabilities, once that's documented.
# 2. Add PILDraw operations.
# 3. Add support for composing and decomposing multiple-image files.
#
from __future__ import print_function
from PIL import Image
class PILDriver(object):
verbose = 0
def do_verbose(self):
"""usage: verbose <int:num>
Set verbosity flag from top of stack.
"""
self.verbose = int(self.do_pop())
# The evaluation stack (internal only)
stack = [] # Stack of pending operations
def push(self, item):
"Push an argument onto the evaluation stack."
self.stack.insert(0, item)
def top(self):
"Return the top-of-stack element."
return self.stack[0]
# Stack manipulation (callable)
def do_clear(self):
"""usage: clear
Clear the stack.
"""
self.stack = []
def do_pop(self):
"""usage: pop
Discard the top element on the stack.
"""
return self.stack.pop(0)
def do_dup(self):
"""usage: dup
Duplicate the top-of-stack item.
"""
        if hasattr(self.stack[0], 'format'):  # If it's an image, do a real copy
dup = self.stack[0].copy()
else:
dup = self.stack[0]
self.push(dup)
def do_swap(self):
"""usage: swap
Swap the top-of-stack item with the next one down.
"""
self.stack = [self.stack[1], self.stack[0]] + self.stack[2:]
# Image module functions (callable)
def do_new(self):
"""usage: new <int:xsize> <int:ysize> <int:color>:
Create and push a greyscale image of given size and color.
"""
xsize = int(self.do_pop())
ysize = int(self.do_pop())
color = int(self.do_pop())
self.push(Image.new("L", (xsize, ysize), color))
def do_open(self):
"""usage: open <string:filename>
Open the indicated image, read it, push the image on the stack.
"""
self.push(Image.open(self.do_pop()))
def do_blend(self):
"""usage: blend <image:pic1> <image:pic2> <float:alpha>
Replace two images and an alpha with the blended image.
"""
image1 = self.do_pop()
image2 = self.do_pop()
alpha = float(self.do_pop())
self.push(Image.blend(image1, image2, alpha))
def do_composite(self):
"""usage: composite <image:pic1> <image:pic2> <image:mask>
Replace two images and a mask with their composite.
"""
image1 = self.do_pop()
image2 = self.do_pop()
mask = self.do_pop()
self.push(Image.composite(image1, image2, mask))
def do_merge(self):
"""usage: merge <string:mode> <image:pic1>
[<image:pic2> [<image:pic3> [<image:pic4>]]]
        Merge top-of-stack images in a way described by the mode.
"""
mode = self.do_pop()
bandlist = []
for band in mode:
bandlist.append(self.do_pop())
self.push(Image.merge(mode, bandlist))
# Image class methods
def do_convert(self):
"""usage: convert <string:mode> <image:pic1>
Convert the top image to the given mode.
"""
mode = self.do_pop()
image = self.do_pop()
self.push(image.convert(mode))
def do_copy(self):
"""usage: copy <image:pic1>
Make and push a true copy of the top image.
"""
        self.do_dup()
def do_crop(self):
"""usage: crop <int:left> <int:upper> <int:right> <int:lower>
<image:pic1>
Crop and push a rectangular region from the current image.
"""
left = int(self.do_pop())
upper = int(self.do_pop())
right = int(self.do_pop())
lower = int(self.do_pop())
image = self.do_pop()
self.push(image.crop((left, upper, right, lower)))
def do_draft(self):
"""usage: draft <string:mode> <int:xsize> <int:ysize>
Configure the loader for a given mode and size.
"""
mode = self.do_pop()
xsize = int(self.do_pop())
ysize = int(self.do_pop())
        image = self.do_pop()
        image.draft(mode, (xsize, ysize))
        self.push(image)
def do_filter(self):
"""usage: filter <string:filtername> <image:pic1>
Process the top image with the given filter.
"""
from PIL import ImageFilter
imageFilter = getattr(ImageFilter, self.do_pop().upper())
image = self.do_pop()
self.push(image.filter(imageFilter))
def do_getbbox(self):
"""usage: getbbox
Push left, upper, right, and lower pixel coordinates of the top image.
"""
bounding_box = self.do_pop().getbbox()
self.push(bounding_box[3])
self.push(bounding_box[2])
self.push(bounding_box[1])
self.push(bounding_box[0])
def do_getextrema(self):
"""usage: extrema
Push minimum and maximum pixel values of the top image.
"""
        extrema = self.do_pop().getextrema()
self.push(extrema[1])
self.push(extrema[0])
def do_offset(self):
"""usage: offset <int:xoffset> <int:yoffset> <image:pic1>
Offset the pixels in the top image.
"""
xoff = int(self.do_pop())
yoff = int(self.do_pop())
        image = self.do_pop()
        from PIL import ImageChops
        self.push(ImageChops.offset(image, xoff, yoff))
def do_paste(self):
"""usage: paste <image:figure> <int:xoffset> <int:yoffset>
<image:ground>
Paste figure image into ground with upper left at given offsets.
"""
figure = self.do_pop()
xoff = int(self.do_pop())
yoff = int(self.do_pop())
ground = self.do_pop()
if figure.mode == "RGBA":
ground.paste(figure, (xoff, yoff), figure)
else:
ground.paste(figure, (xoff, yoff))
self.push(ground)
def do_resize(self):
"""usage: resize <int:xsize> <int:ysize> <image:pic1>
Resize the top image.
"""
ysize = int(self.do_pop())
xsize = int(self.do_pop())
image = self.do_pop()
self.push(image.resize((xsize, ysize)))
def do_rotate(self):
"""usage: rotate <int:angle> <image:pic1>
Rotate image through a given angle
"""
angle = int(self.do_pop())
image = self.do_pop()
self.push(image.rotate(angle))
def do_save(self):
"""usage: save <string:filename> <image:pic1>
Save image with default options.
"""
filename = self.do_pop()
image = self.do_pop()
image.save(filename)
def do_save2(self):
"""usage: save2 <string:filename> <string:options> <image:pic1>
Save image with specified options.
"""
filename = self.do_pop()
options = self.do_pop()
image = self.do_pop()
image.save(filename, None, options)
def do_show(self):
"""usage: show <image:pic1>
Display and pop the top image.
"""
self.do_pop().show()
def do_thumbnail(self):
"""usage: thumbnail <int:xsize> <int:ysize> <image:pic1>
Modify the top image in the stack to contain a thumbnail of itself.
"""
ysize = int(self.do_pop())
xsize = int(self.do_pop())
self.top().thumbnail((xsize, ysize))
def do_transpose(self):
"""usage: transpose <string:operator> <image:pic1>
Transpose the top image.
"""
transpose = self.do_pop().upper()
image = self.do_pop()
        self.push(image.transpose(getattr(Image, transpose)))
# Image attributes
def do_format(self):
"""usage: format <image:pic1>
Push the format of the top image onto the stack.
"""
self.push(self.do_pop().format)
def do_mode(self):
"""usage: mode <image:pic1>
Push the mode of the top image onto the stack.
"""
self.push(self.do_pop().mode)
def do_size(self):
"""usage: size <image:pic1>
Push the image size on the stack as (y, x).
"""
size = self.do_pop().size
self.push(size[0])
self.push(size[1])
# ImageChops operations
def do_invert(self):
"""usage: invert <image:pic1>
Invert the top image.
"""
from PIL import ImageChops
self.push(ImageChops.invert(self.do_pop()))
def do_lighter(self):
"""usage: lighter <image:pic1> <image:pic2>
Pop the two top images, push an image of the lighter pixels of both.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.lighter(image1, image2))
def do_darker(self):
"""usage: darker <image:pic1> <image:pic2>
Pop the two top images, push an image of the darker pixels of both.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.darker(image1, image2))
def do_difference(self):
"""usage: difference <image:pic1> <image:pic2>
Pop the two top images, push the difference image
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.difference(image1, image2))
def do_multiply(self):
"""usage: multiply <image:pic1> <image:pic2>
Pop the two top images, push the multiplication image.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.multiply(image1, image2))
def do_screen(self):
"""usage: screen <image:pic1> <image:pic2>
Pop the two top images, superimpose their inverted versions.
"""
from PIL import ImageChops
image2 = self.do_pop()
image1 = self.do_pop()
self.push(ImageChops.screen(image1, image2))
def do_add(self):
"""usage: add <image:pic1> <image:pic2> <int:offset> <float:scale>
Pop the two top images, produce the scaled sum with offset.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
scale = float(self.do_pop())
offset = int(self.do_pop())
self.push(ImageChops.add(image1, image2, scale, offset))
def do_subtract(self):
"""usage: subtract <image:pic1> <image:pic2> <int:offset> <float:scale>
Pop the two top images, produce the scaled difference with offset.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
scale = float(self.do_pop())
offset = int(self.do_pop())
self.push(ImageChops.subtract(image1, image2, scale, offset))
# ImageEnhance classes
def do_color(self):
"""usage: color <image:pic1>
Enhance color in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Color(image)
self.push(enhancer.enhance(factor))
def do_contrast(self):
"""usage: contrast <image:pic1>
Enhance contrast in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Contrast(image)
self.push(enhancer.enhance(factor))
def do_brightness(self):
"""usage: brightness <image:pic1>
Enhance brightness in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Brightness(image)
self.push(enhancer.enhance(factor))
def do_sharpness(self):
"""usage: sharpness <image:pic1>
Enhance sharpness in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Sharpness(image)
self.push(enhancer.enhance(factor))
# The interpreter loop
def execute(self, list):
"Interpret a list of PILDriver commands."
list.reverse()
while len(list) > 0:
self.push(list[0])
list = list[1:]
if self.verbose:
print("Stack: " + repr(self.stack))
top = self.top()
if not isinstance(top, str):
continue
funcname = "do_" + top
if not hasattr(self, funcname):
continue
else:
self.do_pop()
func = getattr(self, funcname)
func()
if __name__ == '__main__':
import sys
# If we see command-line arguments, interpret them as a stack state
# and execute. Otherwise go interactive.
driver = PILDriver()
if len(sys.argv[1:]) > 0:
driver.execute(sys.argv[1:])
else:
print("PILDriver says hello.")
while True:
try:
if sys.version_info[0] >= 3:
line = input('pildriver> ')
else:
line = raw_input('pildriver> ')
except EOFError:
print("\nPILDriver says goodbye.")
break
driver.execute(line.split())
print(driver.stack)
# The following sets edit modes for GNU EMACS
# Local Variables:
# mode:python
# End:
|
py | 1a53010622676c41b01819ea5248204431810b9b | import sys
def main(input_file):
with open(input_file, 'r') as fh:
for line in fh:
line = line.strip()
days = int(line.split(';')[0])
array = [int(x) for x in line.split(';')[1].split(' ')]
            result = 0
            # maximum sum over any window of `days` consecutive values
            for i in range(0, len(array)-days+1):
                result = max(sum(array[i:i+days]), result)
print(result)
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: %s <input_file>" % sys.argv[0])
sys.exit(1)
main(sys.argv[1])
|
py | 1a530162f0ed202ea17a803ea266c7892fee7149 | # Copyright 2013 IBM Corp.
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import inspect
import math
import time
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import strutils
import six
import webob
from nova.api.openstack import api_version_request as api_version
from nova.api.openstack import versioned_method
from nova import exception
from nova import i18n
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LI
from nova import utils
from nova import wsgi
LOG = logging.getLogger(__name__)
_SUPPORTED_CONTENT_TYPES = (
'application/json',
'application/vnd.openstack.compute+json',
)
_MEDIA_TYPE_MAP = {
'application/vnd.openstack.compute+json': 'json',
'application/json': 'json',
}
# These are typically automatically created by routes as either defaults
# collection or member methods.
_ROUTES_METHODS = [
'create',
'delete',
'show',
'update',
]
_METHODS_WITH_BODY = [
'POST',
'PUT',
]
# The default api version request if none is requested in the headers
# Note(cyeoh): This only applies for the v2.1 API once microversions
# support is fully merged. It does not affect the V2 API.
DEFAULT_API_VERSION = "2.1"
# name of attribute to keep version method information
VER_METHOD_ATTR = 'versioned_methods'
# Name of header used by clients to request a specific version
# of the REST API
API_VERSION_REQUEST_HEADER = 'X-OpenStack-Nova-API-Version'
ENV_LEGACY_V2 = 'openstack.legacy_v2'
def get_supported_content_types():
return _SUPPORTED_CONTENT_TYPES
def get_media_map():
return dict(_MEDIA_TYPE_MAP.items())
# NOTE(rlrossit): This function allows a get on both a dict-like and an
# object-like object. cache_db_items() is used on both versioned objects and
# dicts, so the function can't be totally changed over to [] syntax, nor
# can it be changed over to use getattr().
def item_get(item, item_key):
if hasattr(item, '__getitem__'):
return item[item_key]
else:
return getattr(item, item_key)
class Request(wsgi.Request):
"""Add some OpenStack API-specific logic to the base webob.Request."""
def __init__(self, *args, **kwargs):
super(Request, self).__init__(*args, **kwargs)
self._extension_data = {'db_items': {}}
if not hasattr(self, 'api_version_request'):
self.api_version_request = api_version.APIVersionRequest()
def cache_db_items(self, key, items, item_key='id'):
"""Allow API methods to store objects from a DB query to be
used by API extensions within the same API request.
An instance of this class only lives for the lifetime of a
single API request, so there's no need to implement full
cache management.
"""
db_items = self._extension_data['db_items'].setdefault(key, {})
for item in items:
db_items[item_get(item, item_key)] = item
def get_db_items(self, key):
"""Allow an API extension to get previously stored objects within
the same API request.
Note that the object data will be slightly stale.
"""
return self._extension_data['db_items'][key]
def get_db_item(self, key, item_key):
"""Allow an API extension to get a previously stored object
within the same API request.
Note that the object data will be slightly stale.
"""
return self.get_db_items(key).get(item_key)
def cache_db_instances(self, instances):
self.cache_db_items('instances', instances, 'uuid')
def cache_db_instance(self, instance):
self.cache_db_items('instances', [instance], 'uuid')
def get_db_instances(self):
return self.get_db_items('instances')
def get_db_instance(self, instance_uuid):
return self.get_db_item('instances', instance_uuid)
def cache_db_flavors(self, flavors):
self.cache_db_items('flavors', flavors, 'flavorid')
def cache_db_flavor(self, flavor):
self.cache_db_items('flavors', [flavor], 'flavorid')
def get_db_flavors(self):
return self.get_db_items('flavors')
def get_db_flavor(self, flavorid):
return self.get_db_item('flavors', flavorid)
def cache_db_compute_nodes(self, compute_nodes):
self.cache_db_items('compute_nodes', compute_nodes, 'id')
def cache_db_compute_node(self, compute_node):
self.cache_db_items('compute_nodes', [compute_node], 'id')
def get_db_compute_nodes(self):
return self.get_db_items('compute_nodes')
def get_db_compute_node(self, id):
return self.get_db_item('compute_nodes', id)
def best_match_content_type(self):
"""Determine the requested response content-type."""
if 'nova.best_content_type' not in self.environ:
# Calculate the best MIME type
content_type = None
# Check URL path suffix
parts = self.path.rsplit('.', 1)
if len(parts) > 1:
possible_type = 'application/' + parts[1]
if possible_type in get_supported_content_types():
content_type = possible_type
if not content_type:
content_type = self.accept.best_match(
get_supported_content_types())
self.environ['nova.best_content_type'] = (content_type or
'application/json')
return self.environ['nova.best_content_type']
def get_content_type(self):
"""Determine content type of the request body.
Does not do any body introspection, only checks header
"""
if "Content-Type" not in self.headers:
return None
content_type = self.content_type
# NOTE(markmc): text/plain is the default for eventlet and
# other webservers which use mimetools.Message.gettype()
# whereas twisted defaults to ''.
if not content_type or content_type == 'text/plain':
return None
if content_type not in get_supported_content_types():
raise exception.InvalidContentType(content_type=content_type)
return content_type
def best_match_language(self):
"""Determine the best available language for the request.
:returns: the best language match or None if the 'Accept-Language'
header was not available in the request.
"""
if not self.accept_language:
return None
return self.accept_language.best_match(
i18n.get_available_languages())
def set_api_version_request(self):
"""Set API version request based on the request header information."""
if API_VERSION_REQUEST_HEADER in self.headers:
hdr_string = self.headers[API_VERSION_REQUEST_HEADER]
# 'latest' is a special keyword which is equivalent to requesting
# the maximum version of the API supported
if hdr_string == 'latest':
self.api_version_request = api_version.max_api_version()
else:
self.api_version_request = api_version.APIVersionRequest(
hdr_string)
# Check that the version requested is within the global
# minimum/maximum of supported API versions
if not self.api_version_request.matches(
api_version.min_api_version(),
api_version.max_api_version()):
raise exception.InvalidGlobalAPIVersion(
req_ver=self.api_version_request.get_string(),
min_ver=api_version.min_api_version().get_string(),
max_ver=api_version.max_api_version().get_string())
else:
self.api_version_request = api_version.APIVersionRequest(
api_version.DEFAULT_API_VERSION)
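    # Illustrative request headers (hypothetical version number): a client
    # pins a specific microversion with
    #     X-OpenStack-Nova-API-Version: 2.12
    # or asks for the newest supported one with
    #     X-OpenStack-Nova-API-Version: latest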
def set_legacy_v2(self):
self.environ[ENV_LEGACY_V2] = True
def is_legacy_v2(self):
return self.environ.get(ENV_LEGACY_V2, False)
class ActionDispatcher(object):
"""Maps method name to local methods through action name."""
def dispatch(self, *args, **kwargs):
"""Find and call local method."""
action = kwargs.pop('action', 'default')
action_method = getattr(self, str(action), self.default)
return action_method(*args, **kwargs)
def default(self, data):
raise NotImplementedError()
class JSONDeserializer(ActionDispatcher):
def _from_json(self, datastring):
try:
return jsonutils.loads(datastring)
except ValueError:
msg = _("cannot understand JSON")
raise exception.MalformedRequestBody(reason=msg)
def deserialize(self, datastring, action='default'):
return self.dispatch(datastring, action=action)
def default(self, datastring):
return {'body': self._from_json(datastring)}
class JSONDictSerializer(ActionDispatcher):
"""Default JSON request body serialization."""
def serialize(self, data, action='default'):
return self.dispatch(data, action=action)
def default(self, data):
return six.text_type(jsonutils.dumps(data))
def response(code):
"""Attaches response code to a method.
This decorator associates a response code with a method. Note
that the function attributes are directly manipulated; the method
is not wrapped.
"""
def decorator(func):
func.wsgi_code = code
return func
return decorator
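# Illustrative usage of the decorator above (hypothetical controller method):
#
#     @response(202)
#     def create(self, req, body):
#         ...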
class ResponseObject(object):
"""Bundles a response object
Object that app methods may return in order to allow its response
to be modified by extensions in the code. Its use is optional (and
should only be used if you really know what you are doing).
"""
def __init__(self, obj, code=None, headers=None):
"""Builds a response object."""
self.obj = obj
self._default_code = 200
self._code = code
self._headers = headers or {}
self.serializer = JSONDictSerializer()
def __getitem__(self, key):
"""Retrieves a header with the given name."""
return self._headers[key.lower()]
def __setitem__(self, key, value):
"""Sets a header with the given name to the given value."""
self._headers[key.lower()] = value
def __delitem__(self, key):
"""Deletes the header with the given name."""
del self._headers[key.lower()]
def serialize(self, request, content_type):
"""Serializes the wrapped object.
Utility method for serializing the wrapped object. Returns a
webob.Response object.
"""
serializer = self.serializer
body = None
if self.obj is not None:
body = serializer.serialize(self.obj)
response = webob.Response(body=body)
if response.headers.get('Content-Length'):
# NOTE(andreykurilin): we need to encode 'Content-Length' header,
# since webob.Response auto sets it if "body" attr is presented.
# https://github.com/Pylons/webob/blob/1.5.0b0/webob/response.py#L147
response.headers['Content-Length'] = utils.utf8(
response.headers['Content-Length'])
response.status_int = self.code
for hdr, value in self._headers.items():
response.headers[hdr] = utils.utf8(value)
response.headers['Content-Type'] = utils.utf8(content_type)
return response
@property
def code(self):
"""Retrieve the response status."""
return self._code or self._default_code
@property
def headers(self):
"""Retrieve the headers."""
return self._headers.copy()
def action_peek(body):
"""Determine action to invoke.
This looks inside the json body and fetches out the action method
name.
"""
try:
decoded = jsonutils.loads(body)
except ValueError:
msg = _("cannot understand JSON")
raise exception.MalformedRequestBody(reason=msg)
# Make sure there's exactly one key...
if len(decoded) != 1:
msg = _("too many body keys")
raise exception.MalformedRequestBody(reason=msg)
# Return the action name
return list(decoded.keys())[0]
class ResourceExceptionHandler(object):
"""Context manager to handle Resource exceptions.
Used when processing exceptions generated by API implementation
methods (or their extensions). Converts most exceptions to Fault
exceptions, with the appropriate logging.
"""
def __enter__(self):
return None
def __exit__(self, ex_type, ex_value, ex_traceback):
if not ex_value:
return True
if isinstance(ex_value, exception.Forbidden):
raise Fault(webob.exc.HTTPForbidden(
explanation=ex_value.format_message()))
elif isinstance(ex_value, exception.VersionNotFoundForAPIMethod):
raise
elif isinstance(ex_value, exception.Invalid):
raise Fault(exception.ConvertedException(
code=ex_value.code,
explanation=ex_value.format_message()))
elif isinstance(ex_value, TypeError):
exc_info = (ex_type, ex_value, ex_traceback)
LOG.error(_LE('Exception handling resource: %s'), ex_value,
exc_info=exc_info)
raise Fault(webob.exc.HTTPBadRequest())
elif isinstance(ex_value, Fault):
LOG.info(_LI("Fault thrown: %s"), ex_value)
raise ex_value
elif isinstance(ex_value, webob.exc.HTTPException):
LOG.info(_LI("HTTP exception thrown: %s"), ex_value)
raise Fault(ex_value)
# We didn't handle the exception
return False
class Resource(wsgi.Application):
"""WSGI app that handles (de)serialization and controller dispatch.
WSGI app that reads routing information supplied by RoutesMiddleware
and calls the requested action method upon its controller. All
controller action methods must accept a 'req' argument, which is the
incoming wsgi.Request. If the operation is a PUT or POST, the controller
method must also accept a 'body' argument (the deserialized request body).
They may raise a webob.exc exception or return a dict, which will be
serialized by requested content type.
Exceptions derived from webob.exc.HTTPException will be automatically
wrapped in Fault() to provide API friendly error responses.
"""
support_api_request_version = False
def __init__(self, controller, inherits=None):
""":param controller: object that implement methods created by routes
lib
:param inherits: another resource object that this resource should
inherit extensions from. Any action extensions that
are applied to the parent resource will also apply
to this resource.
"""
self.controller = controller
self.default_serializers = dict(json=JSONDictSerializer)
# Copy over the actions dictionary
self.wsgi_actions = {}
if controller:
self.register_actions(controller)
# Save a mapping of extensions
self.wsgi_extensions = {}
self.wsgi_action_extensions = {}
self.inherits = inherits
def register_actions(self, controller):
"""Registers controller actions with this resource."""
actions = getattr(controller, 'wsgi_actions', {})
for key, method_name in actions.items():
self.wsgi_actions[key] = getattr(controller, method_name)
def register_extensions(self, controller):
"""Registers controller extensions with this resource."""
extensions = getattr(controller, 'wsgi_extensions', [])
for method_name, action_name in extensions:
# Look up the extending method
extension = getattr(controller, method_name)
if action_name:
# Extending an action...
if action_name not in self.wsgi_action_extensions:
self.wsgi_action_extensions[action_name] = []
self.wsgi_action_extensions[action_name].append(extension)
else:
# Extending a regular method
if method_name not in self.wsgi_extensions:
self.wsgi_extensions[method_name] = []
self.wsgi_extensions[method_name].append(extension)
def get_action_args(self, request_environment):
"""Parse dictionary created by routes library."""
# NOTE(Vek): Check for get_action_args() override in the
# controller
if hasattr(self.controller, 'get_action_args'):
return self.controller.get_action_args(request_environment)
try:
args = request_environment['wsgiorg.routing_args'][1].copy()
except (KeyError, IndexError, AttributeError):
return {}
try:
del args['controller']
except KeyError:
pass
try:
del args['format']
except KeyError:
pass
return args
def get_body(self, request):
content_type = request.get_content_type()
return content_type, request.body
def deserialize(self, body):
return JSONDeserializer().deserialize(body)
# NOTE(sdague): I didn't start the fire, however here is what all
# of this is about.
#
# In the legacy v2 code stack, extensions could extend actions
# with a generator that let 1 method be split into a top and
# bottom half. The top half gets executed before the main
# processing of the request (so effectively gets to modify the
# request before it gets to the main method).
#
# Returning a response triggers a shortcut to fail out. The
# response will nearly always be a failure condition, as it ends
# up skipping further processing one level up from here.
#
    # It then passes the list of extensions on, in reverse order;
    # post_process will run through all of those, again with the same
    # basic logic.
#
# In tree this is only used in the legacy v2 stack, and only in
# the DiskConfig and SchedulerHints from what I can see.
#
# pre_process_extensions can be removed when the legacyv2 code
# goes away. post_process_extensions can be massively simplified
# at that point.
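    # Illustrative sketch of such a generator extension (hypothetical names,
    # not taken from this module): the code before the ``yield`` is the
    # pre-processing half, the code after it the post-processing half, and
    # the value sent back in is the ResponseObject produced by the main
    # method.
    #
    #     @extends(action='resize')
    #     def _extend_resize(self, req, id, body):
    #         ...                      # runs before the main method
    #         resp_obj = yield
    #         ...                      # runs after it, may inspect resp_obj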
def pre_process_extensions(self, extensions, request, action_args):
# List of callables for post-processing extensions
post = []
for ext in extensions:
if inspect.isgeneratorfunction(ext):
response = None
# If it's a generator function, the part before the
# yield is the preprocessing stage
try:
with ResourceExceptionHandler():
gen = ext(req=request, **action_args)
response = next(gen)
except Fault as ex:
response = ex
# We had a response...
if response:
return response, []
# No response, queue up generator for post-processing
post.append(gen)
else:
# Regular functions only perform post-processing
post.append(ext)
        # A None response means we keep going. We reverse the
# extension list for post-processing.
return None, reversed(post)
def post_process_extensions(self, extensions, resp_obj, request,
action_args):
for ext in extensions:
response = None
if inspect.isgenerator(ext):
# If it's a generator, run the second half of
# processing
try:
with ResourceExceptionHandler():
response = ext.send(resp_obj)
except StopIteration:
# Normal exit of generator
continue
except Fault as ex:
response = ex
else:
# Regular functions get post-processing...
try:
with ResourceExceptionHandler():
response = ext(req=request, resp_obj=resp_obj,
**action_args)
except exception.VersionNotFoundForAPIMethod:
# If an attached extension (@wsgi.extends) for the
# method has no version match its not an error. We
# just don't run the extends code
continue
except Fault as ex:
response = ex
# We had a response...
if response:
return response
return None
def _should_have_body(self, request):
return request.method in _METHODS_WITH_BODY
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, request):
"""WSGI method that controls (de)serialization and method dispatch."""
if self.support_api_request_version:
# Set the version of the API requested based on the header
try:
request.set_api_version_request()
except exception.InvalidAPIVersionString as e:
return Fault(webob.exc.HTTPBadRequest(
explanation=e.format_message()))
except exception.InvalidGlobalAPIVersion as e:
return Fault(webob.exc.HTTPNotAcceptable(
explanation=e.format_message()))
# Identify the action, its arguments, and the requested
# content type
action_args = self.get_action_args(request.environ)
action = action_args.pop('action', None)
# NOTE(sdague): we filter out InvalidContentTypes early so we
# know everything is good from here on out.
try:
content_type, body = self.get_body(request)
accept = request.best_match_content_type()
except exception.InvalidContentType:
msg = _("Unsupported Content-Type")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
# NOTE(Vek): Splitting the function up this way allows for
# auditing by external tools that wrap the existing
# function. If we try to audit __call__(), we can
# run into troubles due to the @webob.dec.wsgify()
# decorator.
return self._process_stack(request, action, action_args,
content_type, body, accept)
def _process_stack(self, request, action, action_args,
content_type, body, accept):
"""Implement the processing stack."""
# Get the implementing method
try:
meth, extensions = self.get_method(request, action,
content_type, body)
except (AttributeError, TypeError):
return Fault(webob.exc.HTTPNotFound())
except KeyError as ex:
msg = _("There is no such action: %s") % ex.args[0]
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
except exception.MalformedRequestBody:
msg = _("Malformed request body")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
if body:
msg = _("Action: '%(action)s', calling method: %(meth)s, body: "
"%(body)s") % {'action': action,
'body': six.text_type(body, 'utf-8'),
'meth': str(meth)}
LOG.debug(strutils.mask_password(msg))
else:
LOG.debug("Calling method '%(meth)s'",
{'meth': str(meth)})
# Now, deserialize the request body...
try:
contents = {}
if self._should_have_body(request):
# allow empty body with PUT and POST
if request.content_length == 0:
contents = {'body': None}
else:
contents = self.deserialize(body)
except exception.MalformedRequestBody:
msg = _("Malformed request body")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
# Update the action args
action_args.update(contents)
project_id = action_args.pop("project_id", None)
context = request.environ.get('nova.context')
if (context and project_id and (project_id != context.project_id)):
msg = _("Malformed request URL: URL's project_id '%(project_id)s'"
" doesn't match Context's project_id"
" '%(context_project_id)s'") % \
{'project_id': project_id,
'context_project_id': context.project_id}
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
# Run pre-processing extensions
response, post = self.pre_process_extensions(extensions,
request, action_args)
if not response:
try:
with ResourceExceptionHandler():
action_result = self.dispatch(meth, request, action_args)
except Fault as ex:
response = ex
if not response:
# No exceptions; convert action_result into a
# ResponseObject
resp_obj = None
if type(action_result) is dict or action_result is None:
resp_obj = ResponseObject(action_result)
elif isinstance(action_result, ResponseObject):
resp_obj = action_result
else:
response = action_result
# Run post-processing extensions
if resp_obj:
# Do a preserialize to set up the response object
if hasattr(meth, 'wsgi_code'):
resp_obj._default_code = meth.wsgi_code
# Process post-processing extensions
response = self.post_process_extensions(post, resp_obj,
request, action_args)
if resp_obj and not response:
response = resp_obj.serialize(request, accept)
if hasattr(response, 'headers'):
for hdr, val in list(response.headers.items()):
# Headers must be utf-8 strings
response.headers[hdr] = utils.utf8(val)
if not request.api_version_request.is_null():
response.headers[API_VERSION_REQUEST_HEADER] = \
request.api_version_request.get_string()
response.headers['Vary'] = API_VERSION_REQUEST_HEADER
return response
def get_method(self, request, action, content_type, body):
meth, extensions = self._get_method(request,
action,
content_type,
body)
if self.inherits:
_meth, parent_ext = self.inherits.get_method(request,
action,
content_type,
body)
extensions.extend(parent_ext)
return meth, extensions
def _get_method(self, request, action, content_type, body):
"""Look up the action-specific method and its extensions."""
# Look up the method
try:
if not self.controller:
meth = getattr(self, action)
else:
meth = getattr(self.controller, action)
except AttributeError:
if (not self.wsgi_actions or
action not in _ROUTES_METHODS + ['action']):
# Propagate the error
raise
else:
return meth, self.wsgi_extensions.get(action, [])
if action == 'action':
action_name = action_peek(body)
else:
action_name = action
# Look up the action method
return (self.wsgi_actions[action_name],
self.wsgi_action_extensions.get(action_name, []))
def dispatch(self, method, request, action_args):
"""Dispatch a call to the action-specific method."""
try:
return method(req=request, **action_args)
except exception.VersionNotFoundForAPIMethod:
# We deliberately don't return any message information
# about the exception to the user so it looks as if
# the method is simply not implemented.
return Fault(webob.exc.HTTPNotFound())
class ResourceV21(Resource):
support_api_request_version = True
def action(name):
"""Mark a function as an action.
The given name will be taken as the action key in the body.
This is also overloaded to allow extensions to provide
non-extending definitions of create and delete operations.
"""
def decorator(func):
func.wsgi_action = name
return func
return decorator
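# Illustrative usage of the decorator above (hypothetical controller method):
# the registered name is the single key expected in the action request body.
#
#     @action('reboot')
#     def _action_reboot(self, req, id, body):
#         ...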
def extends(*args, **kwargs):
"""Indicate a function extends an operation.
Can be used as either::
@extends
def index(...):
pass
or as::
@extends(action='resize')
def _action_resize(...):
pass
"""
def decorator(func):
# Store enough information to find what we're extending
func.wsgi_extends = (func.__name__, kwargs.get('action'))
return func
# If we have positional arguments, call the decorator
if args:
return decorator(*args)
# OK, return the decorator instead
return decorator
class ControllerMetaclass(type):
"""Controller metaclass.
This metaclass automates the task of assembling a dictionary
mapping action keys to method names.
"""
def __new__(mcs, name, bases, cls_dict):
"""Adds the wsgi_actions dictionary to the class."""
# Find all actions
actions = {}
extensions = []
versioned_methods = None
# start with wsgi actions from base classes
for base in bases:
actions.update(getattr(base, 'wsgi_actions', {}))
if base.__name__ == "Controller":
# NOTE(cyeoh): This resets the VER_METHOD_ATTR attribute
# between API controller class creations. This allows us
# to use a class decorator on the API methods that doesn't
# require naming explicitly what method is being versioned as
# it can be implicit based on the method decorated. It is a bit
# ugly.
if VER_METHOD_ATTR in base.__dict__:
versioned_methods = getattr(base, VER_METHOD_ATTR)
delattr(base, VER_METHOD_ATTR)
for key, value in cls_dict.items():
if not callable(value):
continue
if getattr(value, 'wsgi_action', None):
actions[value.wsgi_action] = key
elif getattr(value, 'wsgi_extends', None):
extensions.append(value.wsgi_extends)
# Add the actions and extensions to the class dict
cls_dict['wsgi_actions'] = actions
cls_dict['wsgi_extensions'] = extensions
if versioned_methods:
cls_dict[VER_METHOD_ATTR] = versioned_methods
return super(ControllerMetaclass, mcs).__new__(mcs, name, bases,
cls_dict)
@six.add_metaclass(ControllerMetaclass)
class Controller(object):
"""Default controller."""
_view_builder_class = None
def __init__(self, view_builder=None):
"""Initialize controller with a view builder instance."""
if view_builder:
self._view_builder = view_builder
elif self._view_builder_class:
self._view_builder = self._view_builder_class()
else:
self._view_builder = None
def __getattribute__(self, key):
def version_select(*args, **kwargs):
"""Look for the method which matches the name supplied and version
constraints and calls it with the supplied arguments.
@return: Returns the result of the method called
@raises: VersionNotFoundForAPIMethod if there is no method which
matches the name and version constraints
"""
# The first arg to all versioned methods is always the request
# object. The version for the request is attached to the
# request object
if len(args) == 0:
ver = kwargs['req'].api_version_request
else:
ver = args[0].api_version_request
func_list = self.versioned_methods[key]
for func in func_list:
if ver.matches(func.start_version, func.end_version):
# Update the version_select wrapper function so
# other decorator attributes like wsgi.response
# are still respected.
functools.update_wrapper(version_select, func.func)
return func.func(self, *args, **kwargs)
# No version match
raise exception.VersionNotFoundForAPIMethod(version=ver)
try:
version_meth_dict = object.__getattribute__(self, VER_METHOD_ATTR)
except AttributeError:
# No versioning on this class
return object.__getattribute__(self, key)
if version_meth_dict and \
key in object.__getattribute__(self, VER_METHOD_ATTR):
return version_select
return object.__getattribute__(self, key)
# NOTE(cyeoh): This decorator MUST appear first (the outermost
# decorator) on an API method for it to work correctly
@classmethod
def api_version(cls, min_ver, max_ver=None):
"""Decorator for versioning api methods.
Add the decorator to any method which takes a request object
as the first parameter and belongs to a class which inherits from
wsgi.Controller.
@min_ver: string representing minimum version
@max_ver: optional string representing maximum version
"""
def decorator(f):
obj_min_ver = api_version.APIVersionRequest(min_ver)
if max_ver:
obj_max_ver = api_version.APIVersionRequest(max_ver)
else:
obj_max_ver = api_version.APIVersionRequest()
# Add to list of versioned methods registered
func_name = f.__name__
new_func = versioned_method.VersionedMethod(
func_name, obj_min_ver, obj_max_ver, f)
func_dict = getattr(cls, VER_METHOD_ATTR, {})
if not func_dict:
setattr(cls, VER_METHOD_ATTR, func_dict)
func_list = func_dict.get(func_name, [])
if not func_list:
func_dict[func_name] = func_list
func_list.append(new_func)
# Ensure the list is sorted by minimum version (reversed)
# so later when we work through the list in order we find
# the method which has the latest version which supports
# the version requested.
# TODO(cyeoh): Add check to ensure that there are no overlapping
            # ranges of valid versions as that is ambiguous
func_list.sort(key=lambda f: f.start_version, reverse=True)
return f
return decorator
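    # Illustrative usage of api_version (hypothetical controller): two
    # implementations of the same method, selected at dispatch time by the
    # microversion the client requested.
    #
    #     class ExampleController(Controller):
    #
    #         @Controller.api_version("2.1", "2.3")
    #         def show(self, req, id):
    #             ...
    #
    #         @Controller.api_version("2.4")  # intentional redefinition
    #         def show(self, req, id):
    #             ...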
@staticmethod
def is_valid_body(body, entity_name):
if not (body and entity_name in body):
return False
def is_dict(d):
try:
d.get(None)
return True
except AttributeError:
return False
return is_dict(body[entity_name])
class Fault(webob.exc.HTTPException):
"""Wrap webob.exc.HTTPException to provide API friendly response."""
_fault_names = {
400: "badRequest",
401: "unauthorized",
403: "forbidden",
404: "itemNotFound",
405: "badMethod",
409: "conflictingRequest",
413: "overLimit",
415: "badMediaType",
429: "overLimit",
501: "notImplemented",
503: "serviceUnavailable"}
def __init__(self, exception):
"""Create a Fault for the given webob.exc.exception."""
self.wrapped_exc = exception
for key, value in list(self.wrapped_exc.headers.items()):
self.wrapped_exc.headers[key] = str(value)
self.status_int = exception.status_int
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
"""Generate a WSGI response based on the exception passed to ctor."""
user_locale = req.best_match_language()
# Replace the body with fault details.
code = self.wrapped_exc.status_int
fault_name = self._fault_names.get(code, "computeFault")
explanation = self.wrapped_exc.explanation
LOG.debug("Returning %(code)s to user: %(explanation)s",
{'code': code, 'explanation': explanation})
explanation = i18n.translate(explanation, user_locale)
fault_data = {
fault_name: {
'code': code,
'message': explanation}}
if code == 413 or code == 429:
retry = self.wrapped_exc.headers.get('Retry-After', None)
if retry:
fault_data[fault_name]['retryAfter'] = retry
if not req.api_version_request.is_null():
self.wrapped_exc.headers[API_VERSION_REQUEST_HEADER] = \
req.api_version_request.get_string()
self.wrapped_exc.headers['Vary'] = \
API_VERSION_REQUEST_HEADER
self.wrapped_exc.content_type = 'application/json'
self.wrapped_exc.charset = 'UTF-8'
self.wrapped_exc.text = JSONDictSerializer().serialize(fault_data)
return self.wrapped_exc
def __str__(self):
return self.wrapped_exc.__str__()
class RateLimitFault(webob.exc.HTTPException):
"""Rate-limited request response."""
def __init__(self, message, details, retry_time):
"""Initialize new `RateLimitFault` with relevant information."""
hdrs = RateLimitFault._retry_after(retry_time)
self.wrapped_exc = webob.exc.HTTPTooManyRequests(headers=hdrs)
self.content = {
"overLimit": {
"code": self.wrapped_exc.status_int,
"message": message,
"details": details,
"retryAfter": hdrs['Retry-After'],
},
}
@staticmethod
def _retry_after(retry_time):
delay = int(math.ceil(retry_time - time.time()))
retry_after = delay if delay > 0 else 0
headers = {'Retry-After': '%d' % retry_after}
return headers
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, request):
"""Return the wrapped exception with a serialized body conforming
to our error format.
"""
user_locale = request.best_match_language()
self.content['overLimit']['message'] = \
i18n.translate(self.content['overLimit']['message'], user_locale)
self.content['overLimit']['details'] = \
i18n.translate(self.content['overLimit']['details'], user_locale)
content = JSONDictSerializer().serialize(self.content)
self.wrapped_exc.charset = 'UTF-8'
self.wrapped_exc.content_type = "application/json"
self.wrapped_exc.text = content
return self.wrapped_exc
|
py | 1a53027983ad437a6559075ba7befcaa6e4b972d | # AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
class ModalFooter(Component):
"""A ModalFooter component.
Add a footer to any modal.
Keyword arguments:
- children (a list of or a singular dash component, string or number; optional):
The children of this component.
- id (string; optional):
The ID of this component, used to identify dash components in
callbacks. The ID needs to be unique across all of the components
in an app.
- className (string; optional):
Often used with CSS to style elements with common properties.
- style (dict; optional):
Defines CSS styles which will override styles previously set.
- tag (string; optional):
HTML tag to use for the ModalFooter, default: div."""
@_explicitize_args
def __init__(self, children=None, id=Component.UNDEFINED, style=Component.UNDEFINED, className=Component.UNDEFINED, tag=Component.UNDEFINED, **kwargs):
self._prop_names = ['children', 'id', 'className', 'style', 'tag']
self._type = 'ModalFooter'
self._namespace = 'dash_bootstrap_components'
self._valid_wildcard_attributes = []
self.available_properties = ['children', 'id', 'className', 'style', 'tag']
self.available_wildcard_properties = []
_explicit_args = kwargs.pop('_explicit_args')
_locals = locals()
_locals.update(kwargs) # For wildcard attrs
args = {k: _locals[k] for k in _explicit_args if k != 'children'}
for k in []:
if k not in args:
raise TypeError(
'Required argument `' + k + '` was not specified.')
super(ModalFooter, self).__init__(children=children, **args)
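# Illustrative usage (assumes the package is importable as dbc; Button is
# another standard dash-bootstrap-components component):
#
#     import dash_bootstrap_components as dbc
#     footer = dbc.ModalFooter(
#         dbc.Button("Close", id="close-modal", className="ml-auto")
#     )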
|
py | 1a53035d9d22efbba530992f072128aa9d8a2154 | # -*- coding: utf-8 -*-
from contextlib import contextmanager
import base64
import datetime
import json
import pickle
import os
from cms.api import create_page
from django import http
from django.conf import settings
from django.conf.urls import url
from django.contrib import admin
from django.contrib.admin.widgets import FilteredSelectMultiple, RelatedFieldWidgetWrapper
from django.core import urlresolvers
from django.core.cache import cache
from django.core.exceptions import (
ValidationError, ImproperlyConfigured, ObjectDoesNotExist)
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.management import call_command
from django.forms.widgets import Media
from django.test.testcases import TestCase
from django.utils import timezone
from cms import api
from cms.constants import PLUGIN_MOVE_ACTION, PLUGIN_COPY_ACTION
from cms.exceptions import PluginAlreadyRegistered, PluginNotRegistered, DontUsePageAttributeWarning
from cms.models import Page, Placeholder
from cms.models.pluginmodel import CMSPlugin
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cms.sitemaps.cms_sitemap import CMSSitemap
from cms.test_utils.project.pluginapp.plugins.manytomany_rel.models import (
Article, Section, ArticlePluginModel,
FKModel,
M2MTargetModel)
from cms.test_utils.project.pluginapp.plugins.meta.cms_plugins import (
TestPlugin, TestPlugin2, TestPlugin3, TestPlugin4, TestPlugin5)
from cms.test_utils.project.pluginapp.plugins.validation.cms_plugins import (
NonExisitngRenderTemplate, NoRender, NoRenderButChildren, DynTemplate)
from cms.test_utils.testcases import (
CMSTestCase, URL_CMS_PAGE, URL_CMS_PLUGIN_MOVE, URL_CMS_PAGE_ADD,
URL_CMS_PLUGIN_ADD, URL_CMS_PLUGIN_EDIT, URL_CMS_PAGE_CHANGE,
URL_CMS_PLUGIN_REMOVE, URL_CMS_PAGE_PUBLISH, URL_CMS_PLUGINS_COPY)
from cms.test_utils.util.fuzzy_int import FuzzyInt
from cms.toolbar.toolbar import CMSToolbar
from cms.utils.conf import get_cms_setting
from cms.utils.copy_plugins import copy_plugins_to
from cms.utils.i18n import force_language
from cms.utils.plugins import get_plugins_for_page, get_plugins
from django.utils.http import urlencode
from djangocms_googlemap.models import GoogleMap
from djangocms_inherit.cms_plugins import InheritPagePlaceholderPlugin
from djangocms_file.models import File
from djangocms_inherit.models import InheritPagePlaceholder
from djangocms_link.forms import LinkForm
from djangocms_link.models import Link
from djangocms_picture.models import Picture
from djangocms_text_ckeditor.models import Text
from djangocms_text_ckeditor.utils import plugin_to_tag
@contextmanager
def register_plugins(*plugins):
for plugin in plugins:
plugin_pool.register_plugin(plugin)
try:
yield
finally:
for plugin in plugins:
plugin_pool.unregister_plugin(plugin)
class DumbFixturePlugin(CMSPluginBase):
model = CMSPlugin
name = "Dumb Test Plugin. It does nothing."
render_template = ""
admin_preview = False
render_plugin = False
def render(self, context, instance, placeholder):
return context
class DumbFixturePluginWithUrls(DumbFixturePlugin):
name = DumbFixturePlugin.name + " With custom URLs."
render_plugin = False
def _test_view(self, request):
return http.HttpResponse("It works")
def get_plugin_urls(self):
return [
url(r'^testview/$', admin.site.admin_view(self._test_view), name='dumbfixtureplugin'),
]
plugin_pool.register_plugin(DumbFixturePluginWithUrls)
class PluginsTestBaseCase(CMSTestCase):
def setUp(self):
self.super_user = self._create_user("test", True, True)
self.slave = self._create_user("slave", True)
self.FIRST_LANG = settings.LANGUAGES[0][0]
self.SECOND_LANG = settings.LANGUAGES[1][0]
self._login_context = self.login_user_context(self.super_user)
self._login_context.__enter__()
def tearDown(self):
self._login_context.__exit__(None, None, None)
def approve_page(self, page):
response = self.client.get(URL_CMS_PAGE + "%d/approve/" % page.pk)
self.assertRedirects(response, URL_CMS_PAGE)
# reload page
return self.reload_page(page)
def get_request(self, *args, **kwargs):
request = super(PluginsTestBaseCase, self).get_request(*args, **kwargs)
request.placeholder_media = Media()
request.toolbar = CMSToolbar(request)
return request
def get_response_pk(self, response):
return int(response.content.decode('utf8').split("/edit-plugin/")[1].split("/")[0])
def get_placeholder(self):
return Placeholder.objects.create(slot='test')
class PluginsTestCase(PluginsTestBaseCase):
def _create_text_plugin_on_page(self, page):
plugin = api.add_plugin(
placeholder=page.placeholders.get(slot="body"),
plugin_type='TextPlugin',
language=settings.LANGUAGES[0][0],
body=''
)
return plugin.pk
def _edit_text_plugin(self, plugin_id, text):
edit_url = "%s%s/" % (URL_CMS_PLUGIN_EDIT, plugin_id)
response = self.client.get(edit_url)
self.assertEqual(response.status_code, 200)
data = {
"body": text
}
response = self.client.post(edit_url, data)
self.assertEqual(response.status_code, 200)
txt = Text.objects.get(pk=plugin_id)
return txt
def test_add_edit_plugin(self):
"""
Test that you can add a text plugin
"""
# add a new text plugin
page_data = self.get_new_page_data()
self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.all()[0]
created_plugin_id = self._create_text_plugin_on_page(page)
# now edit the plugin
txt = self._edit_text_plugin(created_plugin_id, "Hello World")
self.assertEqual("Hello World", txt.body)
def test_plugin_add_form_integrity(self):
admin.autodiscover()
admin_instance = admin.site._registry[ArticlePluginModel]
placeholder = self.get_placeholder()
url = URL_CMS_PLUGIN_ADD + '?' + urlencode({
'plugin_type': "ArticlePlugin",
'plugin_language': settings.LANGUAGES[0][0],
'placeholder_id': placeholder.pk,
})
superuser = self.get_superuser()
plugin = plugin_pool.get_plugin('ArticlePlugin')
with self.login_user_context(superuser):
request = self.get_request(url)
PluginFormClass = plugin(
model=plugin.model,
admin_site=admin.site,
).get_form(request)
plugin_fields = list(PluginFormClass.base_fields.keys())
OriginalFormClass = admin_instance.get_form(request)
original_fields = list(OriginalFormClass.base_fields.keys())
# Assert both forms have the same fields
self.assertEqual(plugin_fields, original_fields)
# Now assert the plugin form has the related field wrapper
# widget on the sections field.
self.assertIsInstance(
PluginFormClass.base_fields['sections'].widget,
RelatedFieldWidgetWrapper,
)
# Now assert the admin form has the related field wrapper
# widget on the sections field.
self.assertIsInstance(
OriginalFormClass.base_fields['sections'].widget,
RelatedFieldWidgetWrapper,
)
# Now assert the plugin form has the filtered select multiple
# widget wrapped by the related field wrapper
self.assertIsInstance(
PluginFormClass.base_fields['sections'].widget.widget,
FilteredSelectMultiple,
)
# Now assert the admin form has the filtered select multiple
# widget wrapped by the related field wrapper
self.assertIsInstance(
OriginalFormClass.base_fields['sections'].widget.widget,
FilteredSelectMultiple,
)
def test_excluded_plugin(self):
"""
Test that you can't add a text plugin
"""
CMS_PLACEHOLDER_CONF = {
'body': {
'excluded_plugins': ['TextPlugin']
}
}
# try to add a new text plugin
with self.settings(CMS_PLACEHOLDER_CONF=CMS_PLACEHOLDER_CONF):
page_data = self.get_new_page_data()
self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.all()[0]
installed_plugins = plugin_pool.get_all_plugins('body', page)
installed_plugins = [cls.__name__ for cls in installed_plugins]
self.assertNotIn('TextPlugin', installed_plugins)
CMS_PLACEHOLDER_CONF = {
'body': {
'plugins': ['TextPlugin'],
'excluded_plugins': ['TextPlugin']
}
}
# try to add a new text plugin
with self.settings(CMS_PLACEHOLDER_CONF=CMS_PLACEHOLDER_CONF):
page_data = self.get_new_page_data()
self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.all()[0]
installed_plugins = plugin_pool.get_all_plugins('body', page)
installed_plugins = [cls.__name__ for cls in installed_plugins]
self.assertNotIn('TextPlugin', installed_plugins)
def test_plugin_edit_marks_page_dirty(self):
page_data = self.get_new_page_data()
response = self.client.post(URL_CMS_PAGE_ADD, page_data)
self.assertEqual(response.status_code, 302)
page = Page.objects.all()[0]
response = self.client.post(URL_CMS_PAGE_PUBLISH % (page.pk, 'en'))
self.assertEqual(response.status_code, 302)
created_plugin_id = self._create_text_plugin_on_page(page)
page = Page.objects.all()[0]
self.assertEqual(page.is_dirty('en'), True)
response = self.client.post(URL_CMS_PAGE_PUBLISH % (page.pk, 'en'))
self.assertEqual(response.status_code, 302)
page = Page.objects.all()[0]
self.assertEqual(page.is_dirty('en'), False)
self._edit_text_plugin(created_plugin_id, "Hello World")
page = Page.objects.all()[0]
self.assertEqual(page.is_dirty('en'), True)
def test_plugin_order(self):
"""
Test that plugin position is saved after creation
"""
page_en = api.create_page("PluginOrderPage", "col_two.html", "en",
slug="page1", published=True, in_navigation=True)
ph_en = page_en.placeholders.get(slot="col_left")
# We check created objects and objects from the DB to be sure the position value
# has been saved correctly
text_plugin_1 = api.add_plugin(ph_en, "TextPlugin", "en", body="I'm the first")
text_plugin_2 = api.add_plugin(ph_en, "TextPlugin", "en", body="I'm the second")
db_plugin_1 = CMSPlugin.objects.get(pk=text_plugin_1.pk)
db_plugin_2 = CMSPlugin.objects.get(pk=text_plugin_2.pk)
with self.settings(CMS_PERMISSION=False):
self.assertEqual(text_plugin_1.position, 0)
self.assertEqual(db_plugin_1.position, 0)
self.assertEqual(text_plugin_2.position, 1)
self.assertEqual(db_plugin_2.position, 1)
## Finally we render the placeholder to test the actual content
rendered_placeholder = ph_en.render(self.get_context(page_en.get_absolute_url(), page=page_en), None)
self.assertEqual(rendered_placeholder, "I'm the firstI'm the second")
def test_plugin_order_alt(self):
"""
Test that plugin position is saved after creation
"""
draft_page = api.create_page("PluginOrderPage", "col_two.html", "en",
slug="page1", published=False, in_navigation=True)
placeholder = draft_page.placeholders.get(slot="col_left")
# We check created objects and objects from the DB to be sure the position value
# has been saved correctly
text_plugin_2 = api.add_plugin(placeholder, "TextPlugin", "en", body="I'm the second")
text_plugin_3 = api.add_plugin(placeholder, "TextPlugin", "en", body="I'm the third")
# Publish to create a 'live' version
draft_page.publish('en')
draft_page = draft_page.reload()
placeholder = draft_page.placeholders.get(slot="col_left")
# Add a plugin and move it to the first position
text_plugin_1 = api.add_plugin(placeholder, "TextPlugin", "en", body="I'm the first")
data = {
'placeholder_id': placeholder.id,
'plugin_id': text_plugin_1.id,
'plugin_parent': '',
'plugin_language': 'en',
'plugin_order[]': [text_plugin_1.id, text_plugin_2.id, text_plugin_3.id],
}
self.client.post(URL_CMS_PLUGIN_MOVE, data)
draft_page.publish('en')
draft_page = draft_page.reload()
live_page = draft_page.get_public_object()
placeholder = draft_page.placeholders.get(slot="col_left")
live_placeholder = live_page.placeholders.get(slot="col_left")
with self.settings(CMS_PERMISSION=False):
self.assertEqual(CMSPlugin.objects.get(pk=text_plugin_1.pk).position, 0)
self.assertEqual(CMSPlugin.objects.get(pk=text_plugin_2.pk).position, 1)
self.assertEqual(CMSPlugin.objects.get(pk=text_plugin_3.pk).position, 2)
## Finally we render the placeholder to test the actual content
rendered_placeholder = placeholder.render(self.get_context(draft_page.get_absolute_url(), page=draft_page), None)
self.assertEqual(rendered_placeholder, "I'm the firstI'm the secondI'm the third")
rendered_live_placeholder = live_placeholder.render(self.get_context(live_page.get_absolute_url(), page=live_page), None)
self.assertEqual(rendered_live_placeholder, "I'm the firstI'm the secondI'm the third")
columns = api.add_plugin(placeholder, "MultiColumnPlugin", "en")
column = api.add_plugin(
placeholder,
"ColumnPlugin",
"en",
target=columns,
width='10%',
)
data = {
'placeholder_id': placeholder.id,
'plugin_id': text_plugin_1.id,
'plugin_parent': '',
'plugin_language': 'en',
'plugin_order[]': [
text_plugin_1.id,
text_plugin_2.id,
text_plugin_3.id,
columns.id,
column.id,
],
}
response = self.client.post(URL_CMS_PLUGIN_MOVE, data)
self.assertEqual(response.status_code, 400)
self.assertContains(
response,
'order parameter references plugins in different trees',
status_code=400,
)
def test_plugin_breadcrumbs(self):
"""
Test the plugin breadcrumbs order
"""
draft_page = api.create_page("home", "col_two.html", "en",
slug="page1", published=False, in_navigation=True)
placeholder = draft_page.placeholders.get(slot="col_left")
columns = api.add_plugin(placeholder, "MultiColumnPlugin", "en")
column = api.add_plugin(placeholder, "ColumnPlugin", "en", target=columns, width='10%')
text_plugin = api.add_plugin(placeholder, "TextPlugin", "en", target=column, body="I'm the second")
text_breadcrumbs = text_plugin.get_breadcrumb()
self.assertEqual(len(columns.get_breadcrumb()), 1)
self.assertEqual(len(column.get_breadcrumb()), 2)
self.assertEqual(len(text_breadcrumbs), 3)
self.assertEqual(text_breadcrumbs[0]['title'], columns.get_plugin_class().name)
self.assertEqual(text_breadcrumbs[1]['title'], column.get_plugin_class().name)
self.assertEqual(text_breadcrumbs[2]['title'], text_plugin.get_plugin_class().name)
self.assertTrue('/edit-plugin/%s/' % columns.pk in text_breadcrumbs[0]['url'])
self.assertTrue('/edit-plugin/%s/' % column.pk in text_breadcrumbs[1]['url'])
self.assertTrue('/edit-plugin/%s/' % text_plugin.pk in text_breadcrumbs[2]['url'])
def test_extract_images_from_text(self):
img_path = os.path.join(os.path.dirname(__file__), 'data', 'image.jpg')
with open(img_path, 'rb') as fobj:
img_data = base64.b64encode(fobj.read()).decode('utf-8')
body = """<p>
<img alt='' src='data:image/jpeg;base64,{data}' />
</p>""".format(data=img_data)
page = api.create_page(
title='test page',
template='nav_playground.html',
language=settings.LANGUAGES[0][0],
)
plugin = api.add_plugin(
page.placeholders.get(slot="body"),
plugin_type='TextPlugin',
language=settings.LANGUAGES[0][0],
body=body,
)
self.assertEqual(plugin.get_children().count(), 1)
def test_add_text_plugin_empty_tag(self):
"""
Test that a text plugin body containing an empty tag is saved unchanged
"""
# add a new text plugin
page = api.create_page(
title='test page',
template='nav_playground.html',
language=settings.LANGUAGES[0][0],
)
plugin = api.add_plugin(
placeholder=page.placeholders.get(slot='body'),
plugin_type='TextPlugin',
language=settings.LANGUAGES[0][0],
body='<div class="someclass"></div><p>foo</p>'
)
self.assertEqual(plugin.body, '<div class="someclass"></div><p>foo</p>')
def test_add_text_plugin_html_sanitizer(self):
"""
Test that script tags in a text plugin body are escaped when the plugin is added
"""
# add a new text plugin
page = api.create_page(
title='test page',
template='nav_playground.html',
language=settings.LANGUAGES[0][0],
)
plugin = api.add_plugin(
placeholder=page.placeholders.get(slot='body'),
plugin_type='TextPlugin',
language=settings.LANGUAGES[0][0],
body='<script>var bar="hacked"</script>'
)
self.assertEqual(
plugin.body,
'<script>var bar="hacked"</script>'
)
def test_copy_plugins_method(self):
"""
Test that CMSPlugin copy does not have side effects
"""
# create some objects
page_en = api.create_page("CopyPluginTestPage (EN)", "nav_playground.html", "en")
page_de = api.create_page("CopyPluginTestPage (DE)", "nav_playground.html", "de")
ph_en = page_en.placeholders.get(slot="body")
ph_de = page_de.placeholders.get(slot="body")
# add the text plugin
text_plugin_en = api.add_plugin(ph_en, "TextPlugin", "en", body="Hello World")
self.assertEqual(text_plugin_en.pk, CMSPlugin.objects.all()[0].pk)
# add a *nested* link plugin
link_plugin_en = api.add_plugin(ph_en, "LinkPlugin", "en", target=text_plugin_en,
name="A Link", url="https://www.django-cms.org")
# embed the link plugin inside the text plugin's body
text_plugin_en.body += plugin_to_tag(link_plugin_en)
text_plugin_en.save()
# the call above to add a child makes a plugin reload required here.
text_plugin_en = self.reload(text_plugin_en)
# setup the plugins to copy
plugins = [text_plugin_en, link_plugin_en]
# save the old ids for check
old_ids = [plugin.pk for plugin in plugins]
new_plugins = []
plugins_ziplist = []
old_parent_cache = {}
# This is a stripped down version of cms.copy_plugins.copy_plugins_to
# for low-level testing of the copy process
for plugin in plugins:
new_plugins.append(plugin.copy_plugin(ph_de, 'de', old_parent_cache))
plugins_ziplist.append((new_plugins[-1], plugin))
for idx, plugin in enumerate(plugins):
inst, _ = new_plugins[idx].get_plugin_instance()
new_plugins[idx] = inst
new_plugins[idx].post_copy(plugin, plugins_ziplist)
for idx, plugin in enumerate(plugins):
# original plugin instance reference should stay unmodified
self.assertEqual(old_ids[idx], plugin.pk)
# new plugin instance should be different from the original
self.assertNotEqual(new_plugins[idx].pk, plugin.pk)
# text plugins (both old and new) should contain a reference
# to the link plugins
if plugin.plugin_type == 'TextPlugin':
self.assertTrue('Link - A Link' in plugin.body)
self.assertTrue('id="%s"' % plugin.get_children()[0].pk in plugin.body)
self.assertTrue('Link - A Link' in new_plugins[idx].body)
self.assertTrue('id="%s"' % new_plugins[idx].get_children()[0].pk in new_plugins[idx].body)
def test_plugin_position(self):
page_en = api.create_page("CopyPluginTestPage (EN)", "nav_playground.html", "en")
placeholder = page_en.placeholders.get(slot="body") # ID 2
placeholder_right = page_en.placeholders.get(slot="right-column")
columns = api.add_plugin(placeholder, "MultiColumnPlugin", "en") # ID 1
column_1 = api.add_plugin(placeholder, "ColumnPlugin", "en", target=columns, width='10%') # ID 2
column_2 = api.add_plugin(placeholder, "ColumnPlugin", "en", target=columns, width='30%') # ID 3
first_text_plugin = api.add_plugin(placeholder, "TextPlugin", "en", target=column_1, body="I'm the first") # ID 4
text_plugin = api.add_plugin(placeholder, "TextPlugin", "en", target=column_1, body="I'm the second") # ID 5
returned_1 = copy_plugins_to([text_plugin], placeholder, 'en', column_1.pk) # ID 6
returned_2 = copy_plugins_to([text_plugin], placeholder_right, 'en') # ID 7
returned_3 = copy_plugins_to([text_plugin], placeholder, 'en', column_2.pk) # ID 8
# STATE AT THIS POINT:
# placeholder
# - columns
# - column_1
# - text_plugin "I'm the first" created here
# - text_plugin "I'm the second" created here
# - text_plugin "I'm the second" (returned_1) copied here
# - column_2
# - text_plugin "I'm the second" (returned_3) copied here
# placeholder_right
# - text_plugin "I'm the second" (returned_2) copied here
# First plugin in the plugin branch
self.assertEqual(first_text_plugin.position, 0)
# Second plugin in the plugin branch
self.assertEqual(text_plugin.position, 1)
# Added as third plugin in the same branch as the above
self.assertEqual(returned_1[0][0].position, 2)
# First plugin in a placeholder
self.assertEqual(returned_2[0][0].position, 0)
# First plugin nested in a plugin
self.assertEqual(returned_3[0][0].position, 0)
def test_copy_plugins(self):
"""
Test that copying plugins works as expected.
"""
# create some objects
page_en = api.create_page("CopyPluginTestPage (EN)", "nav_playground.html", "en")
page_de = api.create_page("CopyPluginTestPage (DE)", "nav_playground.html", "de")
ph_en = page_en.placeholders.get(slot="body")
ph_de = page_de.placeholders.get(slot="body")
# add the text plugin
text_plugin_en = api.add_plugin(ph_en, "TextPlugin", "en", body="Hello World")
self.assertEqual(text_plugin_en.pk, CMSPlugin.objects.all()[0].pk)
# add a *nested* link plugin
link_plugin_en = api.add_plugin(ph_en, "LinkPlugin", "en", target=text_plugin_en,
name="A Link", url="https://www.django-cms.org")
# the call above to add a child makes a plugin reload required here.
text_plugin_en = self.reload(text_plugin_en)
# check the relations
self.assertEqual(text_plugin_en.get_children().count(), 1)
self.assertEqual(link_plugin_en.parent.pk, text_plugin_en.pk)
# just sanity check that so far everything went well
self.assertEqual(CMSPlugin.objects.count(), 2)
# copy the plugins to the german placeholder
copy_plugins_to(ph_en.get_plugins(), ph_de, 'de')
self.assertEqual(ph_de.cmsplugin_set.filter(parent=None).count(), 1)
text_plugin_de = ph_de.cmsplugin_set.get(parent=None).get_plugin_instance()[0]
self.assertEqual(text_plugin_de.get_children().count(), 1)
link_plugin_de = text_plugin_de.get_children().get().get_plugin_instance()[0]
# check we have twice as many plugins as before
self.assertEqual(CMSPlugin.objects.count(), 4)
# check language plugins
self.assertEqual(CMSPlugin.objects.filter(language='de').count(), 2)
self.assertEqual(CMSPlugin.objects.filter(language='en').count(), 2)
text_plugin_en = self.reload(text_plugin_en)
link_plugin_en = self.reload(link_plugin_en)
# check the relations in english didn't change
self.assertEqual(text_plugin_en.get_children().count(), 1)
self.assertEqual(link_plugin_en.parent.pk, text_plugin_en.pk)
self.assertEqual(link_plugin_de.name, link_plugin_en.name)
self.assertEqual(link_plugin_de.url, link_plugin_en.url)
self.assertEqual(text_plugin_de.body, text_plugin_en.body)
# test subplugin copy
copy_plugins_to([link_plugin_en], ph_de, 'de')
def test_deep_copy_plugins(self):
page_en = api.create_page("CopyPluginTestPage (EN)", "nav_playground.html", "en")
ph_en = page_en.placeholders.get(slot="body")
# Grid wrapper 1
mcol1_en = api.add_plugin(ph_en, "MultiColumnPlugin", "en", position="first-child")
# Grid column 1.1
col1_en = api.add_plugin(ph_en, "ColumnPlugin", "en", position="first-child", target=mcol1_en)
# Grid column 1.2
col2_en = api.add_plugin(ph_en, "ColumnPlugin", "en", position="first-child", target=mcol1_en)
# add a *nested* link plugin
link_plugin_en = api.add_plugin(
ph_en,
"LinkPlugin",
"en",
target=col2_en,
name="A Link",
url="https://www.django-cms.org"
)
old_plugins = [mcol1_en, col1_en, col2_en, link_plugin_en]
page_de = api.create_page("CopyPluginTestPage (DE)", "nav_playground.html", "de")
ph_de = page_de.placeholders.get(slot="body")
# Grid wrapper 1
mcol1_de = api.add_plugin(ph_de, "MultiColumnPlugin", "de", position="first-child")
# Grid column 1.1
col1_de = api.add_plugin(ph_de, "ColumnPlugin", "de", position="first-child", target=mcol1_de)
copy_plugins_to(
old_plugins=[mcol1_en, col1_en, col2_en, link_plugin_en],
to_placeholder=ph_de,
to_language='de',
parent_plugin_id=col1_de.pk,
)
col1_de = self.reload(col1_de)
new_plugins = col1_de.get_descendants().order_by('path')
self.assertEqual(new_plugins.count(), len(old_plugins))
for old_plugin, new_plugin in zip(old_plugins, new_plugins):
self.assertEqual(old_plugin.numchild, new_plugin.numchild)
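# FuzzyInt lets the publish step's query count vary within a range, since the exact count differs between database backends.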
with self.assertNumQueries(FuzzyInt(0, 207)):
page_en.publish('en')
def test_plugin_validation(self):
self.assertRaises(ImproperlyConfigured, plugin_pool.validate_templates, NonExisitngRenderTemplate)
self.assertRaises(ImproperlyConfigured, plugin_pool.validate_templates, NoRender)
self.assertRaises(ImproperlyConfigured, plugin_pool.validate_templates, NoRenderButChildren)
plugin_pool.validate_templates(DynTemplate)
def test_remove_plugin_before_published(self):
"""
When removing a draft plugin we would expect the public copy of the plugin to also be removed
"""
# add a page
page = api.create_page(
title='test page',
language=settings.LANGUAGES[0][0],
template='nav_playground.html'
)
plugin = api.add_plugin(
placeholder=page.placeholders.get(slot="body"),
language='en',
plugin_type='TextPlugin',
body=''
)
# there should be only 1 plugin
self.assertEqual(CMSPlugin.objects.all().count(), 1)
# delete the plugin
plugin_data = {
'plugin_id': plugin.pk
}
remove_url = URL_CMS_PLUGIN_REMOVE + "%s/" % plugin.pk
response = self.client.post(remove_url, plugin_data)
self.assertEqual(response.status_code, 302)
# there should be no plugins
self.assertEqual(0, CMSPlugin.objects.all().count())
def test_remove_plugin_after_published(self):
# add a page
page = api.create_page("home", "nav_playground.html", "en")
# add a plugin
plugin = api.add_plugin(
placeholder=page.placeholders.get(slot='body'),
plugin_type='TextPlugin',
language=settings.LANGUAGES[0][0],
body=''
)
# there should be only 1 plugin
self.assertEqual(CMSPlugin.objects.all().count(), 1)
self.assertEqual(CMSPlugin.objects.filter(placeholder__page__publisher_is_draft=True).count(), 1)
# publish page
response = self.client.post(URL_CMS_PAGE + "%d/en/publish/" % page.pk, {1: 1})
self.assertEqual(response.status_code, 302)
self.assertEqual(Page.objects.count(), 2)
# there should now be two plugins - 1 draft, 1 public
self.assertEqual(CMSPlugin.objects.all().count(), 2)
# delete the plugin
plugin_data = {
'plugin_id': plugin.pk
}
remove_url = URL_CMS_PLUGIN_REMOVE + "%s/" % plugin.pk
response = self.client.post(remove_url, plugin_data)
self.assertEqual(response.status_code, 302)
# there should be no plugins
self.assertEqual(CMSPlugin.objects.all().count(), 1)
self.assertEqual(CMSPlugin.objects.filter(placeholder__page__publisher_is_draft=False).count(), 1)
def test_remove_plugin_not_associated_to_page(self):
"""
Test case for PlaceholderField
"""
page = api.create_page(
title='test page',
template='nav_playground.html',
language='en'
)
# add a plugin
plugin = api.add_plugin(
placeholder=page.placeholders.get(slot='body'),
plugin_type='TextPlugin',
language=settings.LANGUAGES[0][0],
body=''
)
# there should be only 1 plugin
self.assertEqual(CMSPlugin.objects.all().count(), 1)
ph = Placeholder(slot="subplugin")
ph.save()
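# Try to add a child plugin into a standalone placeholder that is not attached to any page.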
url = URL_CMS_PLUGIN_ADD + '?' + urlencode({
'plugin_type': "TextPlugin",
'plugin_language': settings.LANGUAGES[0][0],
'placeholder': ph.pk,
'plugin_parent': plugin.pk
})
response = self.client.post(url, {'body': ''})
# no longer allowed for security reasons
self.assertEqual(response.status_code, 400)
def test_register_plugin_twice_should_raise(self):
number_of_plugins_before = len(plugin_pool.get_all_plugins())
# The first time we register the plugin it should work
with register_plugins(DumbFixturePlugin):
# Let's add it a second time. We should catch an exception
raised = False
try:
plugin_pool.register_plugin(DumbFixturePlugin)
except PluginAlreadyRegistered:
raised = True
self.assertTrue(raised)
# Let's make sure we have the same number of plugins as before:
number_of_plugins_after = len(plugin_pool.get_all_plugins())
self.assertEqual(number_of_plugins_before, number_of_plugins_after)
def test_unregister_non_existing_plugin_should_raise(self):
number_of_plugins_before = len(plugin_pool.get_all_plugins())
raised = False
try:
# There should not be such a plugin registered if the other tests
# don't leak plugins
plugin_pool.unregister_plugin(DumbFixturePlugin)
except PluginNotRegistered:
raised = True
self.assertTrue(raised)
# Let's count, to make sure we didn't remove a plugin accidentally.
number_of_plugins_after = len(plugin_pool.get_all_plugins())
self.assertEqual(number_of_plugins_before, number_of_plugins_after)
def test_inheritplugin_media(self):
"""
Test case for InheritPagePlaceholder
"""
inheritfrompage = api.create_page('page to inherit from',
'nav_playground.html',
'en')
body = inheritfrompage.placeholders.get(slot="body")
plugin = GoogleMap(
plugin_type='GoogleMapPlugin',
placeholder=body,
position=1,
language=settings.LANGUAGE_CODE,
address="Riedtlistrasse 16",
zipcode="8006",
city="Zurich",
)
plugin.add_root(instance=plugin)
inheritfrompage.publish('en')
page = api.create_page('inherit from page',
'nav_playground.html',
'en',
published=True)
inherited_body = page.placeholders.get(slot="body")
inherit_plugin = InheritPagePlaceholder(
plugin_type='InheritPagePlaceholderPlugin',
placeholder=inherited_body,
position=1,
language=settings.LANGUAGE_CODE,
from_page=inheritfrompage,
from_language=settings.LANGUAGE_CODE)
inherit_plugin.add_root(instance=inherit_plugin)
page.publish('en')
self.client.logout()
cache.clear()
# TODO: Replace this test using a Test Plugin, not an externally managed one.
# response = self.client.get(page.get_absolute_url())
# self.assertTrue(
# 'https://maps-api-ssl.google.com/maps/api/js' in response.content.decode('utf8').replace("&", "&"))
def test_inherit_plugin_with_empty_plugin(self):
inheritfrompage = api.create_page('page to inherit from',
'nav_playground.html',
'en', published=True)
body = inheritfrompage.placeholders.get(slot="body")
empty_plugin = CMSPlugin(
plugin_type='TextPlugin', # create an empty plugin
placeholder=body,
position=1,
language='en',
)
empty_plugin.add_root(instance=empty_plugin)
other_page = api.create_page('other page', 'nav_playground.html', 'en', published=True)
inherited_body = other_page.placeholders.get(slot="body")
api.add_plugin(inherited_body, InheritPagePlaceholderPlugin, 'en', position='last-child',
from_page=inheritfrompage, from_language='en')
api.add_plugin(inherited_body, "TextPlugin", "en", body="foobar")
# this should not fail, even if there is an empty plugin
rendered = inherited_body.render(context=self.get_context(other_page.get_absolute_url(), page=other_page), width=200)
self.assertIn("foobar", rendered)
def test_search_pages(self):
"""
Test search for pages
To be fully useful, this testcase needs to have the following different
Plugin configurations within the project:
* unaltered cmsplugin_ptr
* cmsplugin_ptr with related_name='+'
* cmsplugin_ptr with related_query_name='+'
* cmsplugin_ptr with related_query_name='whatever_foo'
* cmsplugin_ptr with related_name='whatever_bar'
* cmsplugin_ptr with related_query_name='whatever_foo' and related_name='whatever_bar'
Those plugins are in cms/test_utils/project/pluginapp/revdesc/models.py
"""
page = api.create_page("page", "nav_playground.html", "en")
placeholder = page.placeholders.get(slot='body')
text = Text(body="hello", language="en", placeholder=placeholder, plugin_type="TextPlugin", position=1)
text.save()
page.publish('en')
self.assertEqual(Page.objects.search("hi").count(), 0)
self.assertEqual(Page.objects.search("hello").count(), 1)
def test_empty_plugin_is_not_ignored(self):
page = api.create_page("page", "nav_playground.html", "en")
placeholder = page.placeholders.get(slot='body')
plugin = CMSPlugin(
plugin_type='TextPlugin',
placeholder=placeholder,
position=1,
language=self.FIRST_LANG)
plugin.add_root(instance=plugin)
# this should not raise any errors, but just ignore the empty plugin
out = placeholder.render(self.get_context(), width=300)
self.assertFalse(len(out))
self.assertTrue(len(placeholder._plugins_cache))
def test_pickle(self):
page = api.create_page("page", "nav_playground.html", "en")
placeholder = page.placeholders.get(slot='body')
text_plugin = api.add_plugin(
placeholder,
"TextPlugin",
'en',
body="Hello World",
)
cms_plugin = text_plugin.cmsplugin_ptr
# assert we can pickle and unpickle a solid plugin (subclass)
self.assertEqual(text_plugin, pickle.loads(pickle.dumps(text_plugin)))
# assert we can pickle and unpickle a cms plugin (parent)
self.assertEqual(cms_plugin, pickle.loads(pickle.dumps(cms_plugin)))
def test_defer_pickle(self):
page = api.create_page("page", "nav_playground.html", "en")
placeholder = page.placeholders.get(slot='body')
api.add_plugin(placeholder, "TextPlugin", 'en', body="Hello World")
plugins = Text.objects.all().defer('path')
import io
a = io.BytesIO()
pickle.dump(plugins[0], a)
def test_empty_plugin_description(self):
page = api.create_page("page", "nav_playground.html", "en")
placeholder = page.placeholders.get(slot='body')
a = CMSPlugin(
plugin_type='TextPlugin',
placeholder=placeholder,
position=1,
language=self.FIRST_LANG
)
self.assertEqual(a.get_short_description(), "<Empty>")
def test_page_attribute_warns(self):
page = api.create_page("page", "nav_playground.html", "en")
placeholder = page.placeholders.get(slot='body')
a = CMSPlugin(
plugin_type='TextPlugin',
placeholder=placeholder,
position=1,
language=self.FIRST_LANG
)
a.save()
def get_page(plugin):
return plugin.page
self.assertWarns(
DontUsePageAttributeWarning,
"Don't use the page attribute on CMSPlugins! CMSPlugins are not guaranteed to have a page associated with them!",
get_page, a
)
def test_set_translatable_content(self):
a = Text(body="hello")
self.assertTrue(a.set_translatable_content({'body': 'world'}))
b = Link(name="hello")
self.assertTrue(b.set_translatable_content({'name': 'world'}))
def test_editing_plugin_changes_page_modification_time_in_sitemap(self):
now = timezone.now()
one_day_ago = now - datetime.timedelta(days=1)
page = api.create_page("page", "nav_playground.html", "en", published=True)
title = page.get_title_obj('en')
page.creation_date = one_day_ago
page.changed_date = one_day_ago
plugin_id = self._create_text_plugin_on_page(page)
plugin = self._edit_text_plugin(plugin_id, "fnord")
actual_last_modification_time = CMSSitemap().lastmod(title)
actual_last_modification_time -= datetime.timedelta(microseconds=actual_last_modification_time.microsecond)
self.assertEqual(plugin.changed_date.date(), actual_last_modification_time.date())
def test_moving_plugin_to_different_placeholder(self):
with register_plugins(DumbFixturePlugin):
page = api.create_page(
"page",
"nav_playground.html",
"en"
)
plugin = api.add_plugin(
placeholder=page.placeholders.get(slot='body'),
plugin_type='DumbFixturePlugin',
language=settings.LANGUAGES[0][0]
)
child_plugin = api.add_plugin(
placeholder=page.placeholders.get(slot='body'),
plugin_type='DumbFixturePlugin',
language=settings.LANGUAGES[0][0],
parent=plugin
)
post = {
'plugin_id': child_plugin.pk,
'placeholder_id': page.placeholders.get(slot='right-column').pk,
'plugin_parent': '',
}
response = self.client.post(URL_CMS_PLUGIN_MOVE, post)
self.assertEqual(response.status_code, 200)
from cms.utils.plugins import build_plugin_tree
build_plugin_tree(page.placeholders.get(slot='right-column').get_plugins_list())
def test_get_plugins_for_page(self):
page_en = api.create_page("PluginOrderPage", "col_two.html", "en",
slug="page1", published=True, in_navigation=True)
ph_en = page_en.placeholders.get(slot="col_left")
text_plugin_1 = api.add_plugin(ph_en, "TextPlugin", "en", body="I'm inside an existing placeholder.")
# This placeholder is not in the template.
ph_en_not_used = page_en.placeholders.create(slot="not_used")
text_plugin_2 = api.add_plugin(ph_en_not_used, "TextPlugin", "en", body="I'm inside a non-existent placeholder.")
page_plugins = get_plugins_for_page(None, page_en, page_en.get_title_obj_attribute('language'))
db_text_plugin_1 = page_plugins.get(pk=text_plugin_1.pk)
self.assertRaises(CMSPlugin.DoesNotExist, page_plugins.get, pk=text_plugin_2.pk)
self.assertEqual(db_text_plugin_1.pk, text_plugin_1.pk)
def test_plugin_move_with_reload(self):
action_options = {
PLUGIN_MOVE_ACTION: {
'requires_reload': True
},
PLUGIN_COPY_ACTION: {
'requires_reload': True
},
}
non_reload_action_options = {
PLUGIN_MOVE_ACTION: {
'requires_reload': False
},
PLUGIN_COPY_ACTION: {
'requires_reload': False
},
}
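# Build two throwaway plugin classes on the fly: one that requests a reload on move/copy and one that does not.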
ReloadDrivenPlugin = type('ReloadDrivenPlugin', (CMSPluginBase,), dict(action_options=action_options, render_plugin=False))
NonReloadDrivenPlugin = type('NonReloadDrivenPlugin', (CMSPluginBase,), dict(action_options=non_reload_action_options, render_plugin=False))
with register_plugins(ReloadDrivenPlugin, NonReloadDrivenPlugin):
page = api.create_page("page", "nav_playground.html", "en", published=True)
source_placeholder = page.placeholders.get(slot='body')
target_placeholder = page.placeholders.get(slot='right-column')
plugin_1 = api.add_plugin(source_placeholder, ReloadDrivenPlugin, settings.LANGUAGES[0][0])
plugin_2 = api.add_plugin(source_placeholder, NonReloadDrivenPlugin, settings.LANGUAGES[0][0])
with force_language('en'):
plugin_1_action_urls = plugin_1.get_action_urls()
reload_expected = {
'reload': True,
'urls': plugin_1_action_urls,
}
# Test Plugin reload == True on Move
post = {
'plugin_id': plugin_1.pk,
'placeholder_id': target_placeholder.pk,
'plugin_parent': '',
}
response = self.client.post(URL_CMS_PLUGIN_MOVE, post)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content.decode('utf8')), reload_expected)
with force_language('en'):
plugin_2_action_urls = plugin_2.get_action_urls()
no_reload_expected = {
'reload': False,
'urls': plugin_2_action_urls,
}
# Test Plugin reload == False on Move
post = {
'plugin_id': plugin_2.pk,
'placeholder_id': target_placeholder.pk,
'plugin_parent': '',
}
response = self.client.post(URL_CMS_PLUGIN_MOVE, post)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content.decode('utf8')), no_reload_expected)
def test_plugin_copy_with_reload(self):
action_options = {
PLUGIN_MOVE_ACTION: {
'requires_reload': True
},
PLUGIN_COPY_ACTION: {
'requires_reload': True
},
}
non_reload_action_options = {
PLUGIN_MOVE_ACTION: {
'requires_reload': False
},
PLUGIN_COPY_ACTION: {
'requires_reload': False
},
}
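# Same pair of throwaway plugin classes as above, this time exercising the copy action.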
ReloadDrivenPlugin = type('ReloadDrivenPlugin', (CMSPluginBase,), dict(action_options=action_options, render_plugin=False))
NonReloadDrivenPlugin = type('NonReloadDrivenPlugin', (CMSPluginBase,), dict(action_options=non_reload_action_options, render_plugin=False))
with register_plugins(ReloadDrivenPlugin, NonReloadDrivenPlugin):
page = api.create_page("page", "nav_playground.html", "en", published=True)
source_placeholder = page.placeholders.get(slot='body')
target_placeholder = page.placeholders.get(slot='right-column')
api.add_plugin(source_placeholder, ReloadDrivenPlugin, settings.LANGUAGES[0][0])
plugin_2 = api.add_plugin(source_placeholder, NonReloadDrivenPlugin, settings.LANGUAGES[0][0])
# Test Plugin reload == True on Copy
copy_data = {
'source_placeholder_id': source_placeholder.pk,
'target_placeholder_id': target_placeholder.pk,
'target_language': settings.LANGUAGES[0][0],
'source_language': settings.LANGUAGES[0][0],
}
response = self.client.post(URL_CMS_PAGE + "copy-plugins/", copy_data)
self.assertEqual(response.status_code, 200)
json_response = json.loads(response.content.decode('utf8'))
self.assertEqual(json_response['reload'], True)
# Test Plugin reload == False on Copy
copy_data = {
'source_placeholder_id': source_placeholder.pk,
'source_plugin_id': plugin_2.pk,
'target_placeholder_id': target_placeholder.pk,
'target_language': settings.LANGUAGES[0][0],
'source_language': settings.LANGUAGES[0][0],
}
response = self.client.post(URL_CMS_PAGE + "copy-plugins/", copy_data)
self.assertEqual(response.status_code, 200)
json_response = json.loads(response.content.decode('utf8'))
self.assertEqual(json_response['reload'], False)
def test_custom_plugin_urls(self):
plugin_url = urlresolvers.reverse('admin:dumbfixtureplugin')
response = self.client.get(plugin_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"It works")
def test_plugin_require_parent(self):
"""
Assert that a plugin marked as 'require_parent' is not listed
in the plugin pool when a placeholder is specified
"""
ParentRequiredPlugin = type('ParentRequiredPlugin', (CMSPluginBase,),
dict(require_parent=True, render_plugin=False))
with register_plugins(ParentRequiredPlugin):
page = api.create_page("page", "nav_playground.html", "en", published=True)
placeholder = page.placeholders.get(slot='body')
plugin_list = plugin_pool.get_all_plugins(placeholder=placeholder, page=page)
self.assertFalse(ParentRequiredPlugin in plugin_list)
def test_plugin_toolbar_struct(self):
# Tests the output of the plugin toolbar structure.
GenericParentPlugin = type('GenericParentPlugin', (CMSPluginBase,), {'render_plugin':False})
with register_plugins(GenericParentPlugin):
page = api.create_page("page", "nav_playground.html", "en", published=True)
placeholder = page.placeholders.get(slot='body')
from cms.utils.placeholder import get_toolbar_plugin_struct
expected_struct = {'module': u'Generic',
'name': u'Parent Classes Plugin',
'value': 'ParentClassesPlugin'}
toolbar_struct = get_toolbar_plugin_struct([GenericParentPlugin],
placeholder.slot,
page,)
self.assertFalse(expected_struct in toolbar_struct)
def test_plugin_child_classes_from_settings(self):
page = api.create_page("page", "nav_playground.html", "en", published=True)
placeholder = page.placeholders.get(slot='body')
ChildClassesPlugin = type('ChildClassesPlugin', (CMSPluginBase,),
dict(child_classes=['TextPlugin'], render_template='allow_children_plugin.html'))
with register_plugins(ChildClassesPlugin):
plugin = api.add_plugin(placeholder, ChildClassesPlugin, settings.LANGUAGES[0][0])
plugin = plugin.get_plugin_class_instance()
## assert baseline
self.assertEqual(['TextPlugin'], plugin.get_child_classes(placeholder.slot, page))
CMS_PLACEHOLDER_CONF = {
'body': {
'child_classes': {
'ChildClassesPlugin': ['LinkPlugin', 'PicturePlugin'],
}
}
}
with self.settings(CMS_PLACEHOLDER_CONF=CMS_PLACEHOLDER_CONF):
self.assertEqual(['LinkPlugin', 'PicturePlugin'],
plugin.get_child_classes(placeholder.slot, page))
def test_plugin_parent_classes_from_settings(self):
page = api.create_page("page", "nav_playground.html", "en", published=True)
placeholder = page.placeholders.get(slot='body')
ParentClassesPlugin = type('ParentClassesPlugin', (CMSPluginBase,),
dict(parent_classes=['TextPlugin'], render_plugin=False))
with register_plugins(ParentClassesPlugin):
plugin = api.add_plugin(placeholder, ParentClassesPlugin, settings.LANGUAGES[0][0])
plugin = plugin.get_plugin_class_instance()
## assert baseline
self.assertEqual(['TextPlugin'], plugin.get_parent_classes(placeholder.slot, page))
CMS_PLACEHOLDER_CONF = {
'body': {
'parent_classes': {
'ParentClassesPlugin': ['TestPlugin'],
}
}
}
with self.settings(CMS_PLACEHOLDER_CONF=CMS_PLACEHOLDER_CONF):
self.assertEqual(['TestPlugin'],
plugin.get_parent_classes(placeholder.slot, page))
def test_plugin_parent_classes_from_object(self):
page = api.create_page("page", "nav_playground.html", "en", published=True)
placeholder = page.placeholders.get(slot='body')
ParentPlugin = type('ParentPlugin', (CMSPluginBase,),
dict(render_plugin=False))
ChildPlugin = type('ChildPlugin', (CMSPluginBase,),
dict(parent_classes=['ParentPlugin'], render_plugin=False))
with register_plugins(ParentPlugin, ChildPlugin):
plugin = api.add_plugin(placeholder, ParentPlugin, settings.LANGUAGES[0][0])
plugin = plugin.get_plugin_class_instance()
## assert baseline
child_classes = plugin.get_child_classes(placeholder.slot, page)
self.assertIn('ChildPlugin', child_classes)
self.assertIn('ParentPlugin', child_classes)
def test_plugin_require_parent_from_object(self):
page = api.create_page("page", "nav_playground.html", "en", published=True)
placeholder = page.placeholders.get(slot='body')
ParentPlugin = type('ParentPlugin', (CMSPluginBase,),
dict(render_plugin=False))
ChildPlugin = type('ChildPlugin', (CMSPluginBase,),
dict(require_parent=True, render_plugin=False))
with register_plugins(ParentPlugin, ChildPlugin):
plugin = api.add_plugin(placeholder, ParentPlugin, settings.LANGUAGES[0][0])
plugin = plugin.get_plugin_class_instance()
## assert baseline
child_classes = plugin.get_child_classes(placeholder.slot, page)
self.assertIn('ChildPlugin', child_classes)
self.assertIn('ParentPlugin', child_classes)
def test_plugin_translatable_content_getter_setter(self):
"""
Test the get_translatable_content / set_translatable_content round trip on a text plugin
"""
# add a new text plugin
page_data = self.get_new_page_data()
self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.all()[0]
created_plugin_id = self._create_text_plugin_on_page(page)
# now edit the plugin
plugin = self._edit_text_plugin(created_plugin_id, "Hello World")
self.assertEqual("Hello World", plugin.body)
# see if the getter works
self.assertEqual({'body': "Hello World"}, plugin.get_translatable_content())
# change the content
self.assertEqual(True, plugin.set_translatable_content({'body': "It works!"}))
# check if it changed
self.assertEqual("It works!", plugin.body)
# double check through the getter
self.assertEqual({'body': "It works!"}, plugin.get_translatable_content())
def test_plugin_pool_register_returns_plugin_class(self):
@plugin_pool.register_plugin
class DecoratorTestPlugin(CMSPluginBase):
render_plugin = False
name = "Test Plugin"
self.assertIsNotNone(DecoratorTestPlugin)
class FileSystemPluginTests(PluginsTestBaseCase):
def setUp(self):
super(FileSystemPluginTests, self).setUp()
call_command('collectstatic', interactive=False, verbosity=0, link=True)
def tearDown(self):
for directory in [settings.STATIC_ROOT, settings.MEDIA_ROOT]:
for root, dirs, files in os.walk(directory, topdown=False):
# We need to walk() the directory tree since rmdir() does not allow
# removing non-empty directories...
for name in files:
# Start by killing all files we walked
os.remove(os.path.join(root, name))
for name in dirs:
# Now all directories we walked...
os.rmdir(os.path.join(root, name))
super(FileSystemPluginTests, self).tearDown()
def test_fileplugin_icon_uppercase(self):
page = api.create_page('testpage', 'nav_playground.html', 'en')
body = page.placeholders.get(slot="body")
plugin = File(
plugin_type='FilePlugin',
placeholder=body,
position=1,
language=settings.LANGUAGE_CODE,
)
# This try/except block allows older and newer versions of the
# djangocms-file plugin to work here.
try:
plugin.file.save("UPPERCASE.JPG", SimpleUploadedFile(
"UPPERCASE.jpg", b"content"), False)
except ObjectDoesNotExist: # catches 'RelatedObjectDoesNotExist'
plugin.source.save("UPPERCASE.JPG", SimpleUploadedFile(
"UPPERCASE.jpg", b"content"), False)
plugin.add_root(instance=plugin)
self.assertNotEquals(plugin.get_icon_url().find('jpg'), -1)
class PluginManyToManyTestCase(PluginsTestBaseCase):
def setUp(self):
self.super_user = self._create_user("test", True, True)
self.slave = self._create_user("slave", True)
self._login_context = self.login_user_context(self.super_user)
self._login_context.__enter__()
# create 3 sections
self.sections = []
self.section_pks = []
for i in range(3):
section = Section.objects.create(name="section %s" % i)
self.sections.append(section)
self.section_pks.append(section.pk)
self.section_count = len(self.sections)
# create 10 articles per section
for section in self.sections:
for j in range(10):
Article.objects.create(
title="article %s" % j,
section=section
)
self.FIRST_LANG = settings.LANGUAGES[0][0]
self.SECOND_LANG = settings.LANGUAGES[1][0]
def test_dynamic_plugin_template(self):
page_en = api.create_page("CopyPluginTestPage (EN)", "nav_playground.html", "en")
ph_en = page_en.placeholders.get(slot="body")
api.add_plugin(ph_en, "ArticleDynamicTemplatePlugin", "en", title="a title")
api.add_plugin(ph_en, "ArticleDynamicTemplatePlugin", "en", title="custom template")
request = self.get_request(path=page_en.get_absolute_url())
plugins = get_plugins(request, ph_en, page_en.template)
for plugin in plugins:
if plugin.title == 'custom template':
self.assertEqual(plugin.get_plugin_class_instance().get_render_template({}, plugin, ph_en), 'articles_custom.html')
self.assertTrue('Articles Custom template' in plugin.render_plugin({}, ph_en))
else:
self.assertEqual(plugin.get_plugin_class_instance().get_render_template({}, plugin, ph_en), 'articles.html')
self.assertFalse('Articles Custom template' in plugin.render_plugin({}, ph_en))
def test_add_plugin_with_m2m(self):
# add a new text plugin
self.assertEqual(ArticlePluginModel.objects.count(), 0)
page_data = self.get_new_page_data()
self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.all()[0]
page.publish('en')
placeholder = page.placeholders.get(slot="body")
add_url = URL_CMS_PLUGIN_ADD + '?' + urlencode({
'plugin_type': "ArticlePlugin",
'plugin_language': self.FIRST_LANG,
'placeholder_id': placeholder.pk,
})
data = {
'title': "Articles Plugin 1",
"sections": self.section_pks
}
response = self.client.post(add_url, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(ArticlePluginModel.objects.count(), 1)
plugin = ArticlePluginModel.objects.all()[0]
self.assertEqual(self.section_count, plugin.sections.count())
response = self.client.get('/en/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))
self.assertEqual(response.status_code, 200)
self.assertEqual(plugin.sections.through._meta.db_table, 'manytomany_rel_articlepluginmodel_sections')
def test_add_plugin_with_m2m_and_publisher(self):
self.assertEqual(ArticlePluginModel.objects.count(), 0)
page_data = self.get_new_page_data()
response = self.client.post(URL_CMS_PAGE_ADD, page_data)
self.assertEqual(response.status_code, 302)
page = Page.objects.all()[0]
placeholder = page.placeholders.get(slot="body")
# add a plugin
data = {
'title': "Articles Plugin 1",
'sections': self.section_pks
}
add_url = URL_CMS_PLUGIN_ADD + '?' + urlencode({
'plugin_type': "ArticlePlugin",
'plugin_language': self.FIRST_LANG,
'placeholder_id': placeholder.pk,
})
response = self.client.post(add_url, data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'admin/cms/page/plugin/confirm_form.html')
# there should be only 1 plugin
self.assertEqual(1, CMSPlugin.objects.all().count())
self.assertEqual(1, ArticlePluginModel.objects.count())
articles_plugin = ArticlePluginModel.objects.all()[0]
self.assertEqual(u'Articles Plugin 1', articles_plugin.title)
self.assertEqual(self.section_count, articles_plugin.sections.count())
# check publish box
api.publish_page(page, self.super_user, 'en')
# there should now be two plugins - 1 draft, 1 public
self.assertEqual(2, CMSPlugin.objects.all().count())
self.assertEqual(2, ArticlePluginModel.objects.all().count())
db_counts = [plugin.sections.count() for plugin in ArticlePluginModel.objects.all()]
expected = [self.section_count for i in range(len(db_counts))]
self.assertEqual(expected, db_counts)
def test_copy_plugin_with_m2m(self):
page = api.create_page("page", "nav_playground.html", "en")
placeholder = page.placeholders.get(slot='body')
plugin = ArticlePluginModel(
plugin_type='ArticlePlugin',
placeholder=placeholder,
position=1,
language=self.FIRST_LANG)
plugin.add_root(instance=plugin)
edit_url = URL_CMS_PLUGIN_EDIT + str(plugin.pk) + "/"
data = {
'title': "Articles Plugin 1",
"sections": self.section_pks
}
response = self.client.post(edit_url, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(ArticlePluginModel.objects.count(), 1)
self.assertEqual(ArticlePluginModel.objects.all()[0].sections.count(), self.section_count)
page_data = self.get_new_page_data()
# create 2nd language page
page_data.update({
'language': self.SECOND_LANG,
'title': "%s %s" % (page.get_title(), self.SECOND_LANG),
})
response = self.client.post(URL_CMS_PAGE_CHANGE % page.pk + "?language=%s" % self.SECOND_LANG, page_data)
self.assertRedirects(response, URL_CMS_PAGE + "?language=%s" % self.SECOND_LANG)
self.assertEqual(CMSPlugin.objects.filter(language=self.FIRST_LANG).count(), 1)
self.assertEqual(CMSPlugin.objects.filter(language=self.SECOND_LANG).count(), 0)
self.assertEqual(CMSPlugin.objects.count(), 1)
self.assertEqual(Page.objects.all().count(), 1)
copy_data = {
'source_placeholder_id': placeholder.pk,
'target_placeholder_id': placeholder.pk,
'target_language': self.SECOND_LANG,
'source_language': self.FIRST_LANG,
}
response = self.client.post(URL_CMS_PLUGINS_COPY, copy_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content.decode('utf8').count('"position":'), 1)
# assert copy success
self.assertEqual(CMSPlugin.objects.filter(language=self.FIRST_LANG).count(), 1)
self.assertEqual(CMSPlugin.objects.filter(language=self.SECOND_LANG).count(), 1)
self.assertEqual(CMSPlugin.objects.count(), 2)
db_counts = [plgn.sections.count() for plgn in ArticlePluginModel.objects.all()]
expected = [self.section_count for _ in range(len(db_counts))]
self.assertEqual(expected, db_counts)
class PluginCopyRelationsTestCase(PluginsTestBaseCase):
"""Test the suggestions in the docs for copy_relations()"""
def setUp(self):
self.super_user = self._create_user("test", True, True)
self.FIRST_LANG = settings.LANGUAGES[0][0]
self._login_context = self.login_user_context(self.super_user)
self._login_context.__enter__()
page_data1 = self.get_new_page_data_dbfields()
page_data1['published'] = False
self.page1 = api.create_page(**page_data1)
page_data2 = self.get_new_page_data_dbfields()
page_data2['published'] = False
self.page2 = api.create_page(**page_data2)
self.placeholder1 = self.page1.placeholders.get(slot='body')
self.placeholder2 = self.page2.placeholders.get(slot='body')
def test_copy_fk_from_model(self):
plugin = api.add_plugin(
placeholder=self.placeholder1,
plugin_type="PluginWithFKFromModel",
language=self.FIRST_LANG,
)
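# Point a related object at the draft plugin; publishing should copy the FK relation over to the public plugin.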
FKModel.objects.create(fk_field=plugin)
old_public_count = FKModel.objects.filter(
fk_field__placeholder__page__publisher_is_draft=False
).count()
api.publish_page(
self.page1,
self.super_user,
self.FIRST_LANG
)
new_public_count = FKModel.objects.filter(
fk_field__placeholder__page__publisher_is_draft=False
).count()
self.assertEqual(
new_public_count,
old_public_count + 1
)
def test_copy_m2m_to_model(self):
plugin = api.add_plugin(
placeholder=self.placeholder1,
plugin_type="PluginWithM2MToModel",
language=self.FIRST_LANG,
)
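# Attach an M2M target to the draft plugin; publishing should copy the M2M relation over to the public plugin.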
m2m_target = M2MTargetModel.objects.create()
plugin.m2m_field.add(m2m_target)
old_public_count = M2MTargetModel.objects.filter(
pluginmodelwithm2mtomodel__placeholder__page__publisher_is_draft=False
).count()
api.publish_page(
self.page1,
self.super_user,
self.FIRST_LANG
)
new_public_count = M2MTargetModel.objects.filter(
pluginmodelwithm2mtomodel__placeholder__page__publisher_is_draft=False
).count()
self.assertEqual(
new_public_count,
old_public_count + 1
)
class PluginsMetaOptionsTests(TestCase):
''' TestCase set for ensuring that bugs like #992 are caught '''
# these plugins are inlined because, due to the nature of the #992
# ticket, we cannot actually import a single file with all the
# plugin variants in, because that calls __new__, at which point the
# error with splitted occurs.
def test_meta_options_as_defaults(self):
''' handling when a CMSPlugin's meta options are computed defaults '''
# this plugin relies on the base CMSPlugin and Model classes to
# decide what the app_label and db_table should be
plugin = TestPlugin.model
self.assertEqual(plugin._meta.db_table, 'meta_testpluginmodel')
self.assertEqual(plugin._meta.app_label, 'meta')
def test_meta_options_as_declared_defaults(self):
''' handling when a CMSPlugin's meta options are declared to match the defaults '''
# here, we declare the db_table and app_label explicitly, but to the same
# values as would be computed, thus making sure it's not a problem to
# supply options.
plugin = TestPlugin2.model
self.assertEqual(plugin._meta.db_table, 'meta_testpluginmodel2')
self.assertEqual(plugin._meta.app_label, 'meta')
def test_meta_options_custom_app_label(self):
''' make sure customised meta options on CMSPlugins don't break things '''
plugin = TestPlugin3.model
self.assertEqual(plugin._meta.db_table, 'one_thing_testpluginmodel3')
self.assertEqual(plugin._meta.app_label, 'one_thing')
def test_meta_options_custom_db_table(self):
''' make sure custom database table names are OK. '''
plugin = TestPlugin4.model
self.assertEqual(plugin._meta.db_table, 'or_another_4')
self.assertEqual(plugin._meta.app_label, 'meta')
def test_meta_options_custom_both(self):
''' We should be able to customise app_label and db_table together '''
plugin = TestPlugin5.model
self.assertEqual(plugin._meta.db_table, 'or_another_5')
self.assertEqual(plugin._meta.app_label, 'one_thing')
class LinkPluginTestCase(PluginsTestBaseCase):
def test_does_not_verify_existance_of_url(self):
form = LinkForm(
{'name': 'Linkname', 'url': 'http://www.nonexistant.test'})
self.assertTrue(form.is_valid())
def test_opens_in_same_window_by_default(self):
"""Could not figure out how to render this plugin
Checking only for the values in the model"""
form = LinkForm({'name': 'Linkname',
'url': 'http://www.nonexistant.test'})
link = form.save()
self.assertEqual(link.target, '')
def test_open_in_blank_window(self):
form = LinkForm({'name': 'Linkname',
'url': 'http://www.nonexistant.test', 'target': '_blank'})
link = form.save()
self.assertEqual(link.target, '_blank')
def test_open_in_parent_window(self):
form = LinkForm({'name': 'Linkname',
'url': 'http://www.nonexistant.test', 'target': '_parent'})
link = form.save()
self.assertEqual(link.target, '_parent')
def test_open_in_top_window(self):
form = LinkForm({'name': 'Linkname',
'url': 'http://www.nonexistant.test', 'target': '_top'})
link = form.save()
self.assertEqual(link.target, '_top')
def test_open_in_nothing_else(self):
form = LinkForm({'name': 'Linkname',
'url': 'http://www.nonexistant.test', 'target': 'artificial'})
self.assertFalse(form.is_valid())
class NoDatabasePluginTests(TestCase):
def test_render_meta_is_unique(self):
text = Text()
link = Link()
self.assertNotEqual(id(text._render_meta), id(link._render_meta))
def test_render_meta_does_not_leak(self):
text = Text()
link = Link()
text._render_meta.text_enabled = False
link._render_meta.text_enabled = False
self.assertFalse(text._render_meta.text_enabled)
self.assertFalse(link._render_meta.text_enabled)
link._render_meta.text_enabled = True
self.assertFalse(text._render_meta.text_enabled)
self.assertTrue(link._render_meta.text_enabled)
def test_db_table_hack(self):
# Plugin models have been moved away due to Django's AppConfig
from cms.test_utils.project.bunch_of_plugins.models import TestPlugin1
self.assertEqual(TestPlugin1._meta.db_table, 'bunch_of_plugins_testplugin1')
def test_db_table_hack_with_mixin(self):
# Plugin models have been moved away due to Django's AppConfig
from cms.test_utils.project.bunch_of_plugins.models import TestPlugin2
self.assertEqual(TestPlugin2._meta.db_table, 'bunch_of_plugins_testplugin2')
class PicturePluginTests(PluginsTestBaseCase):
def test_link_or_page(self):
"""Test a validator: you can enter a url or a page_link, but not both."""
page_data = self.get_new_page_data()
self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.all()[0]
picture = Picture(url="test")
# Note: don't call full_clean as it will check ALL fields - including
# the image, which we haven't defined. Call clean() instead which
# just validates the url and page_link fields.
picture.clean()
picture.page_link = page
picture.url = None
picture.clean()
picture.url = "test"
self.assertRaises(ValidationError, picture.clean)
class SimplePluginTests(TestCase):
def test_simple_naming(self):
class MyPlugin(CMSPluginBase):
render_template = 'base.html'
self.assertEqual(MyPlugin.name, 'My Plugin')
def test_simple_context(self):
class MyPlugin(CMSPluginBase):
render_template = 'base.html'
plugin = MyPlugin(ArticlePluginModel, admin.site)
context = {}
out_context = plugin.render(context, 1, 2)
self.assertEqual(out_context['instance'], 1)
self.assertEqual(out_context['placeholder'], 2)
self.assertIs(out_context, context)
class BrokenPluginTests(TestCase):
def test_import_broken_plugin(self):
"""
If there is an import error inside the cms_plugins file itself, it should
raise the ImportError rather than silently swallowing it -
as opposed to the ImportError raised when the file 'cms_plugins.py' doesn't
exist.
"""
new_apps = ['cms.test_utils.project.brokenpluginapp']
with self.settings(INSTALLED_APPS=new_apps):
plugin_pool.discovered = False
self.assertRaises(ImportError, plugin_pool.discover_plugins)
class MTIPluginsTestCase(PluginsTestBaseCase):
def test_add_edit_plugin(self):
from cms.test_utils.project.mti_pluginapp.models import TestPluginBetaModel
"""
Test that we can instantiate and use a MTI plugin
"""
# Create a page
page = create_page("Test", "nav_playground.html", settings.LANGUAGES[0][0])
placeholder = page.placeholders.get(slot="body")
# Add the MTI plugin
add_url = URL_CMS_PLUGIN_ADD + '?' + urlencode({
'plugin_type': "TestPluginBeta",
'plugin_language': settings.LANGUAGES[0][0],
'placeholder_id': placeholder.pk,
})
data = {
'alpha': 'ALPHA',
'beta': 'BETA'
}
response = self.client.post(add_url, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(TestPluginBetaModel.objects.count(), 1)
plugin_model = TestPluginBetaModel.objects.all()[0]
self.assertEqual("ALPHA", plugin_model.alpha)
self.assertEqual("BETA", plugin_model.beta)
def test_related_name(self):
from cms.test_utils.project.mti_pluginapp.models import (
TestPluginAlphaModel, TestPluginBetaModel, ProxiedAlphaPluginModel,
ProxiedBetaPluginModel, AbstractPluginParent, TestPluginGammaModel, MixedPlugin,
LessMixedPlugin, NonPluginModel
)
# the first concrete class of the following four plugins is TestPluginAlphaModel
self.assertEqual(TestPluginAlphaModel.cmsplugin_ptr.field.rel.related_name,
'mti_pluginapp_testpluginalphamodel')
self.assertEqual(TestPluginBetaModel.cmsplugin_ptr.field.rel.related_name,
'mti_pluginapp_testpluginalphamodel')
self.assertEqual(ProxiedAlphaPluginModel.cmsplugin_ptr.field.rel.related_name,
'mti_pluginapp_testpluginalphamodel')
self.assertEqual(ProxiedBetaPluginModel.cmsplugin_ptr.field.rel.related_name,
'mti_pluginapp_testpluginalphamodel')
# Abstract plugins will have the dynamic format for related name
self.assertEqual(
AbstractPluginParent.cmsplugin_ptr.field.rel.related_name,
'%(app_label)s_%(class)s'
)
# Concrete plugin of an abstract plugin gets its own related name
self.assertEqual(TestPluginGammaModel.cmsplugin_ptr.field.rel.related_name,
'mti_pluginapp_testplugingammamodel')
# Child plugin gets its own related name
self.assertEqual(MixedPlugin.cmsplugin_ptr.field.rel.related_name,
'mti_pluginapp_mixedplugin')
# If the child plugin inherits straight from CMSPlugin, even when composed with
# other models, it gets its own related_name
self.assertEqual(LessMixedPlugin.cmsplugin_ptr.field.rel.related_name,
'mti_pluginapp_lessmixedplugin')
# Non-plugin models are skipped
self.assertFalse(hasattr(NonPluginModel, 'cmsplugin_ptr'))
|
py | 1a530382cb129c98598aab5d6cbfc088a21e9521 | from django.contrib.auth.models import User, Group
from coffee.models import *
from rest_framework import serializers
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = ('url', 'username', 'email', 'is_staff')
class GroupSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Group
fields = ('url', 'name')
class CompanySerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Company
fields = '__all__'
class RouteSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Route
fields = '__all__'
|
py | 1a530387dcfcb06befd3d4898b89863cdf6e7c2c | #!/usr/bin/python2.7
from __future__ import print_function
import sys
import os
import copy
import argparse
def outputReactions(fn, sfx, rxns, db_rxns, db_enz, gf=None, v=False):
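# Write a tab-separated report of reactions, their enzyme complexes and roles; when a gap-filled set is given, flag each reaction and record its gap-fill step.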
fn += "_"
fh = open(fn + sfx, "w")
if gf:
fh.write("reaction\tcomplex\tfunction\tgapfilled\tgfstep\tequation\n")
else:
fh.write("reaction\tcomplex\tfunction\tequation\n")
for r in rxns:
myEnz = db_rxns[r].enzymes
eqn = db_rxns[r].equation
if not gf:
currGF = ""
gfstep = ""
elif r in gf:
currGF = "yes\t"
gfstep = db_rxns[r].gapfill_method + "\t"
else:
currGF = "no\t"
gfstep = "\t"
if len(myEnz) == 0:
if v:
print("No enzymes found for reaction", r, file=sys.stderr)
fh.write("{}\tnull\tnull\t{}{}{}\n".format(r, currGF, gfstep, eqn))
continue
for e in myEnz:
if e not in db_enz:
if v:
print(e, "does not exist in 'enzymes'", file=sys.stderr)
fh.write("{}\t{}\tnull\t{}{}{}\n".format(r, e, currGF,
gfstep, eqn))
continue
myRoles = db_enz[e].roles
if len(myRoles) == 0:
if v:
print("No roles found for enzyme", e, file=sys.stderr)
fh.write("{}\t{}\tnull\t{}{}{}\n".format(r, e, currGF,
gfstep, eqn))
continue
for role in myRoles:
fh.write("{}\t{}\t{}\t{}{}{}\n".format(r, e, role, currGF,
gfstep, eqn))
fh.close()
def outputFlux(fn, sfx):
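# Dump the primal flux value of every reaction column from the most recent LP solution.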
fh = open(fn + "_reactions_" + sfx, "w")
for rxn, val in PyFBA.lp.col_primal_hash().items():
fh.write(rxn + "\t" + str(val) + "\n")
fh.close()
parser = argparse.ArgumentParser(description="Build model from roles then gap-fill model")
parser.add_argument("functions", help="Assigned functions file")
parser.add_argument("cgfunctions", help="Closest genomes functions file")
parser.add_argument("media", help="Media file")
parser.add_argument("-o", "--outsuffix", help="Suffix for output files")
parser.add_argument("--draft", help="Output starting reactions",
action="store_true")
parser.add_argument("-v", "--verbose", help="Verbose stderr output",
action="store_true")
parser.add_argument("--dev", help="Use PyFBA dev code",
action="store_true")
args = parser.parse_args()
outsfx = args.outsuffix if args.outsuffix else "out"
if args.dev:
# Import PyFBA from absolute path
sys.path.insert(0, os.path.expanduser("~") + "/Projects/PyFBA/")
sys.path.insert(0, os.path.expanduser("~") + "/PyFBA/")
print("IN DEV MODE", file=sys.stderr)
import PyFBA
# Load ModelSEED database
modeldata = PyFBA.parse.model_seed.parse_model_seed_data('gramnegative', verbose=True)
# Read in assigned functions file
assigned_functions = PyFBA.parse.read_assigned_functions(args.functions)
roles = set([i[0] for i in [list(j) for j in assigned_functions.values()]])
print("There are {} unique roles in this genome".format(len(roles)),
file=sys.stderr)
# Obtain dictionary of roles and their reactions
#roles_to_reactions = PyFBA.filters.roles_to_reactions(roles)
#reactions_to_run = set()
#for role in roles_to_reactions:
# reactions_to_run.update(roles_to_reactions[role])
#print("There are {}".format(len(reactions_to_run)),
# "unique reactions associated with this genome", file=sys.stderr)
# Obtain enzyme complexes from roles
complexes = PyFBA.filters.roles_to_complexes(roles)
if args.verbose:
print("There are", len(complexes["complete"]), "complete and",
len(complexes["incomplete"]), "incomplete enzyme complexes",
file=sys.stderr)
# Get reactions from only completed complexes
reactions_to_run = set()
for c in complexes["complete"]:
reactions_to_run.update(modeldata.enzymes[c].reactions)
print("There are {}".format(len(reactions_to_run)),
"unique reactions associated with this genome", file=sys.stderr)
# Remove reaction IDs that do not have a reaction equation associated
tempset = set()
for r in reactions_to_run:
if r in modeldata.reactions:
tempset.add(r)
elif args.verbose:
print("Reaction ID {}".format(r),
"is not in our reactions list. Skipped",
file=sys.stderr)
reactions_to_run = tempset
if args.draft:
outputReactions("origreactions", outsfx,
reactions_to_run, modeldata.reactions, modeldata.enzymes, gf=None, v=args.verbose)
# Load our media
media = PyFBA.parse.read_media_file(args.media)
print("Our media has {} components".format(len(media)), file=sys.stderr)
# Define a biomass equation
biomass_equation = PyFBA.metabolism.biomass_equation('gramnegative')
# Run FBA on our media
status, value, growth = PyFBA.fba.run_fba(modeldata,
reactions_to_run,
media,
biomass_equation)
print("Initial run has a biomass flux value of",
"{} --> Growth: {}".format(value, growth), file=sys.stderr)
# Check to see if model needs any gap-filling
if growth:
print("Model grew without gap-filling", file=sys.stderr)
sys.exit()
# Gap-fill the model
added_reactions = []
original_reactions_to_run = copy.copy(reactions_to_run)
# Media import reactions
if not growth:
print("Adding media import reactions", file=sys.stderr)
media_reactions = PyFBA.gapfill.suggest_from_media(modeldata,
reactions_to_run, media)
added_reactions.append(("media", media_reactions))
reactions_to_run.update(media_reactions)
print("Attempting to add", len(media_reactions), "reacitons",
file=sys.stderr)
print("Total # reactions:", len(reactions_to_run), file=sys.stderr)
status, value, growth = PyFBA.fba.run_fba(modeldata,
reactions_to_run,
media,
biomass_equation)
print("The biomass reaction has a flux of",
"{} --> Growth: {}".format(value, growth), file=sys.stderr)
# Essential reactions
if not growth:
print("Adding essential reactions", file=sys.stderr)
essential_reactions = PyFBA.gapfill.suggest_essential_reactions()
added_reactions.append(("essential", essential_reactions))
reactions_to_run.update(essential_reactions)
print("Attempting to add", len(essential_reactions), "reacitons",
file=sys.stderr)
print("Total # reactions:", len(reactions_to_run), file=sys.stderr)
status, value, growth = PyFBA.fba.run_fba(modeldata,
reactions_to_run,
media,
biomass_equation)
print("The biomass reaction has a flux of",
"{} --> Growth: {}".format(value, growth), file=sys.stderr)
# Reactions from closely related organisms
if not growth:
print("Adding close organisms reactions", file=sys.stderr)
reactions_from_other_orgs =\
PyFBA.gapfill.suggest_from_roles(args.cgfunctions, modeldata.reactions)
added_reactions.append(("close genomes", reactions_from_other_orgs))
reactions_to_run.update(reactions_from_other_orgs)
print("Attempting to add", len(reactions_from_other_orgs), "reacitons",
file=sys.stderr)
print("Total # reactions:", len(reactions_to_run), file=sys.stderr)
status, value, growth = PyFBA.fba.run_fba(modeldata,
reactions_to_run,
media,
biomass_equation)
print("The biomass reaction has a flux of",
"{} --> Growth: {}".format(value, growth), file=sys.stderr)
# Subsystems
if not growth:
print("Adding subsystem reactions", file=sys.stderr)
subsystem_reactions =\
PyFBA.gapfill.suggest_reactions_from_subsystems(modeldata.reactions,
reactions_to_run,
threshold=0.5)
added_reactions.append(("subsystems", subsystem_reactions))
reactions_to_run.update(subsystem_reactions)
print("Attempting to add", len(subsystem_reactions), "reacitons",
file=sys.stderr)
print("Total # reactions:", len(reactions_to_run), file=sys.stderr)
status, value, growth = PyFBA.fba.run_fba(modeldata,
reactions_to_run,
media,
biomass_equation)
print("The biomass reaction has a flux of",
"{} --> Growth: {}".format(value, growth), file=sys.stderr)
# EC-related reactions
if not growth:
print("Adding EC-related reactions", file=sys.stderr)
ec_reactions = PyFBA.gapfill.suggest_reactions_using_ec(roles,
modeldata.reactions,
reactions_to_run)
added_reactions.append(("ec", ec_reactions))
reactions_to_run.update(ec_reactions)
print("Attempting to add", len(ec_reactions), "reacitons",
file=sys.stderr)
print("Total # reactions:", len(reactions_to_run), file=sys.stderr)
status, value, growth = PyFBA.fba.run_fba(modeldata,
reactions_to_run,
media,
biomass_equation)
print("The biomass reaction has a flux of",
"{} --> Growth: {}".format(value, growth), file=sys.stderr)
# Compound-probability-based reactions
if not growth:
print("Adding compound-probability-based reactions", file=sys.stderr)
probable_reactions = PyFBA.gapfill.compound_probability(modeldata.reactions,
reactions_to_run,
cutoff=0,
rxn_with_proteins=True)
added_reactions.append(("probable", probable_reactions))
reactions_to_run.update(probable_reactions)
print("Attempting to add", len(probable_reactions), "reacitons",
file=sys.stderr)
print("Total # reactions:", len(reactions_to_run), file=sys.stderr)
status, value, growth = PyFBA.fba.run_fba(modeldata,
reactions_to_run,
media,
biomass_equation)
print("The biomass reaction has a flux of",
"{} --> Growth: {}".format(value, growth), file=sys.stderr)
# Orphan compounds
if not growth:
print("Adding orphan-compound reactions", file=sys.stderr)
orphan_reactions =\
PyFBA.gapfill.suggest_by_compound(modeldata,
reactions_to_run,
max_reactions=1)
added_reactions.append(("orphans", orphan_reactions))
reactions_to_run.update(orphan_reactions)
print("Attempting to add", len(orphan_reactions), "reacitons",
file=sys.stderr)
print("Total # reactions:", len(reactions_to_run), file=sys.stderr)
status, value, growth = PyFBA.fba.run_fba(modeldata,
reactions_to_run,
media,
biomass_equation)
print("The biomass reaction has a flux of",
"{} --> Growth: {}".format(value, growth), file=sys.stderr)
if not growth:
print("UNABLE TO GAP-FILL MODEL", file=sys.stderr)
sys.exit()
# Trimming the model
reqd_additional = set()
# Begin loop through all gap-filled reactions
while added_reactions:
ori = copy.copy(original_reactions_to_run)
ori.update(reqd_additional)
# Test next set of gap-filled reactions
# Each set is based on a method described above
how, new = added_reactions.pop()
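    # Note: added_reactions is processed LIFO, so the most speculative suggestion sets
    # (orphan-compound and probability-based) are minimized first, while the earlier,
    # higher-confidence sets remain in the trial reaction base being tested against.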
# Get all the other gap-filled reactions we need to add
for tple in added_reactions:
ori.update(tple[1])
# Use minimization function to determine the minimal
# set of gap-filled reactions from the current method
new_essential =\
PyFBA.gapfill.minimize_additional_reactions(ori,
new,
modeldata,
media,
biomass_equation)
# Record the method used to determine
# how the reaction was gap-filled
for new_r in new_essential:
modeldata.reactions[new_r].is_gapfilled = True
modeldata.reactions[new_r].gapfill_method = how
reqd_additional.update(new_essential)
# Combine old and new reactions
all_reactions = original_reactions_to_run.union(reqd_additional)
status, value, growth = PyFBA.fba.run_fba(modeldata, all_reactions,
media, biomass_equation)
print("The biomass reaction has a flux of",
"{} --> Growth: {}".format(value, growth), file=sys.stderr)
# Save flux values
outputFlux("flux", outsfx)
# Save all reactions
outputReactions("allreactions", outsfx, all_reactions, modeldata.reactions, modeldata.enzymes, reqd_additional,
args.verbose) |
py | 1a5303af245faf5bc2debb10d1308faa1233dc0c | #!/Users/kamurayuki/FoodDetection/myenv/bin/python3.9
# $Id: rst2html4.py 7994 2016-12-10 17:41:45Z milde $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing (X)HTML.
The output conforms to XHTML 1.0 transitional
and almost to HTML 4.01 transitional (except for closing empty tags).
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates (X)HTML documents from standalone reStructuredText '
'sources. ' + default_description)
publish_cmdline(writer_name='html4', description=description)
|
py | 1a5304961dacc61be9e1f6c8e6461419d086c678 |
class Config:
# Derived from here:
# https://sites.google.com/site/tomihasa/google-language-codes#searchlanguage
LANGUAGES = [
{'name': 'English', 'value': 'lang_en'},
{'name': 'Afrikaans', 'value': 'lang_af'},
{'name': 'Arabic', 'value': 'lang_ar'},
{'name': 'Armenian', 'value': 'lang_hy'},
{'name': 'Belarusian', 'value': 'lang_be'},
{'name': 'Bulgarian', 'value': 'lang_bg'},
{'name': 'Catalan', 'value': 'lang_ca'},
{'name': 'Chinese (Simplified)', 'value': 'lang_zh-CN'},
{'name': 'Chinese (Traditional)', 'value': 'lang_zh-TW'},
{'name': 'Croatian', 'value': 'lang_hr'},
{'name': 'Czech', 'value': 'lang_cs'},
{'name': 'Danish', 'value': 'lang_da'},
{'name': 'Dutch', 'value': 'lang_nl'},
{'name': 'Esperanto', 'value': 'lang_eo'},
{'name': 'Estonian', 'value': 'lang_et'},
{'name': 'Filipino', 'value': 'lang_tl'},
{'name': 'Finnish', 'value': 'lang_fi'},
{'name': 'French', 'value': 'lang_fr'},
{'name': 'German', 'value': 'lang_de'},
{'name': 'Greek', 'value': 'lang_el'},
{'name': 'Hebrew', 'value': 'lang_iw'},
{'name': 'Hindi', 'value': 'lang_hi'},
{'name': 'Hungarian', 'value': 'lang_hu'},
{'name': 'Icelandic', 'value': 'lang_is'},
{'name': 'Indonesian', 'value': 'lang_id'},
{'name': 'Italian', 'value': 'lang_it'},
{'name': 'Japanese', 'value': 'lang_ja'},
{'name': 'Korean', 'value': 'lang_ko'},
{'name': 'Latvian', 'value': 'lang_lv'},
{'name': 'Lithuanian', 'value': 'lang_lt'},
{'name': 'Norwegian', 'value': 'lang_no'},
{'name': 'Persian', 'value': 'lang_fa'},
{'name': 'Polish', 'value': 'lang_pl'},
{'name': 'Portuguese', 'value': 'lang_pt'},
{'name': 'Romanian', 'value': 'lang_ro'},
{'name': 'Russian', 'value': 'lang_ru'},
{'name': 'Serbian', 'value': 'lang_sr'},
{'name': 'Slovak', 'value': 'lang_sk'},
{'name': 'Slovenian', 'value': 'lang_sl'},
{'name': 'Spanish', 'value': 'lang_es'},
{'name': 'Swahili', 'value': 'lang_sw'},
{'name': 'Swedish', 'value': 'lang_sv'},
{'name': 'Thai', 'value': 'lang_th'},
{'name': 'Turkish', 'value': 'lang_tr'},
{'name': 'Ukrainian', 'value': 'lang_uk'},
{'name': 'Vietnamese', 'value': 'lang_vi'},
]
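    # Hedged usage sketch (attribute names are the ones set in __init__ below):
    #   conf = Config(url='https://example.com', dark=True)
    #   conf['lang']      # -> 'lang_en' via __getitem__
    #   'near' in conf    # -> True via __contains__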
def __init__(self, **kwargs):
self.url = ''
self.lang = 'lang_en'
self.dark = False
self.nojs = False
self.near = ''
self.new_tab = False
self.get_only = False
for key, value in kwargs.items():
setattr(self, key, value)
def __getitem__(self, name):
return getattr(self, name)
def __setitem__(self, name, value):
return setattr(self, name, value)
def __delitem__(self, name):
return delattr(self, name)
def __contains__(self, name):
return hasattr(self, name) |
py | 1a53051af3ac2eb3287f7b18ffd05ee732ca84ee | import unittest
import numpy as np
from slice_merge import TiledImage
class TestTiledImage(unittest.TestCase):
def test_slicing(self):
data = np.zeros((5, 5, 2))
sliced = TiledImage(data, tile_size=2, keep_rest=True)
self.assertEqual(sliced.data.shape, (3, 3, 2, 2, 2))
self.assertEqual(sliced.image().shape, data.shape)
sliced = TiledImage(data, tile_size=2, keep_rest=False)
self.assertEqual(sliced.data.shape, (2, 2, 2, 2, 2))
sliced = TiledImage(data, number_of_tiles=2, keep_rest=True)
self.assertEqual(sliced.data.shape, (2, 2, 3, 3, 2))
self.assertEqual(sliced.image().shape, data.shape)
sliced = TiledImage(data, number_of_tiles=2, keep_rest=False)
self.assertEqual(sliced.data.shape, (2, 2, 2, 2, 2))
def test_set_tile(self):
data = np.zeros((2, 2, 1))
sliced = TiledImage(data, tile_size=1, keep_rest=True)
new_tile = np.ones((1, 1, 1))
data[1, 0, 0] = 1
sliced.set_tile(1, 0, new_tile)
self.assertTrue(np.array_equal(sliced.image(), data))
self.assertTrue(np.array_equal(new_tile, sliced.get_tile(1, 0)))
def test_apply(self):
data = np.arange(25).reshape((5, 5, 1))
true_result = data**2
sliced = TiledImage(data, tile_size=2, keep_rest=True)
result = sliced.merge(sliced.apply(lambda x: x**2))
self.assertTrue(np.array_equal(result, true_result))
result = sliced.merge(sliced.apply(np.square, parallel=True))
def test_list_tiles_2d(self):
data = np.arange(25).reshape((5, 5, 1))
true_result = np.arange(4).reshape((2, 2))
sliced = TiledImage(data, 2)
self.assertEqual(sliced.list_tiles(tile_2d=True)[0].shape, true_result.shape)
def test_list_indices(self):
data = np.arange(25).reshape((5, 5, 1))
sliced = TiledImage(data, tile_size=2)
tile_indices = sliced.list_tile_indices()
tile_list = sliced.list_tiles()
tile_by_index = sliced.get_tile(*tile_indices[1])
tile_from_list = tile_list[1]
self.assertTrue(np.array_equal(tile_from_list, tile_by_index))
if __name__ == '__main__':
unittest.main()
|
py | 1a53071100d86b88076d75fb80015add2493366f | # -*- coding: utf-8 -*-
'''
plexOdus
'''
import re, datetime
import json, requests, xbmc
from time import sleep
from resources.lib.modules import control
from resources.lib.modules import client
from resources.lib.modules import metacache
from resources.lib.modules import log_utils
# from resources.lib.modules import trakt
class Movies:
def __init__(self):
self.list = []
self.meta = []
self.datetime = (datetime.datetime.utcnow() - datetime.timedelta(hours = 5))
self.lang = control.apiLanguage()['trakt']
self.tmdb_key = control.setting('tm.user')
if self.tmdb_key == '' or self.tmdb_key is None:
self.tmdb_key = '534af3567d39c2b265ee5251537e13c2'
self.tmdb_link = 'http://api.themoviedb.org'
self.tmdb_image = 'http://image.tmdb.org/t/p/original'
# self.tmdb_poster = 'http://image.tmdb.org/t/p/w500'
self.tmdb_poster = 'http://image.tmdb.org/t/p/w300'
self.tmdb_fanart = 'http://image.tmdb.org/t/p/w1280'
self.tmdb_info_link = 'http://api.themoviedb.org/3/movie/%s?api_key=%s&language=%s&append_to_response=credits,release_dates,external_ids' % ('%s', self.tmdb_key, self.lang)
### other "append_to_response" options alternative_titles,videos,images
self.tmdb_art_link = 'http://api.themoviedb.org/3/movie/%s/images?api_key=%s&include_image_language=en,%s,null' % ('%s', self.tmdb_key, self.lang)
def get_request(self, url):
try:
try:
response = requests.get(url)
except requests.exceptions.SSLError:
response = requests.get(url, verify=False)
except requests.exceptions.ConnectionError:
control.notification(title='default', message=32024, icon='INFO')
return
if '200' in str(response):
return json.loads(response.text)
elif 'Retry-After' in response.headers:
# API REQUESTS ARE BEING THROTTLED, INTRODUCE WAIT TIME
throttleTime = response.headers['Retry-After']
log_utils.log2('TMDB Throttling Applied, Sleeping for %s seconds' % throttleTime, '')
sleep(int(throttleTime) + 1)
return self.get_request(url)
else:
log_utils.log2('Get request failed to TMDB URL: %s' % url, 'error')
log_utils.log2('TMDB Response: %s' % response.text, 'error')
return None
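    # Minimal usage sketch (the id is hypothetical): get_request() returns the parsed
    # JSON dict on HTTP 200, retries after the advertised delay when throttled, and
    # otherwise logs the failure and returns None, e.g.:
    #   data = self.get_request(self.tmdb_info_link % '603')
    #   if data: title = data.get('title')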
def tmdb_list(self, url):
next = url
try:
result = self.get_request(url % self.tmdb_key)
items = result['results']
except:
return
# try:
# page = int(result['page'])
# total = int(result['total_pages'])
# if page >= total: raise Exception()
# url2 = '%s&page=%s' % (url.split('&page=', 1)[0], str(page+1))
# result = self.get_request(url2 % self.tmdb_key)
# # result = client.request(url2 % self.tmdb_key)
# # result = json.loads(result)
# items += result['results']
# except: pass
try:
page = int(result['page'])
total = int(result['total_pages'])
if page >= total: raise Exception()
if not 'page=' in url: raise Exception()
next = '%s&page=%s' % (next.split('&page=', 1)[0], str(page+1))
next = next.encode('utf-8')
except:
next = ''
for item in items:
try:
title = item['title']
title = client.replaceHTMLCodes(title)
title = title.encode('utf-8')
try:
originaltitle = item['original_title']
                    originaltitle = client.replaceHTMLCodes(originaltitle)
                    originaltitle = originaltitle.encode('utf-8')
except:
originaltitle = title
year = item['release_date']
year = re.compile('(\d{4})').findall(year)[-1]
year = year.encode('utf-8')
tmdb = item['id']
tmdb = re.sub('[^0-9]', '', str(tmdb))
tmdb = tmdb.encode('utf-8')
# try:
# meta_chk = []
# meta_chk.append({'tmdb': tmdb, 'imdb': '0', 'tvdb': '0'})
# meta_chk = metacache.fetch(meta_chk, self.lang, self.tmdb_key)
# log_utils.log('meta_chk = %s' % str(meta_chk), __name__, log_utils.LOGDEBUG)
# for i in meta_chk:
# if 'metacache' in i:
# if i['metacache'] is True:
# item = meta_chk
# log_utils.log('metacache = %s' % i['metacache'], __name__, log_utils.LOGDEBUG)
# raise Exception()
poster = item['poster_path']
if poster == '' or poster is None:
poster = '0'
if not poster == '0':
poster = '%s%s' % (self.tmdb_poster, poster)
poster = poster.encode('utf-8')
fanart = item['backdrop_path']
if fanart == '' or fanart is None:
fanart = '0'
if not fanart == '0':
fanart = '%s%s' % (self.tmdb_image, fanart)
fanart = fanart.encode('utf-8')
premiered = item['release_date']
try:
premiered = re.compile('(\d{4}-\d{2}-\d{2})').findall(premiered)[0]
except:
premiered = '0'
premiered = premiered.encode('utf-8')
try:
rating = str(item['vote_average']).encode('utf-8')
except:
rating = '0'
try:
votes = str(format(int(item['vote_count']),',d')).encode('utf-8')
except:
votes = '0'
plot = item['overview']
if plot == '' or plot is None:
plot = '0'
plot = client.replaceHTMLCodes(plot)
plot = plot.encode('utf-8')
try:
tagline = item['tagline']
tagline = re.compile('[.!?][\s]{1,2}(?=[A-Z])').split(plot)[0]
tagline = tagline.encode('utf-8')
except:
tagline = '0'
##--TMDb additional info
url = self.tmdb_info_link % tmdb
item = self.get_request(url)
imdb = item['external_ids']['imdb_id']
if imdb == '' or imdb is None: imdb = '0'
imdb = imdb.encode('utf-8')
# studio = item['production_companies']
# try: studio = [x['name'] for x in studio][0]
# except: studio = '0'
# if studio == '' or studio is None: studio = '0'
# studio = studio.encode('utf-8')
try:
genre = item['genres']
genre = [x['name'] for x in genre]
genre = (' / '.join(genre)).encode('utf-8')
except:
genre = 'NA'
try:
duration = (str(item['runtime'])).encode('utf-8')
except:
duration = '0'
mpaa = item['release_dates']['results']
mpaa = [i for i in mpaa if i['iso_3166_1'] == 'US']
                try:
                    releases = mpaa[0].get('release_dates')
                    mpaa = releases[-1].get('certification')
                    if not mpaa:
                        mpaa = releases[0].get('certification')
                    if not mpaa:
                        mpaa = releases[1].get('certification')
                    mpaa = str(mpaa).encode('utf-8')
                except: mpaa = '0'
director = item['credits']['crew']
try: director = [x['name'] for x in director if x['job'].encode('utf-8') == 'Director']
except: director = '0'
if director == '' or director is None or director == []: director = '0'
director = (' / '.join(director)).encode('utf-8')
writer = item['credits']['crew']
try: writer = [x['name'] for x in writer if x['job'].encode('utf-8') in ['Writer', 'Screenplay']]
except: writer = '0'
try: writer = [x for n,x in enumerate(writer) if x not in writer[:n]]
except: writer = '0'
if writer == '' or writer is None or writer == []: writer = '0'
writer = (' / '.join(writer)).encode('utf-8')
cast = item['credits']['cast']
try: cast = [(x['name'].encode('utf-8'), x['character'].encode('utf-8')) for x in cast]
except: cast = []
try:
                    if imdb is not None and not imdb == '0':
                        url = self.imdbinfo % imdb  # NOTE: self.imdbinfo is never set in __init__, so this fallback currently fails and is swallowed by the except below
item = client.request(url, timeout='30')
item = json.loads(item)
plot2 = item['Plot']
plot2 = client.replaceHTMLCodes(plot2)
                        plot2 = plot2.encode('utf-8')
if plot == '0' or plot == '' or plot is None: plot = plot2
rating2 = str(item['imdbRating'])
rating2 = rating2.encode('utf-8')
if rating == '0' or rating == '' or rating is None: rating = rating2
votes2 = str(item['imdbVotes'])
votes2 = str(format(int(votes2),',d'))
votes2 = votes2.encode('utf-8')
if votes == '0' or votes == '' or votes is None: votes = votes2
except:
pass
item = {}
item = {'content': 'movie', 'title': title, 'originaltitle': originaltitle, 'year': year, 'premiered': premiered, 'studio': '0', 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes,
'mpaa': mpaa, 'director': director, 'writer': writer, 'cast': cast, 'plot': plot, 'tagline': tagline, 'code': tmdb, 'imdb': imdb, 'tmdb': tmdb, 'tvdb': '0', 'poster': poster,
'poster2': '0', 'poster3': '0', 'banner': '0', 'fanart': fanart, 'fanart2': '0', 'fanart3': '0', 'clearlogo': '0', 'clearart': '0', 'landscape': '0', 'metacache': False, 'next': next}
meta = {}
meta = {'imdb': imdb, 'tmdb': tmdb, 'tvdb': '0', 'lang': self.lang, 'user': self.tmdb_key, 'item': item}
# fanart_thread = threading.Thread
from resources.lib.indexers import fanarttv
extended_art = fanarttv.get_movie_art(tmdb)
if not extended_art is None:
item.update(extended_art)
meta.update(item)
self.list.append(item)
self.meta.append(meta)
metacache.insert(self.meta)
# log_utils.log('self.list = %s' % str(self.list), __name__, log_utils.LOGDEBUG)
except:
pass
return self.list
def tmdb_collections_list(self, url):
try:
result = self.get_request(url)
items = result['items']
except:
return
next = ''
for item in items:
try:
media_type = item['media_type']
title = item['title']
if not media_type == 'movie': title = item['name']
title = client.replaceHTMLCodes(title)
title = title.encode('utf-8')
try:
originaltitle = item['original_title']
                originaltitle = client.replaceHTMLCodes(originaltitle)
                originaltitle = originaltitle.encode('utf-8')
except: originaltitle = title
year = item['release_date']
year = re.compile('(\d{4})').findall(year)[0]
year = year.encode('utf-8')
tmdb = item['id']
tmdb = re.sub('[^0-9]', '', str(tmdb))
tmdb = tmdb.encode('utf-8')
poster = item['poster_path']
if poster == '' or poster is None: poster = '0'
if not poster == '0': poster = '%s%s' % (self.tmdb_poster, poster)
poster = poster.encode('utf-8')
fanart = item['backdrop_path']
if fanart == '' or fanart is None: fanart = '0'
if not fanart == '0': fanart = '%s%s' % (self.tmdb_image, fanart)
fanart = fanart.encode('utf-8')
premiered = item['release_date']
try: premiered = re.compile('(\d{4}-\d{2}-\d{2})').findall(premiered)[0]
except: premiered = '0'
premiered = premiered.encode('utf-8')
try:
rating = str(item['vote_average']).encode('utf-8')
except: rating = '0'
try:
votes = str(format(int(item['vote_count']),',d')).encode('utf-8')
except: votes = '0'
plot = item['overview']
if plot == '' or plot is None: plot = '0'
plot = client.replaceHTMLCodes(plot)
plot = plot.encode('utf-8')
try:
tagline = item['tagline']
if tagline == '' or tagline == '0' or tagline is None:
tagline = re.compile('[.!?][\s]{1,2}(?=[A-Z])').split(plot)[0]
tagline = tagline.encode('utf-8')
except: tagline = '0'
##--TMDb additional info
url = self.tmdb_info_link % tmdb
item = self.get_request(url)
imdb = item['external_ids']['imdb_id']
if imdb == '' or imdb is None: imdb = '0'
imdb = imdb.encode('utf-8')
# studio = item['production_companies']
# try: studio = [x['name'] for x in studio][0]
# except: studio = '0'
# if studio == '' or studio is None: studio = '0'
# studio = studio.encode('utf-8')
genre = item['genres']
try: genre = [x['name'] for x in genre]
except: genre = '0'
genre = ' / '.join(genre)
genre = genre.encode('utf-8')
if not genre: genre = 'NA'
try: duration = str(item['runtime'])
except: duration = '0'
if duration == '' or duration is None or duration == 'N/A': duration = '0'
duration = duration.encode('utf-8')
mpaa = item['release_dates']['results']
mpaa = [i for i in mpaa if i['iso_3166_1'] == 'US']
                try:
                    releases = mpaa[0].get('release_dates')
                    mpaa = releases[-1].get('certification')
                    if not mpaa:
                        mpaa = releases[0].get('certification')
                    if not mpaa:
                        mpaa = releases[1].get('certification')
                    mpaa = str(mpaa).encode('utf-8')
                except: mpaa = '0'
director = item['credits']['crew']
try: director = [x['name'] for x in director if x['job'].encode('utf-8') == 'Director']
except: director = '0'
if director == '' or director is None or director == []: director = '0'
director = ' / '.join(director)
director = director.encode('utf-8')
writer = item['credits']['crew']
try: writer = [x['name'] for x in writer if x['job'].encode('utf-8') in ['Writer', 'Screenplay']]
except: writer = '0'
try: writer = [x for n,x in enumerate(writer) if x not in writer[:n]]
except: writer = '0'
if writer == '' or writer is None or writer == []: writer = '0'
writer = ' / '.join(writer)
writer = writer.encode('utf-8')
cast = item['credits']['cast']
try: cast = [(x['name'].encode('utf-8'), x['character'].encode('utf-8')) for x in cast]
except: cast = []
try:
                    if imdb is not None and not imdb == '0':
url = self.imdbinfo % imdb
item = client.request(url, timeout='30')
item = json.loads(item)
plot2 = item['Plot']
plot2 = client.replaceHTMLCodes(plot2)
                        plot2 = plot2.encode('utf-8')
if plot == '0' or plot == '' or plot is None: plot = plot2
rating2 = str(item['imdbRating'])
rating2 = rating2.encode('utf-8')
if rating == '0' or rating == '' or rating is None: rating = rating2
votes2 = str(item['imdbVotes'])
votes2 = str(format(int(votes2),',d'))
votes2 = votes2.encode('utf-8')
if votes == '0' or votes == '' or votes is None: votes = votes2
except:
pass
item = {}
item = {'content': 'movie', 'title': title, 'originaltitle': originaltitle, 'year': year, 'premiered': premiered, 'studio': '0', 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes,
'mpaa': mpaa, 'director': director, 'writer': writer, 'cast': cast, 'plot': plot, 'tagline': tagline, 'code': tmdb, 'imdb': imdb, 'tmdb': tmdb, 'tvdb': '0', 'poster': poster,
'poster2': '0', 'poster3': '0', 'banner': '0', 'fanart': fanart, 'fanart2': '0', 'fanart3': '0', 'clearlogo': '0', 'clearart': '0', 'landscape': '0', 'metacache': False, 'next': next}
meta = {}
meta = {'imdb': imdb, 'tmdb': tmdb, 'tvdb': '0', 'lang': self.lang, 'user': self.tmdb_key, 'item': item}
# fanart_thread = threading.Thread
from resources.lib.indexers import fanarttv
extended_art = fanarttv.get_movie_art(imdb)
if not extended_art is None:
item.update(extended_art)
meta.update(item)
self.list.append(item)
self.meta.append(meta)
metacache.insert(self.meta)
except:
pass
return self.list
def tmdb_art(self, tmdb):
try:
if self.tmdb_key == '':
raise Exception()
art3 = self.get_request(self.tmdb_art_link % tmdb)
except:
import traceback
traceback.print_exc()
return None
url = (self.tmdb_art_link % tmdb)
try:
poster3 = art3['posters']
poster3 = [(x['width'], x['file_path']) for x in poster3]
poster3 = [x[1] for x in poster3]
poster3 = self.tmdb_poster + poster3[0]
except:
poster3 = '0'
try:
fanart3 = art3['backdrops']
fanart3 = [(x['width'], x['file_path']) for x in fanart3]
fanart3 = [x[1] for x in fanart3]
fanart3 = self.tmdb_fanart + fanart3[0]
except:
fanart3 = '0'
extended_art = {'extended': True, 'poster3': poster3, 'fanart3': fanart3}
return extended_art
class TVshows:
def __init__(self):
self.list = []
self.meta = []
self.datetime = (datetime.datetime.utcnow() - datetime.timedelta(hours = 5))
self.lang = control.apiLanguage()['tvdb']
self.tmdb_key = control.setting('tm.user')
if self.tmdb_key == '' or self.tmdb_key is None:
self.tmdb_key = '534af3567d39c2b265ee5251537e13c2'
self.tmdb_link = 'http://api.themoviedb.org'
self.tmdb_image = 'http://image.tmdb.org/t/p/original'
self.tmdb_poster = 'http://image.tmdb.org/t/p/w500'
self.tmdb_fanart = 'http://image.tmdb.org/t/p/w1280'
self.tmdb_info_link = 'http://api.themoviedb.org/3/tv/%s?api_key=%s&language=%s&append_to_response=credits,content_ratings,external_ids' % ('%s', self.tmdb_key, self.lang)
### other "append_to_response" options alternative_titles,videos,images
self.tmdb_art_link = 'http://api.themoviedb.org/3/tv/%s/images?api_key=%s&include_image_language=en,%s,null' % ('%s', self.tmdb_key, self.lang)
def get_request(self, url):
try:
try:
response = requests.get(url)
except requests.exceptions.SSLError:
response = requests.get(url, verify=False)
except requests.exceptions.ConnectionError:
control.notification(title='default', message=32024, icon='INFO')
return
if '200' in str(response):
return json.loads(response.text)
elif 'Retry-After' in response.headers:
# API REQUESTS ARE BEING THROTTLED, INTRODUCE WAIT TIME
throttleTime = response.headers['Retry-After']
log_utils.log2('TMDB Throttling Applied, Sleeping for %s seconds' % throttleTime, '')
sleep(int(throttleTime) + 1)
return self.get_request(url)
else:
log_utils.log2('Get request failed to TMDB URL: %s' % url, 'error')
log_utils.log2('TMDB Response: %s' % response.text, 'error')
return None
def tmdb_list(self, url):
next = url
try:
result = self.get_request(url % self.tmdb_key)
items = result['results']
except:
return
# try:
# page = int(result['page'])
# total = int(result['total_pages'])
# if page >= total: raise Exception()
# url2 = '%s&page=%s' % (url.split('&page=', 1)[0], str(page+1))
# result = self.get_request(url2 % self.tmdb_key)
# # result = client.request(url2 % self.tmdb_key)
# # result = json.loads(result)
# items += result['results']
# except: pass
try:
page = int(result['page'])
total = int(result['total_pages'])
if page >= total: raise Exception()
if not 'page=' in url: raise Exception()
next = '%s&page=%s' % (next.split('&page=', 1)[0], str(page+1))
next = next.encode('utf-8')
except:
next = ''
for item in items:
try:
title = item['name']
title = client.replaceHTMLCodes(title)
title = title.encode('utf-8')
year = item['first_air_date']
year = re.compile('(\d{4})').findall(year)[-1]
year = year.encode('utf-8')
tmdb = item['id']
tmdb = re.sub('[^0-9]', '', str(tmdb))
tmdb = tmdb.encode('utf-8')
poster = item['poster_path']
if poster == '' or poster is None: poster = '0'
if not poster == '0': poster = '%s%s' % (self.tmdb_poster, poster)
poster = poster.encode('utf-8')
fanart = item['backdrop_path']
if fanart == '' or fanart is None: fanart = '0'
if not fanart == '0': fanart = '%s%s' % (self.tmdb_image, fanart)
fanart = fanart.encode('utf-8')
# bannner = item['banner_path']
# if banner == '' or banner is None: banner = '0'
# if not banner == '0': banner = self.tmdb_image + banner
# banner = banner.encode('utf-8')
premiered = item['first_air_date']
try: premiered = re.compile('(\d{4}-\d{2}-\d{2})').findall(premiered)[0]
except: premiered = '0'
premiered = premiered.encode('utf-8')
rating = str(item['vote_average'])
if rating == '' or rating is None: rating = '0'
rating = rating.encode('utf-8')
votes = str(item['vote_count'])
try: votes = str(format(int(votes),',d'))
except: pass
if votes == '' or votes is None: votes = '0'
votes = votes.encode('utf-8')
plot = item['overview']
if plot == '' or plot is None: plot = '0'
plot = client.replaceHTMLCodes(plot)
plot = plot.encode('utf-8')
tagline = re.compile('[.!?][\s]{1,2}(?=[A-Z])').split(plot)[0]
try: tagline = tagline.encode('utf-8')
except: tagline = 'NA'
##--TMDb additional info
url = self.tmdb_info_link % tmdb
item = self.get_request(url)
tvdb = item['external_ids']['tvdb_id']
if tvdb == '' or tvdb is None or tvdb == 'N/A' or tvdb == 'NA': tvdb = '0'
tvdb = re.sub('[^0-9]', '', str(tvdb))
tvdb = tvdb.encode('utf-8')
imdb = item['external_ids']['imdb_id']
if imdb == '' or imdb is None or imdb == 'N/A' or imdb == 'NA': imdb = '0'
imdb = imdb.encode('utf-8')
genre = item['genres']
try: genre = [x['name'] for x in genre]
except: genre = '0'
genre = ' / '.join(genre)
genre = genre.encode('utf-8')
if not genre: genre = 'NA'
                try: duration = str(item['episode_run_time'][0])
                except: duration = '0'
duration = duration.encode('utf-8')
try:
mpaa = [i['rating'] for i in item['content_ratings']['results'] if i['iso_3166_1'] == 'US'][0]
except:
try:
mpaa = item['content_ratings'][0]['rating']
except: mpaa = 'NR'
studio = item['networks']
try: studio = [x['name'] for x in studio][0]
except: studio = '0'
if studio == '' or studio is None: studio = '0'
studio = studio.encode('utf-8')
director = item['credits']['crew']
try: director = [x['name'] for x in director if x['job'].encode('utf-8') == 'Director']
except: director = '0'
if director == '' or director is None or director == []: director = '0'
director = ' / '.join(director)
director = director.encode('utf-8')
cast = item['credits']['cast']
try: cast = [(x['name'].encode('utf-8'), x['character'].encode('utf-8')) for x in cast]
except: cast = []
# ##--IMDb additional info
                writer = '0'  # default so the entry still builds when no IMDb id is available
                if imdb is not None and not imdb == '0':
try:
url = self.imdb_by_query % imdb
item2 = client.request(url, timeout='30')
item2 = json.loads(item2)
                    except: item2 = {}
try:
mpaa2 = item2['Rated']
except: mpaa2 = 'NR'
                    mpaa2 = mpaa2.encode('utf-8')
                    if mpaa in ('0', 'NR') and not mpaa2 == 'NR': mpaa = mpaa2
try:
writer = item2['Writer']
except: writer = 'NA'
writer = writer.replace(', ', ' / ')
writer = re.sub(r'\(.*?\)', '', writer)
writer = ' '.join(writer.split())
writer = writer.encode('utf-8')
item = {}
item = {'content': 'tvshow', 'title': title, 'originaltitle': title, 'year': year, 'premiered': premiered, 'studio': studio, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes,
'mpaa': mpaa, 'director': director, 'writer': writer, 'cast': cast, 'plot': plot, 'tagline': tagline, 'code': tmdb, 'imdb': imdb, 'tmdb': tmdb, 'tvdb': tvdb, 'poster': poster,
'poster2': '0', 'banner': '0', 'banner2': '0', 'fanart': fanart, 'fanart2': '0', 'clearlogo': '0', 'clearart': '0', 'landscape': '0', 'metacache': False, 'next': next}
meta = {}
meta = {'tmdb': tmdb, 'imdb': imdb, 'tvdb': tvdb, 'lang': self.lang, 'user': self.tmdb_key, 'item': item}
# fanart_thread = threading.Thread
from resources.lib.indexers import fanarttv
extended_art = fanarttv.get_tvshow_art(tvdb)
if not extended_art is None:
item.update(extended_art)
meta.update(item)
self.list.append(item)
self.meta.append(meta)
metacache.insert(self.meta)
except:
pass
return self.list
def tmdb_collections_list(self, url):
result = self.get_request(url)
items = result['items']
next = ''
for item in items:
try:
title = item['name']
title = client.replaceHTMLCodes(title)
title = title.encode('utf-8')
year = item['first_air_date']
year = re.compile('(\d{4})').findall(year)[-1]
year = year.encode('utf-8')
tmdb = item['id']
if tmdb == '' or tmdb is None: tmdb = '0'
tmdb = re.sub('[^0-9]', '', str(tmdb))
tmdb = tmdb.encode('utf-8')
imdb = '0'
tvdb = '0'
poster = item['poster_path']
if poster == '' or poster is None: poster = '0'
else: poster = self.tmdb_poster + poster
poster = poster.encode('utf-8')
fanart = item['backdrop_path']
if fanart == '' or fanart is None: fanart = '0'
if not fanart == '0': fanart = '%s%s' % (self.tmdb_image, fanart)
fanart = fanart.encode('utf-8')
premiered = item['first_air_date']
try: premiered = re.compile('(\d{4}-\d{2}-\d{2})').findall(premiered)[0]
except: premiered = '0'
premiered = premiered.encode('utf-8')
rating = str(item['vote_average'])
if rating == '' or rating is None: rating = '0'
rating = rating.encode('utf-8')
votes = str(item['vote_count'])
try: votes = str(format(int(votes),',d'))
except: pass
if votes == '' or votes is None: votes = '0'
votes = votes.encode('utf-8')
plot = item['overview']
if plot == '' or plot is None: plot = '0'
plot = client.replaceHTMLCodes(plot)
plot = plot.encode('utf-8')
try:
tagline = item['tagline']
if tagline == '' or tagline == '0' or tagline is None:
tagline = re.compile('[.!?][\s]{1,2}(?=[A-Z])').split(plot)[0]
tagline = tagline.encode('utf-8')
except: tagline = '0'
##--TMDb additional info
url = self.tmdb_info_link % tmdb
item = self.get_request(url)
tvdb = item['external_ids']['tvdb_id']
if tvdb == '' or tvdb is None or tvdb == 'N/A' or tvdb == 'NA': tvdb = '0'
tvdb = re.sub('[^0-9]', '', str(tvdb))
tvdb = tvdb.encode('utf-8')
imdb = item['external_ids']['imdb_id']
if imdb == '' or imdb is None or imdb == 'N/A' or imdb == 'NA': imdb = '0'
imdb = imdb.encode('utf-8')
genre = item['genres']
try: genre = [x['name'] for x in genre]
except: genre = '0'
genre = ' / '.join(genre)
genre = genre.encode('utf-8')
if not genre: genre = 'NA'
try: duration = str(item['runtime'])
except: duration = '0'
if duration == '' or duration is None or duration == 'N/A': duration = '0'
duration = duration.encode('utf-8')
try:
mpaa = [i['rating'] for i in item['content_ratings']['results'] if i['iso_3166_1'] == 'US'][0]
except:
try:
mpaa = item['content_ratings'][0]['rating']
except: mpaa = 'NR'
# studio = item['production_companies']
# try: studio = [x['name'] for x in studio][0]
# except: studio = '0'
# if studio == '' or studio is None: studio = '0'
# studio = studio.encode('utf-8')
studio = item['networks']
try: studio = [x['name'] for x in studio][0]
except: studio = '0'
if studio == '' or studio is None: studio = '0'
studio = studio.encode('utf-8')
director = item['credits']['crew']
try: director = [x['name'] for x in director if x['job'].encode('utf-8') == 'Director']
except: director = '0'
if director == '' or director is None or director == []: director = '0'
director = ' / '.join(director)
director = director.encode('utf-8')
writer = item['credits']['crew']
try: writer = [x['name'] for x in writer if x['job'].encode('utf-8') in ['Writer', 'Screenplay']]
except: writer = '0'
try: writer = [x for n,x in enumerate(writer) if x not in writer[:n]]
except: writer = '0'
if writer == '' or writer is None or writer == []: writer = '0'
writer = ' / '.join(writer)
writer = writer.encode('utf-8')
cast = item['credits']['cast']
try: cast = [(x['name'].encode('utf-8'), x['character'].encode('utf-8')) for x in cast]
except: cast = []
try:
                    if imdb is not None and not imdb == '0':
url = self.imdbinfo % imdb
item = client.request(url, timeout='30')
item = json.loads(item)
plot2 = item['Plot']
plot2 = client.replaceHTMLCodes(plot2)
                        plot2 = plot2.encode('utf-8')
if plot == '0' or plot == '' or plot is None: plot = plot2
rating2 = str(item['imdbRating'])
rating2 = rating2.encode('utf-8')
if rating == '0' or rating == '' or rating is None: rating = rating2
votes2 = str(item['imdbVotes'])
votes2 = str(format(int(votes2),',d'))
votes2 = votes2.encode('utf-8')
if votes == '0' or votes == '' or votes is None: votes = votes2
except:
pass
item = {}
item = {'content': 'movie', 'title': title, 'originaltitle': title, 'year': year, 'premiered': premiered, 'studio': studio, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes,
'mpaa': mpaa, 'director': director, 'writer': writer, 'cast': cast, 'plot': plot, 'tagline': tagline, 'code': tmdb, 'imdb': imdb, 'tmdb': tmdb, 'tvdb': '0', 'poster': poster,
'poster2': '0', 'poster3': '0', 'banner': '0', 'fanart': fanart, 'fanart2': '0', 'fanart3': '0', 'clearlogo': '0', 'clearart': '0', 'landscape': '0', 'metacache': False, 'next': next}
meta = {}
meta = {'imdb': imdb, 'tmdb': tmdb, 'tvdb': '0', 'lang': self.lang, 'user': self.tmdb_key, 'item': item}
# fanart_thread = threading.Thread
from resources.lib.indexers import fanarttv
extended_art = fanarttv.get_tvshow_art(tvdb)
if not extended_art is None:
item.update(extended_art)
meta.update(item)
self.list.append(item)
self.meta.append(meta)
metacache.insert(self.meta)
except:
pass
return self.list
def tmdb_art(self, tmdb):
try:
if self.tmdb_key == '':
raise Exception()
art3 = self.get_request(self.tmdb_art_link % tmdb)
except:
return None
try:
poster3 = art3['posters']
poster3 = [(x['width'], x['file_path']) for x in poster3]
poster3 = [x[1] for x in poster3]
poster3 = self.tmdb_poster + poster3[0]
except:
poster3 = '0'
try:
fanart3 = art3['backdrops']
fanart3 = [(x['width'], x['file_path']) for x in fanart3]
fanart3 = [x[1] for x in fanart3]
fanart3 = self.tmdb_fanart + fanart3[0]
except:
fanart3 = '0'
extended_art = {'extended': True, 'poster3': poster3, 'fanart3': fanart3}
return extended_art |
py | 1a53074dbb96a0e078abf7bc6516992a6156590c | from django.shortcuts import render
# Create your views here.
def base(request):
return render(request, 'baseApp/index.html')
|
py | 1a5307a597e3b4aefbf7a2ae70e127de85c0ac41 | '''
(C) 2014-2016 Roman Sirokov and contributors
Licensed under BSD license
http://github.com/r0x0r/pywebview/
'''
import os
import json
import logging
from uuid import uuid1
from copy import deepcopy
from threading import Semaphore, Event
from webview.localization import localization
from webview import _parse_api_js, _js_bridge_call, _convert_string, _escape_string
from webview import OPEN_DIALOG, FOLDER_DIALOG, SAVE_DIALOG
logger = logging.getLogger(__name__)
# Try importing Qt5 modules
try:
from PyQt5 import QtCore
# Check to see if we're running Qt > 5.5
from PyQt5.QtCore import QT_VERSION_STR
_qt_version = [int(n) for n in QT_VERSION_STR.split('.')]
if _qt_version >= [5, 5]:
from PyQt5.QtWebEngineWidgets import QWebEngineView as QWebView
from PyQt5.QtWebChannel import QWebChannel
else:
from PyQt5.QtWebKitWidgets import QWebView
from PyQt5.QtWidgets import QWidget, QMainWindow, QVBoxLayout, QApplication, QFileDialog, QMessageBox
from PyQt5.QtGui import QColor
logger.debug('Using Qt5')
except ImportError as e:
logger.debug('PyQt5 or one of dependencies is not found', exc_info=True)
_import_error = True
else:
_import_error = False
if _import_error:
# Try importing Qt4 modules
try:
from PyQt4 import QtCore
from PyQt4.QtWebKit import QWebView, QWebFrame
from PyQt4.QtGui import QWidget, QMainWindow, QVBoxLayout, QApplication, QDialog, QFileDialog, QMessageBox, QColor
_qt_version = [4, 0]
logger.debug('Using Qt4')
except ImportError as e:
_import_error = True
else:
_import_error = False
if _import_error:
raise Exception('This module requires PyQt4 or PyQt5 to work under Linux.')
class BrowserView(QMainWindow):
instances = {}
create_window_trigger = QtCore.pyqtSignal(object)
set_title_trigger = QtCore.pyqtSignal(str)
load_url_trigger = QtCore.pyqtSignal(str)
html_trigger = QtCore.pyqtSignal(str)
dialog_trigger = QtCore.pyqtSignal(int, str, bool, str, str)
destroy_trigger = QtCore.pyqtSignal()
fullscreen_trigger = QtCore.pyqtSignal()
current_url_trigger = QtCore.pyqtSignal()
evaluate_js_trigger = QtCore.pyqtSignal(str, str)
class JSBridge(QtCore.QObject):
api = None
parent_uid = None
try:
qtype = QtCore.QJsonValue # QT5
except AttributeError:
qtype = str # QT4
def __init__(self):
super(BrowserView.JSBridge, self).__init__()
@QtCore.pyqtSlot(str, qtype, result=str)
def call(self, func_name, param):
func_name = BrowserView._convert_string(func_name)
param = BrowserView._convert_string(param)
return _js_bridge_call(self.parent_uid, self.api, func_name, param)
def __init__(self, uid, title, url, width, height, resizable, fullscreen,
min_size, confirm_quit, background_color, debug, js_api, webview_ready):
super(BrowserView, self).__init__()
BrowserView.instances[uid] = self
self.uid = uid
self.js_bridge = BrowserView.JSBridge()
self.js_bridge.api = js_api
self.js_bridge.parent_uid = self.uid
self.is_fullscreen = False
self.confirm_quit = confirm_quit
self._file_name_semaphore = Semaphore(0)
self._current_url_semaphore = Semaphore(0)
self.load_event = Event()
self._js_results = {}
self._current_url = None
self._file_name = None
self.resize(width, height)
self.title = title
self.setWindowTitle(title)
# Set window background color
self.background_color = QColor()
self.background_color.setNamedColor(background_color)
palette = self.palette()
palette.setColor(self.backgroundRole(), self.background_color)
self.setPalette(palette)
if not resizable:
self.setFixedSize(width, height)
self.setMinimumSize(min_size[0], min_size[1])
self.view = QWebView(self)
if url is not None:
self.view.setUrl(QtCore.QUrl(url))
else:
self.load_event.set()
self.setCentralWidget(self.view)
self.create_window_trigger.connect(BrowserView.on_create_window)
self.load_url_trigger.connect(self.on_load_url)
self.html_trigger.connect(self.on_load_html)
self.dialog_trigger.connect(self.on_file_dialog)
self.destroy_trigger.connect(self.on_destroy_window)
self.fullscreen_trigger.connect(self.on_fullscreen)
self.current_url_trigger.connect(self.on_current_url)
self.evaluate_js_trigger.connect(self.on_evaluate_js)
self.set_title_trigger.connect(self.on_set_title)
if _qt_version >= [5, 5]:
self.channel = QWebChannel(self.view.page())
self.view.page().setWebChannel(self.channel)
self.view.page().loadFinished.connect(self.on_load_finished)
if fullscreen:
self.toggle_fullscreen()
self.view.setContextMenuPolicy(QtCore.Qt.NoContextMenu) # disable right click context menu
self.move(QApplication.desktop().availableGeometry().center() - self.rect().center())
self.activateWindow()
self.raise_()
webview_ready.set()
def on_set_title(self, title):
self.setWindowTitle(title)
def on_file_dialog(self, dialog_type, directory, allow_multiple, save_filename, file_filter):
if dialog_type == FOLDER_DIALOG:
self._file_name = QFileDialog.getExistingDirectory(self, localization['linux.openFolder'], options=QFileDialog.ShowDirsOnly)
elif dialog_type == OPEN_DIALOG:
if allow_multiple:
self._file_name = QFileDialog.getOpenFileNames(self, localization['linux.openFiles'], directory, file_filter)
else:
self._file_name = QFileDialog.getOpenFileName(self, localization['linux.openFile'], directory, file_filter)
elif dialog_type == SAVE_DIALOG:
if directory:
save_filename = os.path.join(str(directory), str(save_filename))
self._file_name = QFileDialog.getSaveFileName(self, localization['global.saveFile'], save_filename)
self._file_name_semaphore.release()
def on_current_url(self):
url = BrowserView._convert_string(self.view.url().toString())
self._current_url = None if url == '' else url
self._current_url_semaphore.release()
def on_load_url(self, url):
self.view.setUrl(QtCore.QUrl(url))
def on_load_html(self, content):
self.view.setHtml(content, QtCore.QUrl(''))
def closeEvent(self, event):
if self.confirm_quit:
reply = QMessageBox.question(self, self.title, localization['global.quitConfirmation'],
QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.No:
event.ignore()
return
event.accept()
del BrowserView.instances[self.uid]
def on_destroy_window(self):
self.close()
def on_fullscreen(self):
if self.is_fullscreen:
self.showNormal()
else:
self.showFullScreen()
self.is_fullscreen = not self.is_fullscreen
def on_evaluate_js(self, script, uuid):
def return_result(result):
result = BrowserView._convert_string(result)
uuid_ = BrowserView._convert_string(uuid)
js_result = self._js_results[uuid_]
js_result['result'] = None if result is None or result == 'null' else result if result == '' else json.loads(result)
js_result['semaphore'].release()
escaped_script = 'JSON.stringify(eval("{0}"))'.format(_escape_string(script))
try: # PyQt4
result = self.view.page().mainFrame().evaluateJavaScript(escaped_script)
return_result(result)
except AttributeError: # PyQt5
self.view.page().runJavaScript(escaped_script, return_result)
def on_load_finished(self):
if self.js_bridge.api:
self._set_js_api()
else:
self.load_event.set()
def set_title(self, title):
self.set_title_trigger.emit(title)
def get_current_url(self):
self.load_event.wait()
self.current_url_trigger.emit()
self._current_url_semaphore.acquire()
return self._current_url
def load_url(self, url):
self.load_event.clear()
self.load_url_trigger.emit(url)
def load_html(self, content):
self.load_event.clear()
self.html_trigger.emit(content)
def create_file_dialog(self, dialog_type, directory, allow_multiple, save_filename, file_filter):
self.dialog_trigger.emit(dialog_type, directory, allow_multiple, save_filename, file_filter)
self._file_name_semaphore.acquire()
if _qt_version >= [5, 0]: # QT5
if dialog_type == FOLDER_DIALOG:
file_names = (self._file_name,)
elif dialog_type == SAVE_DIALOG or not allow_multiple:
file_names = (self._file_name[0],)
else:
file_names = tuple(self._file_name[0])
else: # QT4
if dialog_type == FOLDER_DIALOG:
file_names = (BrowserView._convert_string(self._file_name),)
elif dialog_type == SAVE_DIALOG or not allow_multiple:
file_names = (BrowserView._convert_string(self._file_name[0]),)
else:
file_names = tuple([BrowserView._convert_string(s) for s in self._file_name])
# Check if we got an empty tuple, or a tuple with empty string
if len(file_names) == 0 or len(file_names[0]) == 0:
return None
else:
return file_names
def destroy_(self):
self.destroy_trigger.emit()
def toggle_fullscreen(self):
self.fullscreen_trigger.emit()
def evaluate_js(self, script):
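        # May be called from a background thread: the script is handed to the GUI thread
        # via evaluate_js_trigger, and the semaphore below blocks until on_evaluate_js()
        # has stored the JSON-parsed result under this call's unique id.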
self.load_event.wait()
result_semaphore = Semaphore(0)
unique_id = uuid1().hex
self._js_results[unique_id] = {'semaphore': result_semaphore, 'result': ''}
self.evaluate_js_trigger.emit(script, unique_id)
result_semaphore.acquire()
result = deepcopy(self._js_results[unique_id]['result'])
del self._js_results[unique_id]
return result
def _set_js_api(self):
def _register_window_object():
frame.addToJavaScriptWindowObject('external', self.js_bridge)
script = _parse_api_js(self.js_bridge.api)
if _qt_version >= [5, 5]:
qwebchannel_js = QtCore.QFile('://qtwebchannel/qwebchannel.js')
if qwebchannel_js.open(QtCore.QFile.ReadOnly):
source = bytes(qwebchannel_js.readAll()).decode('utf-8')
self.view.page().runJavaScript(source)
self.channel.registerObject('external', self.js_bridge)
qwebchannel_js.close()
elif _qt_version >= [5, 0]:
frame = self.view.page().mainFrame()
_register_window_object()
else:
frame = self.view.page().mainFrame()
_register_window_object()
try: # PyQt4
self.view.page().mainFrame().evaluateJavaScript(script)
except AttributeError: # PyQt5
self.view.page().runJavaScript(script)
self.load_event.set()
@staticmethod
def _convert_string(result):
try:
if result is None or result.isNull():
return None
result = result.toString() # QJsonValue conversion
except AttributeError:
pass
return _convert_string(result)
@staticmethod
# Receive func from subthread and execute it on the main thread
def on_create_window(func):
func()
def create_window(uid, title, url, width, height, resizable, fullscreen, min_size,
confirm_quit, background_color, debug, js_api, webview_ready):
app = QApplication.instance() or QApplication([])
def _create():
browser = BrowserView(uid, title, url, width, height, resizable, fullscreen,
min_size, confirm_quit, background_color, debug, js_api,
webview_ready)
browser.show()
if uid == 'master':
_create()
app.exec_()
else:
i = list(BrowserView.instances.values())[0] # arbitrary instance
i.create_window_trigger.emit(_create)
def set_title(title, uid):
BrowserView.instances[uid].set_title(title)
def get_current_url(uid):
return BrowserView.instances[uid].get_current_url()
def load_url(url, uid):
BrowserView.instances[uid].load_url(url)
def load_html(content, uid):
BrowserView.instances[uid].load_html(content)
def destroy_window(uid):
BrowserView.instances[uid].destroy_()
def toggle_fullscreen(uid):
BrowserView.instances[uid].toggle_fullscreen()
def create_file_dialog(dialog_type, directory, allow_multiple, save_filename, file_types):
# Create a file filter by parsing allowed file types
file_types = [s.replace(';', ' ') for s in file_types]
file_filter = ';;'.join(file_types)
i = list(BrowserView.instances.values())[0]
return i.create_file_dialog(dialog_type, directory, allow_multiple, save_filename, file_filter)
def evaluate_js(script, uid):
return BrowserView.instances[uid].evaluate_js(script)
|
py | 1a5307ca797d1890660b391fb837e00f8c833923 | from __future__ import absolute_import, unicode_literals
from django.core import management
from celery import shared_task
import os
@shared_task
def radius_tasks():
management.call_command("delete_old_radacct",
int(os.environ['CRON_DELETE_OLD_RADACCT']))
management.call_command("delete_old_postauth",
int(os.environ['CRON_DELETE_OLD_POSTAUTH']))
management.call_command("cleanup_stale_radacct",
int(os.environ['CRON_CLEANUP_STALE_RADACCT']))
management.call_command("deactivate_expired_users")
management.call_command("delete_old_users",
older_than_months=int(os.environ['CRON_DELETE_OLD_USERS']))
@shared_task
def save_snapshot():
management.call_command("save_snapshot")
@shared_task
def update_topology():
management.call_command("update_topology")
|
py | 1a5308e15e3234da139a575d0ed26222161f9ff1 | #!/usr/bin/env python
from translate.misc import autoencode
from py import test
class TestAutoencode:
type2test = autoencode.autoencode
def test_default_encoding(self):
"""tests that conversion to string uses the encoding attribute"""
s = self.type2test(u'unicode string', 'utf-8')
assert s.encoding == 'utf-8'
assert str(s) == 'unicode string'
s = self.type2test(u'\u20ac')
assert str(self.type2test(u'\u20ac', 'utf-8')) == '\xe2\x82\xac'
def test_uniqueness(self):
"""tests constructor creates unique objects"""
s1 = unicode(u'unicode string')
s2 = unicode(u'unicode string')
assert s1 == s2
assert s1 is s2
s1 = self.type2test(u'unicode string', 'utf-8')
s2 = self.type2test(u'unicode string', 'ascii')
s3 = self.type2test(u'unicode string', 'utf-8')
assert s1 == s2 == s3
assert s1 is not s2
# even though all the attributes are the same, this is a mutable type
# so the objects created must be different
assert s1 is not s3
def test_bad_encoding(self):
"""tests that we throw an exception if we don't know the encoding"""
assert test.raises(ValueError, self.type2test, 'text', 'some-encoding')
|
py | 1a5308ed6929603c586bf57c8203dca798a727c3 | from flask import g
from flask_httpauth import HTTPBasicAuth
from flask_principal import Permission, RoleNeed
from web.manager.models import User
from config import ADMIN_GROUP
api_auth = HTTPBasicAuth()
admin_permission = Permission(RoleNeed(ADMIN_GROUP))
# API authentication setup
@api_auth.verify_password
def verify_password(username_or_token, password):
# try to authenticate with username/password
user = User.query.filter_by(username=username_or_token).first()
if not user or not user.verify_password(password) or user.role != 'admin':
return False
g.user = user
return True
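# Hedged usage sketch (the route and view names are hypothetical, not from this project):
#   @app.route('/api/stats')
#   @api_auth.login_required
#   def api_stats():
#       with admin_permission.require(http_exception=403):
#           return jsonify(stats)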
|
py | 1a5309637d2cd750cc3a3180fff726f1c87c2b58 | # AUTOGENERATED! DO NOT EDIT! File to edit: 31-collate-xml-entities-spans.ipynb (unless otherwise specified).
__all__ = ['genAltEnts', 'generateSpans', 'parseEntities', 'parseManualEntities', 'parseXML', 'genEntryIDs',
'collate_frames', 'manual_collate', 'genSpaCyInput', 'frames_to_spacy']
# Cell
import pandas as pd
import numpy as np
# Cell
def genAltEnts(entity):
'''
Function genAltEnts: This function takes an entity and generates all possible forms of it with errant spaces or pound
symbols mixed in.
Inputs: entity: String of entity
Output: list of possible forms of the entity with spaces and pound signs inserted
'''
alt_ents = []
#individual characters replaced by ' '
for i in range(1, len(entity)):
alt_ents.append((entity[:i]) + ' ' + entity[i:])
#individual characters replaced by #
for j in range(1, len(entity)):
alt_ents.append((entity[:j]) + '#' + entity[j + 1:])
#missing last name
#if (len(entity.split(' ')) > 1):
#alt_ents.append(entity[:entity.rfind(' ')])
return alt_ents
# Cell
#search entry for each instance of an entity reference
def generateSpans(entry, entity):
'''
Function generateSpans: This function takes individual entries and entities from the merged dataframes and
returns a list containing a span defining each instance in which that entity appears in that entry
(spans are returned as nested lists).
Inputs: entry: String of entry text
entity: String of entity whose span is to be located within the text
Output: nested list of spans for the entity for each place it appears within the entry.
'''
curr_index = 0
spans = []
entity_len = len(entity)
#Scroll through each entry and find each instance of the entity and append spans if not already present
while entry.find(entity, curr_index) != -1:
entity_start = entry.find(entity, curr_index)
span = [entity_start, entity_start + entity_len]
curr_index = span[1]
if span not in spans:
spans.append(span)
#If no spans are present
if spans == []:
alts = genAltEnts(entity)
for alt in alts:
            alt_len = len(alt)
            curr_index = 0  # restart the scan for each alternative form
while entry.find(alt, curr_index) != -1:
entity_start = entry.find(alt, curr_index)
span = [entity_start, entity_start + alt_len]
curr_index = span[1]
if span not in spans:
spans.append(span)
return spans
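# Minimal usage sketch of the two helpers above (the strings are invented, not taken
# from the transcription data):
#   entry = "Received of John Smith ten dollars"
#   generateSpans(entry, "John Smith")                          # -> [[12, 22]]
#   generateSpans("Received of J#hn Smith ten", "John Smith")   # matched via a genAltEnts() variant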
# Cell
def parseEntities(df_entities):
'''
Function parseEntities: takes entity reference dataframe as input, returns each column as a list for ease of processing
and boils source references down to unique portion
Inputs: df_entities: pandas DataFrame with columns "Entity Type", "Name", and "Source Associator"
Outputs: 3 lists: entity types, entities, and parsed source associators
'''
types = df_entities["Entity Type"].tolist()
names = df_entities["Name"].tolist()
sources = []
for source_assoc in df_entities["Source Associator"]:
temp = []
refs = source_assoc.split(';')
for ref in refs:
source = ref[ref.find('-') + 1:]
while source[0] == '0':
source = source[1:]
if (not source in temp):
temp.append(source)
sources.append(temp)
return types, names, sources
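# Hedged example of the source-associator parsing above (the row values are invented):
# a "Source Associator" of "vol1-00023;vol1-00023;vol2-00007" reduces to ['23', '7'];
# the prefix before '-' and any leading zeros are dropped, and duplicates are collapsed.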
# Cell
def parseManualEntities(csv_entities):
'''
Function parseManualEntities: takes manually produced entity reference csv as input and produces same outputs
as those of parseEntities
Inputs: csv_entities: path to csv containing two columns, one with folio ids and one with a list of names appearing on
each folio, separated by semicolons
    Outputs: 3 lists: entity types, entities, and parsed source associators
'''
csv = open(csv_entities, 'r', encoding="utf-8")
folios = []
people = []
for line in csv:
if line[-1] == '\n':
line = line[:-1]
if "Folio" in line:
continue
folio, temp = line.split(',')
ppl = temp.split(';')
folios.append(folio)
people.append(ppl)
csv.close()
names = []
sources = []
for x in range(len(folios)):
for person in people[x]:
if not (person in names):
names.append(person)
temp = [folios[x]]
sources.append(temp)
else:
loc = names.index(person)
if not (folios[x] in sources[loc]):
sources[loc].append(folios[x])
types = ["PER"] * len(names)
return types, names, sources
# Cell
def parseXML(df_XML):
'''
Function parseXML: takes volume XML dataframe as input and returns each column as a list
Inputs: df_XML: pandas DataFrame with columns "vol_id", 'vol_titl', 'fol_id', 'entry_no', and 'text'
    Outputs: 5 lists corresponding to each column: volume ids, volume titles, folio ids, entry numbers, and entry texts
'''
volume_ids = df_XML["vol_id"].tolist()
volume_titles = df_XML["vol_titl"].tolist()
folio_ids = df_XML["fol_id"].tolist()
entry_numbers = df_XML["entry_no"].tolist()
entry_texts = df_XML["text"].tolist()
return volume_ids, volume_titles, folio_ids, entry_numbers, entry_texts
# Cell
def genEntryIDs(folio_ids, entry_ids, texts, entity, entity_folio):
'''
    Function genEntryIDs: returns a list of IDs for all entries in which an input entity appears in a folio.
    Inputs: Sets of lists, where the index of each list corresponds to the information in every other list index (i.e., each list is a column of a dataframe)
folio_ids: list of folio IDs to be processed
entry_ids: list of IDs for each entry
texts: list of entry texts
entity: entity to be found in the data
entity_folio: folio in which to look for the entity
    Output: list of entry IDs for the entries in which the entity appears
'''
#find the indices of the entity folio of interest
np_fol = np.array(folio_ids)
folio_indices = np.where(np_fol == entity_folio)[0]
matches = []
    #Determine if the entity is in the folios of interest and append entry_id if so
for entries in folio_indices:
if texts[entries].find(entity) != -1:
matches.append(entry_ids[entries])
    #Check to see if the entity has an unusual representation with '#' or ' ' inserted
if matches == []:
alts = genAltEnts(entity)
for alt in alts:
for entries in folio_indices:
if (texts[entries].find(alt) != -1) and (entry_ids[entries] not in matches):
matches.append(entry_ids[entries])
return matches
# Cell
def collate_frames(xml_df, ent_df):
'''
Function collate_frames: combines XML dataframe and entity dataframe to generate final dataframe with spans
Inputs: xml_df: pandas DataFrame with columns "vol_id", 'vol_titl', 'fol_id', 'entry_no', and 'text'
ent_df: pandas DataFrame with columns "Entity Type", "Name", and "Source Associator"
Outputs: a joined, collated dataframe of xml_df and ent_df
'''
#parsing input frames into lists
vol_ids, vol_titls, fol_ids, entry_nos, entry_texts = parseXML(xml_df)
ent_types, ent_names, ent_fols = parseEntities(ent_df)
#building entry ID lists for entities
ent_entries = [[]] * len(ent_names)
index = 0
for entity in ent_names:
for folio in ent_fols[index]:
ent_entries[index] = ent_entries[index] + (genEntryIDs(fol_ids, entry_nos, entry_texts, entity, folio))
index += 1
#creating collated lists
out_volume_ids = []
out_volume_titles = []
out_folio_ids = []
out_entry_numbers = []
out_entry_texts = []
out_entity_names = []
out_span_starts = []
out_span_ends = []
out_labels = []
index = 0 #this is clumsy and could probably be implemented better with numpy,
#but it's to protect against multiple instances of the same entity name
for entity in ent_names:
for entry in ent_entries[index]:
folio_id = entry[:entry.find('-')]
volume_id = vol_ids[fol_ids.index(folio_id)]
volume_title = vol_titls[fol_ids.index(folio_id)]
entry_text = entry_texts[entry_nos.index(entry)]
label = ent_types[ent_names.index(entity)]
spans = generateSpans(entry_text, entity)
for span in spans:
out_volume_ids.append(volume_id)
out_volume_titles.append(volume_title)
out_folio_ids.append(folio_id)
out_entry_numbers.append(entry)
out_entry_texts.append(entry_text)
out_entity_names.append(entity)
out_span_starts.append(span[0])
out_span_ends.append(span[1])
out_labels.append(label)
index += 1
#The loop below addresses a rare corner case that could result in tokens being mapped to multiple entities, which spaCy
#does not allow. This occurs when one entity string in a given entry is a substring of another that appears in the same
#entry.
spanned_entries = [] #list of unique entries with entity spans
spanned_tokens_per_entry = [] #list of indices that appear in spans for each entry
i = 0
while i < len(out_entry_numbers):
if out_entry_numbers[i] in spanned_entries:
for k in range(out_span_starts[i], out_span_ends[i]):
if k in spanned_tokens_per_entry[spanned_entries.index(out_entry_numbers[i])]:
del out_volume_ids[i]
del out_volume_titles[i]
del out_folio_ids[i]
del out_entry_numbers[i]
del out_entry_texts[i]
del out_entity_names[i]
del out_span_starts[i]
del out_span_ends[i]
del out_labels[i]
i -= 1
break
else:
spanned_tokens_per_entry[spanned_entries.index(out_entry_numbers[i])].append(k)
else:
spanned_entries.append(out_entry_numbers[i])
temp = []
for j in range(out_span_starts[i], out_span_ends[i]):
temp.append(j)
spanned_tokens_per_entry.append(temp)
i += 1
collated_dict = {"vol_id": out_volume_ids, "vol_titl": out_volume_titles, "fol_id": out_folio_ids, "entry_no": out_entry_numbers, "text": out_entry_texts, "entity": out_entity_names, "start": out_span_starts, "end": out_span_ends, "label": out_labels}
collated_df = pd.DataFrame(collated_dict)
return collated_df
# Cell
def manual_collate(xml_df, ent_csv):
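    '''
    Function manual_collate: same as collate_frames, but takes the path to a manually produced
    entity csv (see parseManualEntities) instead of an entity dataframe.
    Inputs: xml_df: pandas DataFrame with columns "vol_id", 'vol_titl', 'fol_id', 'entry_no', and 'text'
            ent_csv: path to csv containing folio ids and semicolon-separated names
    Outputs: a joined, collated dataframe of xml_df and the csv-derived entities
    '''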
#parsing input frames into lists
vol_ids, vol_titls, fol_ids, entry_nos, entry_texts = parseXML(xml_df)
ent_types, ent_names, ent_fols = parseManualEntities(ent_csv)
#building entry ID lists for entities
ent_entries = [[]] * len(ent_names)
index = 0
for entity in ent_names:
for folio in ent_fols[index]:
ent_entries[index] = ent_entries[index] + (genEntryIDs(fol_ids, entry_nos, entry_texts, entity, folio))
index += 1
#creating collated lists
out_volume_ids = []
out_volume_titles = []
out_folio_ids = []
out_entry_numbers = []
out_entry_texts = []
out_entity_names = []
out_span_starts = []
out_span_ends = []
out_labels = []
index = 0 #this is clumsy and could probably be implemented better with numpy,
#but it's to protect against multiple instances of the same entity name
for entity in ent_names:
for entry in ent_entries[index]:
folio_id = entry[:entry.find('-')]
volume_id = vol_ids[fol_ids.index(folio_id)]
volume_title = vol_titls[fol_ids.index(folio_id)]
entry_text = entry_texts[entry_nos.index(entry)]
label = ent_types[ent_names.index(entity)]
spans = generateSpans(entry_text, entity)
for span in spans:
out_volume_ids.append(volume_id)
out_volume_titles.append(volume_title)
out_folio_ids.append(folio_id)
out_entry_numbers.append(entry)
out_entry_texts.append(entry_text)
out_entity_names.append(entity)
out_span_starts.append(span[0])
out_span_ends.append(span[1])
out_labels.append(label)
index += 1
#The loop below addresses a rare corner case that could result in tokens being mapped to multiple entities, which spaCy
#does not allow. This occurs when one entity string in a given entry is a substring of another that appears in the same
#entry.
spanned_entries = [] #list of unique entries with entity spans
spanned_entities_per_entry = [] #list of unique entity name strings in each entry
for entry_number in out_entry_numbers:
if not (entry_number in spanned_entries):
spanned_entries.append(entry_number)
spanned_entities_per_entry.append([])
for i in range(len(out_entity_names)):
entry_pos = spanned_entries.index(out_entry_numbers[i])
if not (out_entity_names[i] in spanned_entities_per_entry[entry_pos]):
spanned_entities_per_entry[entry_pos].append(out_entity_names[i])
output_pos = 0
while output_pos < len(out_entity_names):
entity_entry = spanned_entries.index(out_entry_numbers[output_pos])
for spanned_entity in spanned_entities_per_entry[entity_entry]:
if (out_entity_names[output_pos] in spanned_entity) and (out_entity_names[output_pos] != spanned_entity):
del out_volume_ids[output_pos]
del out_volume_titles[output_pos]
del out_folio_ids[output_pos]
del out_entry_numbers[output_pos]
del out_entry_texts[output_pos]
del out_entity_names[output_pos]
del out_span_starts[output_pos]
del out_span_ends[output_pos]
del out_labels[output_pos]
output_pos -= 1
break
output_pos += 1
collated_dict = {"vol_id": out_volume_ids, "vol_titl": out_volume_titles, "fol_id": out_folio_ids, "entry_no": out_entry_numbers, "text": out_entry_texts, "entity": out_entity_names, "start": out_span_starts, "end": out_span_ends, "label": out_labels}
collated_df = pd.DataFrame(collated_dict)
return collated_df
# Cell
def genSpaCyInput(df):
'''
    Function genSpaCyInput: takes a dataframe with columns/contents as above and transforms it into a list of tuples, each consisting of raw text
paired with a dictionary of entities that appear in that text, for input into spaCy.
Inputs: df: dataframe with columns 'text', 'label', 'start', 'end'
Outputs: data in SpaCy format
'''
text = df["text"].tolist()
label = df["label"].tolist()
start = df["start"].tolist()
end = df["end"].tolist()
#build list of unique entries and list of empty annotation dictionaries for each
u_txt = []
annot_ls = []
for tx in text:
if not (tx in u_txt):
u_txt.append(tx)
annot_ls.append({"entities":[]})
#populate annotation dictionaries
for i in range(len(label)):
pos = u_txt.index(text[i])
annot_ls[pos]["entities"].append((int(start[i]), int(end[i]), label[i]))
#build list of tuples
tuples = []
for i in range(len(u_txt)):
tuples.append((u_txt[i], annot_ls[i]))
return tuples
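# Cell
# Added illustrative example (not part of the original notebook): genSpaCyInput produces
# (text, {"entities": [(start, end, label), ...]}) tuples, the training format used by
# spaCy's NER pipelines. The tiny dataframe below is made up for demonstration only.
if __name__ == "__main__":
    _demo = pd.DataFrame({"text": ["John Smith owes five pounds."],
                          "label": ["PER"], "start": [0], "end": [10]})
    print(genSpaCyInput(_demo))
    # [('John Smith owes five pounds.', {'entities': [(0, 10, 'PER')]})]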
# Cell
def frames_to_spacy(xml_df, ent_df):
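    '''
    Function frames_to_spacy: convenience wrapper that collates the XML and entity dataframes
    and converts the result directly into spaCy training tuples.
    '''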
df = collate_frames(xml_df, ent_df)
return genSpaCyInput(df) |
py | 1a5309e7379d99d16124fb81ffef0d49177c9c57 | import discord
import asyncio
import io
import aiohttp
import utils
from random_generators import RandomMessageGenerator
from base_client import BaseClient
class DiscordClient(discord.Client, BaseClient):
def __init__(self, discord_config, send_handler):
discord.Client.__init__(self)
self.channel = discord_config.channel_id
self.token = discord_config.client_token
self.send_handler = send_handler
async def on_message(self, message):
if message.author == self.user:
return
if message.channel.id != self.channel:
return
try:
if message.content != None and len(message.content) > 0:
text = (
utils.format_message(
message.author.name + RandomMessageGenerator.get_random_said()
)
+ message.content
)
else:
text = None
if len(message.attachments) > 0:
urls = [a.url for a in message.attachments]
else:
urls = None
self.send_handler(self.get_client_name(), text, urls)
except Exception as e:
print(e)
async def on_ready(self):
print("We have logged in as {0.user}".format(self))
def send_message(self, text=None, urls=None):
self.loop.create_task(
self.send_message_in_loop(self.get_channel(self.channel), text, urls)
)
async def send_message_in_loop(self, channel, message=None, files=None):
try:
if files is not None:
for file in files:
async with aiohttp.ClientSession() as session:
async with session.get(file) as resp:
if resp.status != 200:
return await channel.send("Could not download file...")
data = io.BytesIO(await resp.read())
await channel.send(
file=discord.File(data, "cool_image.png")
)
if message is not None:
await channel.send(message)
except Exception as e:
print(e)
@staticmethod
def get_client_name():
return "Discord"
def is_threadable(self) -> bool:
return False
def run_client(self, *args):
token = args[0]
self.run(token)
def get_run_args(self):
return self.token
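# Added illustrative sketch (not part of the original module): wiring the client up. The
# config class and handler below are made-up stand-ins; any object exposing `channel_id`
# and `client_token` works, and the token shown is only a placeholder.
if __name__ == "__main__":
    class _DemoConfig:
        channel_id = 123456789012345678
        client_token = "YOUR_DISCORD_BOT_TOKEN"
    def _print_handler(client_name, text, urls):
        print(client_name, text, urls)
    client = DiscordClient(_DemoConfig(), _print_handler)
    client.run_client(client.get_run_args())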
|
py | 1a5309f9fcee574befa275d3c579ffbef0f4240b | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from dynamixel_workbench_msgs/XH.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class XH(genpy.Message):
_md5sum = "e5260697f25a6834d84f685c2f474e33"
_type = "dynamixel_workbench_msgs/XH"
_has_header = False # flag to mark the presence of a Header object
_full_text = """# This message is compatible with control table of Dynamixel XH Series (XH430-W210, XH430-W350, XH430-V210, XH430-V350)
# If you want to specific information about control table, please follow the link (http://emanual.robotis.com/)
uint16 Model_Number
uint8 Firmware_Version
uint8 ID
uint8 Baud_Rate
uint8 Return_Delay_Time
uint8 Drive_Mode
uint8 Operating_Mode
uint8 Secondary_ID
uint8 Protocol_Version
int32 Homing_Offset
uint32 Moving_Threshold
uint8 Temperature_Limit
uint16 Max_Voltage_Limit
uint16 Min_Voltage_Limit
uint16 PWM_Limit
uint16 Current_Limit
uint32 Acceleration_Limit
uint32 Velocity_Limit
uint32 Max_Position_Limit
uint32 Min_Position_Limit
uint8 Shutdown
uint8 Torque_Enable
uint8 LED
uint8 Status_Return_Level
uint8 Registered_Instruction
uint8 Hardware_Error_Status
uint16 Velocity_I_Gain
uint16 Velocity_P_Gain
uint16 Position_D_Gain
uint16 Position_I_Gain
uint16 Position_P_Gain
uint16 Feedforward_2nd_Gain
uint16 Feedforward_1st_Gain
uint8 Bus_Watchdog
int16 Goal_PWM
int16 Goal_Current
int32 Goal_Velocity
uint32 Profile_Acceleration
uint32 Profile_Velocity
uint32 Goal_Position
uint16 Realtime_Tick
uint8 Moving
uint8 Moving_Status
int16 Present_PWM
int16 Present_Current
int32 Present_Velocity
int32 Present_Position
uint32 Velocity_Trajectory
uint32 Position_Trajectory
uint16 Present_Input_Voltage
uint8 Present_Temperature
"""
__slots__ = ['Model_Number','Firmware_Version','ID','Baud_Rate','Return_Delay_Time','Drive_Mode','Operating_Mode','Secondary_ID','Protocol_Version','Homing_Offset','Moving_Threshold','Temperature_Limit','Max_Voltage_Limit','Min_Voltage_Limit','PWM_Limit','Current_Limit','Acceleration_Limit','Velocity_Limit','Max_Position_Limit','Min_Position_Limit','Shutdown','Torque_Enable','LED','Status_Return_Level','Registered_Instruction','Hardware_Error_Status','Velocity_I_Gain','Velocity_P_Gain','Position_D_Gain','Position_I_Gain','Position_P_Gain','Feedforward_2nd_Gain','Feedforward_1st_Gain','Bus_Watchdog','Goal_PWM','Goal_Current','Goal_Velocity','Profile_Acceleration','Profile_Velocity','Goal_Position','Realtime_Tick','Moving','Moving_Status','Present_PWM','Present_Current','Present_Velocity','Present_Position','Velocity_Trajectory','Position_Trajectory','Present_Input_Voltage','Present_Temperature']
_slot_types = ['uint16','uint8','uint8','uint8','uint8','uint8','uint8','uint8','uint8','int32','uint32','uint8','uint16','uint16','uint16','uint16','uint32','uint32','uint32','uint32','uint8','uint8','uint8','uint8','uint8','uint8','uint16','uint16','uint16','uint16','uint16','uint16','uint16','uint8','int16','int16','int32','uint32','uint32','uint32','uint16','uint8','uint8','int16','int16','int32','int32','uint32','uint32','uint16','uint8']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
Model_Number,Firmware_Version,ID,Baud_Rate,Return_Delay_Time,Drive_Mode,Operating_Mode,Secondary_ID,Protocol_Version,Homing_Offset,Moving_Threshold,Temperature_Limit,Max_Voltage_Limit,Min_Voltage_Limit,PWM_Limit,Current_Limit,Acceleration_Limit,Velocity_Limit,Max_Position_Limit,Min_Position_Limit,Shutdown,Torque_Enable,LED,Status_Return_Level,Registered_Instruction,Hardware_Error_Status,Velocity_I_Gain,Velocity_P_Gain,Position_D_Gain,Position_I_Gain,Position_P_Gain,Feedforward_2nd_Gain,Feedforward_1st_Gain,Bus_Watchdog,Goal_PWM,Goal_Current,Goal_Velocity,Profile_Acceleration,Profile_Velocity,Goal_Position,Realtime_Tick,Moving,Moving_Status,Present_PWM,Present_Current,Present_Velocity,Present_Position,Velocity_Trajectory,Position_Trajectory,Present_Input_Voltage,Present_Temperature
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(XH, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.Model_Number is None:
self.Model_Number = 0
if self.Firmware_Version is None:
self.Firmware_Version = 0
if self.ID is None:
self.ID = 0
if self.Baud_Rate is None:
self.Baud_Rate = 0
if self.Return_Delay_Time is None:
self.Return_Delay_Time = 0
if self.Drive_Mode is None:
self.Drive_Mode = 0
if self.Operating_Mode is None:
self.Operating_Mode = 0
if self.Secondary_ID is None:
self.Secondary_ID = 0
if self.Protocol_Version is None:
self.Protocol_Version = 0
if self.Homing_Offset is None:
self.Homing_Offset = 0
if self.Moving_Threshold is None:
self.Moving_Threshold = 0
if self.Temperature_Limit is None:
self.Temperature_Limit = 0
if self.Max_Voltage_Limit is None:
self.Max_Voltage_Limit = 0
if self.Min_Voltage_Limit is None:
self.Min_Voltage_Limit = 0
if self.PWM_Limit is None:
self.PWM_Limit = 0
if self.Current_Limit is None:
self.Current_Limit = 0
if self.Acceleration_Limit is None:
self.Acceleration_Limit = 0
if self.Velocity_Limit is None:
self.Velocity_Limit = 0
if self.Max_Position_Limit is None:
self.Max_Position_Limit = 0
if self.Min_Position_Limit is None:
self.Min_Position_Limit = 0
if self.Shutdown is None:
self.Shutdown = 0
if self.Torque_Enable is None:
self.Torque_Enable = 0
if self.LED is None:
self.LED = 0
if self.Status_Return_Level is None:
self.Status_Return_Level = 0
if self.Registered_Instruction is None:
self.Registered_Instruction = 0
if self.Hardware_Error_Status is None:
self.Hardware_Error_Status = 0
if self.Velocity_I_Gain is None:
self.Velocity_I_Gain = 0
if self.Velocity_P_Gain is None:
self.Velocity_P_Gain = 0
if self.Position_D_Gain is None:
self.Position_D_Gain = 0
if self.Position_I_Gain is None:
self.Position_I_Gain = 0
if self.Position_P_Gain is None:
self.Position_P_Gain = 0
if self.Feedforward_2nd_Gain is None:
self.Feedforward_2nd_Gain = 0
if self.Feedforward_1st_Gain is None:
self.Feedforward_1st_Gain = 0
if self.Bus_Watchdog is None:
self.Bus_Watchdog = 0
if self.Goal_PWM is None:
self.Goal_PWM = 0
if self.Goal_Current is None:
self.Goal_Current = 0
if self.Goal_Velocity is None:
self.Goal_Velocity = 0
if self.Profile_Acceleration is None:
self.Profile_Acceleration = 0
if self.Profile_Velocity is None:
self.Profile_Velocity = 0
if self.Goal_Position is None:
self.Goal_Position = 0
if self.Realtime_Tick is None:
self.Realtime_Tick = 0
if self.Moving is None:
self.Moving = 0
if self.Moving_Status is None:
self.Moving_Status = 0
if self.Present_PWM is None:
self.Present_PWM = 0
if self.Present_Current is None:
self.Present_Current = 0
if self.Present_Velocity is None:
self.Present_Velocity = 0
if self.Present_Position is None:
self.Present_Position = 0
if self.Velocity_Trajectory is None:
self.Velocity_Trajectory = 0
if self.Position_Trajectory is None:
self.Position_Trajectory = 0
if self.Present_Input_Voltage is None:
self.Present_Input_Voltage = 0
if self.Present_Temperature is None:
self.Present_Temperature = 0
else:
self.Model_Number = 0
self.Firmware_Version = 0
self.ID = 0
self.Baud_Rate = 0
self.Return_Delay_Time = 0
self.Drive_Mode = 0
self.Operating_Mode = 0
self.Secondary_ID = 0
self.Protocol_Version = 0
self.Homing_Offset = 0
self.Moving_Threshold = 0
self.Temperature_Limit = 0
self.Max_Voltage_Limit = 0
self.Min_Voltage_Limit = 0
self.PWM_Limit = 0
self.Current_Limit = 0
self.Acceleration_Limit = 0
self.Velocity_Limit = 0
self.Max_Position_Limit = 0
self.Min_Position_Limit = 0
self.Shutdown = 0
self.Torque_Enable = 0
self.LED = 0
self.Status_Return_Level = 0
self.Registered_Instruction = 0
self.Hardware_Error_Status = 0
self.Velocity_I_Gain = 0
self.Velocity_P_Gain = 0
self.Position_D_Gain = 0
self.Position_I_Gain = 0
self.Position_P_Gain = 0
self.Feedforward_2nd_Gain = 0
self.Feedforward_1st_Gain = 0
self.Bus_Watchdog = 0
self.Goal_PWM = 0
self.Goal_Current = 0
self.Goal_Velocity = 0
self.Profile_Acceleration = 0
self.Profile_Velocity = 0
self.Goal_Position = 0
self.Realtime_Tick = 0
self.Moving = 0
self.Moving_Status = 0
self.Present_PWM = 0
self.Present_Current = 0
self.Present_Velocity = 0
self.Present_Position = 0
self.Velocity_Trajectory = 0
self.Position_Trajectory = 0
self.Present_Input_Voltage = 0
self.Present_Temperature = 0
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_H8BiIB4H4I6B7HB2hi3IH2B2h2i2IHB().pack(_x.Model_Number, _x.Firmware_Version, _x.ID, _x.Baud_Rate, _x.Return_Delay_Time, _x.Drive_Mode, _x.Operating_Mode, _x.Secondary_ID, _x.Protocol_Version, _x.Homing_Offset, _x.Moving_Threshold, _x.Temperature_Limit, _x.Max_Voltage_Limit, _x.Min_Voltage_Limit, _x.PWM_Limit, _x.Current_Limit, _x.Acceleration_Limit, _x.Velocity_Limit, _x.Max_Position_Limit, _x.Min_Position_Limit, _x.Shutdown, _x.Torque_Enable, _x.LED, _x.Status_Return_Level, _x.Registered_Instruction, _x.Hardware_Error_Status, _x.Velocity_I_Gain, _x.Velocity_P_Gain, _x.Position_D_Gain, _x.Position_I_Gain, _x.Position_P_Gain, _x.Feedforward_2nd_Gain, _x.Feedforward_1st_Gain, _x.Bus_Watchdog, _x.Goal_PWM, _x.Goal_Current, _x.Goal_Velocity, _x.Profile_Acceleration, _x.Profile_Velocity, _x.Goal_Position, _x.Realtime_Tick, _x.Moving, _x.Moving_Status, _x.Present_PWM, _x.Present_Current, _x.Present_Velocity, _x.Present_Position, _x.Velocity_Trajectory, _x.Position_Trajectory, _x.Present_Input_Voltage, _x.Present_Temperature))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
if python3:
codecs.lookup_error("rosmsg").msg_type = self._type
try:
end = 0
_x = self
start = end
end += 111
(_x.Model_Number, _x.Firmware_Version, _x.ID, _x.Baud_Rate, _x.Return_Delay_Time, _x.Drive_Mode, _x.Operating_Mode, _x.Secondary_ID, _x.Protocol_Version, _x.Homing_Offset, _x.Moving_Threshold, _x.Temperature_Limit, _x.Max_Voltage_Limit, _x.Min_Voltage_Limit, _x.PWM_Limit, _x.Current_Limit, _x.Acceleration_Limit, _x.Velocity_Limit, _x.Max_Position_Limit, _x.Min_Position_Limit, _x.Shutdown, _x.Torque_Enable, _x.LED, _x.Status_Return_Level, _x.Registered_Instruction, _x.Hardware_Error_Status, _x.Velocity_I_Gain, _x.Velocity_P_Gain, _x.Position_D_Gain, _x.Position_I_Gain, _x.Position_P_Gain, _x.Feedforward_2nd_Gain, _x.Feedforward_1st_Gain, _x.Bus_Watchdog, _x.Goal_PWM, _x.Goal_Current, _x.Goal_Velocity, _x.Profile_Acceleration, _x.Profile_Velocity, _x.Goal_Position, _x.Realtime_Tick, _x.Moving, _x.Moving_Status, _x.Present_PWM, _x.Present_Current, _x.Present_Velocity, _x.Present_Position, _x.Velocity_Trajectory, _x.Position_Trajectory, _x.Present_Input_Voltage, _x.Present_Temperature,) = _get_struct_H8BiIB4H4I6B7HB2hi3IH2B2h2i2IHB().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_H8BiIB4H4I6B7HB2hi3IH2B2h2i2IHB().pack(_x.Model_Number, _x.Firmware_Version, _x.ID, _x.Baud_Rate, _x.Return_Delay_Time, _x.Drive_Mode, _x.Operating_Mode, _x.Secondary_ID, _x.Protocol_Version, _x.Homing_Offset, _x.Moving_Threshold, _x.Temperature_Limit, _x.Max_Voltage_Limit, _x.Min_Voltage_Limit, _x.PWM_Limit, _x.Current_Limit, _x.Acceleration_Limit, _x.Velocity_Limit, _x.Max_Position_Limit, _x.Min_Position_Limit, _x.Shutdown, _x.Torque_Enable, _x.LED, _x.Status_Return_Level, _x.Registered_Instruction, _x.Hardware_Error_Status, _x.Velocity_I_Gain, _x.Velocity_P_Gain, _x.Position_D_Gain, _x.Position_I_Gain, _x.Position_P_Gain, _x.Feedforward_2nd_Gain, _x.Feedforward_1st_Gain, _x.Bus_Watchdog, _x.Goal_PWM, _x.Goal_Current, _x.Goal_Velocity, _x.Profile_Acceleration, _x.Profile_Velocity, _x.Goal_Position, _x.Realtime_Tick, _x.Moving, _x.Moving_Status, _x.Present_PWM, _x.Present_Current, _x.Present_Velocity, _x.Present_Position, _x.Velocity_Trajectory, _x.Position_Trajectory, _x.Present_Input_Voltage, _x.Present_Temperature))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
if python3:
codecs.lookup_error("rosmsg").msg_type = self._type
try:
end = 0
_x = self
start = end
end += 111
(_x.Model_Number, _x.Firmware_Version, _x.ID, _x.Baud_Rate, _x.Return_Delay_Time, _x.Drive_Mode, _x.Operating_Mode, _x.Secondary_ID, _x.Protocol_Version, _x.Homing_Offset, _x.Moving_Threshold, _x.Temperature_Limit, _x.Max_Voltage_Limit, _x.Min_Voltage_Limit, _x.PWM_Limit, _x.Current_Limit, _x.Acceleration_Limit, _x.Velocity_Limit, _x.Max_Position_Limit, _x.Min_Position_Limit, _x.Shutdown, _x.Torque_Enable, _x.LED, _x.Status_Return_Level, _x.Registered_Instruction, _x.Hardware_Error_Status, _x.Velocity_I_Gain, _x.Velocity_P_Gain, _x.Position_D_Gain, _x.Position_I_Gain, _x.Position_P_Gain, _x.Feedforward_2nd_Gain, _x.Feedforward_1st_Gain, _x.Bus_Watchdog, _x.Goal_PWM, _x.Goal_Current, _x.Goal_Velocity, _x.Profile_Acceleration, _x.Profile_Velocity, _x.Goal_Position, _x.Realtime_Tick, _x.Moving, _x.Moving_Status, _x.Present_PWM, _x.Present_Current, _x.Present_Velocity, _x.Present_Position, _x.Velocity_Trajectory, _x.Position_Trajectory, _x.Present_Input_Voltage, _x.Present_Temperature,) = _get_struct_H8BiIB4H4I6B7HB2hi3IH2B2h2i2IHB().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_H8BiIB4H4I6B7HB2hi3IH2B2h2i2IHB = None
def _get_struct_H8BiIB4H4I6B7HB2hi3IH2B2h2i2IHB():
global _struct_H8BiIB4H4I6B7HB2hi3IH2B2h2i2IHB
if _struct_H8BiIB4H4I6B7HB2hi3IH2B2h2i2IHB is None:
_struct_H8BiIB4H4I6B7HB2hi3IH2B2h2i2IHB = struct.Struct("<H8BiIB4H4I6B7HB2hi3IH2B2h2i2IHB")
return _struct_H8BiIB4H4I6B7HB2hi3IH2B2h2i2IHB
|
py | 1a530aee12bc8ef2a1ecd8937dd3147ec7d3c1c1 | # -*- coding: utf-8 -*-
# Copyright (C) 2020 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'openstackdocstheme',
]
# openstackdocstheme options
openstackdocs_repo_name = 'openstack/oslo.rootwrap'
openstackdocs_bug_project = 'oslo.rootwrap'
openstackdocs_bug_tag = ''
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'oslo.rootwrap'
copyright = u'2014, OpenStack Foundation'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
html_theme = 'openstackdocs'
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index',
'%s.tex' % project,
u'%s Documentation' % project,
u'OpenStack Foundation', 'manual'),
]
|
py | 1a530b24e8efb49ba92a3f52f5ca060324777582 | # Copyright (c) 2017, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import itertools
import pandas as pd
import os
import unittest
from coremltools._deps import HAS_SKLEARN
from coremltools.converters.sklearn import convert
from coremltools.models.utils import evaluate_classifier,\
evaluate_classifier_with_probabilities, macos_version, is_macos
if HAS_SKLEARN:
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
@unittest.skipIf(not HAS_SKLEARN, 'Missing sklearn. Skipping tests.')
class GlmCassifierTest(unittest.TestCase):
def test_logistic_regression_binary_classification_with_string_labels(self):
self._conversion_and_evaluation_helper_for_logistic_regression(['Foo', 'Bar'])
def test_logistic_regression_multiclass_classification_with_int_labels(self):
self._conversion_and_evaluation_helper_for_logistic_regression([1,2,3,4])
@staticmethod
def _generate_random_data(labels):
import random
random.seed(42)
# Generate some random data
x, y = [], []
for _ in range(100):
x.append([random.gauss(2,3), random.gauss(-1,2)])
y.append(random.choice(labels))
return x, y
def _conversion_and_evaluation_helper_for_logistic_regression(self, class_labels):
options = {
'C': (0.1, 1., 2.),
'fit_intercept': (True, False),
'class_weight': ('balanced', None),
'solver': ('newton-cg', 'lbfgs', 'liblinear', 'sag')
}
# Generate a list of all combinations of options and the default parameters
product = itertools.product(*options.values())
args = [{}] + [dict(zip(options.keys(), p)) for p in product]
x, y = GlmCassifierTest._generate_random_data(class_labels)
column_names = ['x1', 'x2']
df = pd.DataFrame(x, columns=column_names)
for cur_args in args:
print(class_labels, cur_args)
cur_model = LogisticRegression(**cur_args)
cur_model.fit(x, y)
spec = convert(cur_model, input_features=column_names,
output_feature_names='target')
if is_macos() and macos_version() >= (10, 13):
probability_lists = cur_model.predict_proba(x)
df['classProbability'] = [dict(zip(cur_model.classes_, cur_vals)) for cur_vals in probability_lists]
metrics = evaluate_classifier_with_probabilities(spec, df, probabilities='classProbability', verbose=False)
self.assertEquals(metrics['num_key_mismatch'], 0)
self.assertLess(metrics['max_probability_error'], 0.00001)
def test_linear_svc_binary_classification_with_string_labels(self):
self._conversion_and_evaluation_helper_for_linear_svc(['Foo', 'Bar'])
def test_linear_svc_multiclass_classification_with_int_labels(self):
self._conversion_and_evaluation_helper_for_linear_svc([1,2,3,4])
def _conversion_and_evaluation_helper_for_linear_svc(self, class_labels):
ARGS = [ {},
{'C' : .75, 'loss': 'hinge'},
{'penalty': 'l1', 'dual': False},
{'tol': 0.001, 'fit_intercept': False},
{'intercept_scaling': 1.5}
]
x, y = GlmCassifierTest._generate_random_data(class_labels)
column_names = ['x1', 'x2']
df = pd.DataFrame(x, columns=column_names)
for cur_args in ARGS:
print(class_labels, cur_args)
cur_model = LinearSVC(**cur_args)
cur_model.fit(x, y)
spec = convert(cur_model, input_features=column_names,
output_feature_names='target')
if is_macos() and macos_version() >= (10, 13):
df['prediction'] = cur_model.predict(x)
cur_eval_metics = evaluate_classifier(spec, df, verbose=False)
self.assertEquals(cur_eval_metics['num_errors'], 0)
|
py | 1a530b294aa59fb1012c76397a40c8afd3dbdf5a | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['ServiceIamPolicyArgs', 'ServiceIamPolicy']
@pulumi.input_type
class ServiceIamPolicyArgs:
def __init__(__self__, *,
service_id: pulumi.Input[str],
audit_configs: Optional[pulumi.Input[Sequence[pulumi.Input['GoogleIamV1AuditConfigArgs']]]] = None,
bindings: Optional[pulumi.Input[Sequence[pulumi.Input['GoogleIamV1BindingArgs']]]] = None,
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
update_mask: Optional[pulumi.Input[str]] = None,
version: Optional[pulumi.Input[int]] = None):
"""
The set of arguments for constructing a ServiceIamPolicy resource.
:param pulumi.Input[Sequence[pulumi.Input['GoogleIamV1AuditConfigArgs']]] audit_configs: Specifies cloud audit logging configuration for this policy.
:param pulumi.Input[Sequence[pulumi.Input['GoogleIamV1BindingArgs']]] bindings: Associates a list of `members`, or principals, with a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one principal. The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the `bindings` grant 50 different roles to `user:[email protected]`, and not to any other principal, then you can add another 1,450 principals to the `bindings` in the `Policy`.
:param pulumi.Input[str] etag: `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.
:param pulumi.Input[str] update_mask: OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only the fields in the mask will be modified. If no mask is provided, the following default mask is used: `paths: "bindings, etag"`
:param pulumi.Input[int] version: Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
"""
pulumi.set(__self__, "service_id", service_id)
if audit_configs is not None:
pulumi.set(__self__, "audit_configs", audit_configs)
if bindings is not None:
pulumi.set(__self__, "bindings", bindings)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if location is not None:
pulumi.set(__self__, "location", location)
if project is not None:
pulumi.set(__self__, "project", project)
if update_mask is not None:
pulumi.set(__self__, "update_mask", update_mask)
if version is not None:
pulumi.set(__self__, "version", version)
@property
@pulumi.getter(name="serviceId")
def service_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "service_id")
@service_id.setter
def service_id(self, value: pulumi.Input[str]):
pulumi.set(self, "service_id", value)
@property
@pulumi.getter(name="auditConfigs")
def audit_configs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GoogleIamV1AuditConfigArgs']]]]:
"""
Specifies cloud audit logging configuration for this policy.
"""
return pulumi.get(self, "audit_configs")
@audit_configs.setter
def audit_configs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GoogleIamV1AuditConfigArgs']]]]):
pulumi.set(self, "audit_configs", value)
@property
@pulumi.getter
def bindings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GoogleIamV1BindingArgs']]]]:
"""
Associates a list of `members`, or principals, with a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one principal. The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the `bindings` grant 50 different roles to `user:[email protected]`, and not to any other principal, then you can add another 1,450 principals to the `bindings` in the `Policy`.
"""
return pulumi.get(self, "bindings")
@bindings.setter
def bindings(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GoogleIamV1BindingArgs']]]]):
pulumi.set(self, "bindings", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter(name="updateMask")
def update_mask(self) -> Optional[pulumi.Input[str]]:
"""
OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only the fields in the mask will be modified. If no mask is provided, the following default mask is used: `paths: "bindings, etag"`
"""
return pulumi.get(self, "update_mask")
@update_mask.setter
def update_mask(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "update_mask", value)
@property
@pulumi.getter
def version(self) -> Optional[pulumi.Input[int]]:
"""
Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
"""
return pulumi.get(self, "version")
@version.setter
def version(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "version", value)
class ServiceIamPolicy(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
audit_configs: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GoogleIamV1AuditConfigArgs']]]]] = None,
bindings: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GoogleIamV1BindingArgs']]]]] = None,
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
service_id: Optional[pulumi.Input[str]] = None,
update_mask: Optional[pulumi.Input[str]] = None,
version: Optional[pulumi.Input[int]] = None,
__props__=None):
"""
Sets the IAM Access control policy for the specified Service. Overwrites any existing policy.
Note - this resource's API doesn't support deletion. When deleted, the resource will persist
on Google Cloud even though it will be deleted from Pulumi state.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GoogleIamV1AuditConfigArgs']]]] audit_configs: Specifies cloud audit logging configuration for this policy.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GoogleIamV1BindingArgs']]]] bindings: Associates a list of `members`, or principals, with a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one principal. The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the `bindings` grant 50 different roles to `user:[email protected]`, and not to any other principal, then you can add another 1,450 principals to the `bindings` in the `Policy`.
:param pulumi.Input[str] etag: `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.
:param pulumi.Input[str] update_mask: OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only the fields in the mask will be modified. If no mask is provided, the following default mask is used: `paths: "bindings, etag"`
:param pulumi.Input[int] version: Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ServiceIamPolicyArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Sets the IAM Access control policy for the specified Service. Overwrites any existing policy.
Note - this resource's API doesn't support deletion. When deleted, the resource will persist
on Google Cloud even though it will be deleted from Pulumi state.
:param str resource_name: The name of the resource.
:param ServiceIamPolicyArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ServiceIamPolicyArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
audit_configs: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GoogleIamV1AuditConfigArgs']]]]] = None,
bindings: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GoogleIamV1BindingArgs']]]]] = None,
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
service_id: Optional[pulumi.Input[str]] = None,
update_mask: Optional[pulumi.Input[str]] = None,
version: Optional[pulumi.Input[int]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ServiceIamPolicyArgs.__new__(ServiceIamPolicyArgs)
__props__.__dict__["audit_configs"] = audit_configs
__props__.__dict__["bindings"] = bindings
__props__.__dict__["etag"] = etag
__props__.__dict__["location"] = location
__props__.__dict__["project"] = project
if service_id is None and not opts.urn:
raise TypeError("Missing required property 'service_id'")
__props__.__dict__["service_id"] = service_id
__props__.__dict__["update_mask"] = update_mask
__props__.__dict__["version"] = version
super(ServiceIamPolicy, __self__).__init__(
'google-native:run/v2:ServiceIamPolicy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ServiceIamPolicy':
"""
Get an existing ServiceIamPolicy resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ServiceIamPolicyArgs.__new__(ServiceIamPolicyArgs)
__props__.__dict__["audit_configs"] = None
__props__.__dict__["bindings"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["version"] = None
return ServiceIamPolicy(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="auditConfigs")
def audit_configs(self) -> pulumi.Output[Sequence['outputs.GoogleIamV1AuditConfigResponse']]:
"""
Specifies cloud audit logging configuration for this policy.
"""
return pulumi.get(self, "audit_configs")
@property
@pulumi.getter
def bindings(self) -> pulumi.Output[Sequence['outputs.GoogleIamV1BindingResponse']]:
"""
Associates a list of `members`, or principals, with a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one principal. The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the `bindings` grant 50 different roles to `user:[email protected]`, and not to any other principal, then you can add another 1,450 principals to the `bindings` in the `Policy`.
"""
return pulumi.get(self, "bindings")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def version(self) -> pulumi.Output[int]:
"""
Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
"""
return pulumi.get(self, "version")
|
py | 1a530b495eb576c2a5f909c56e98057f9a21d955 | import tensorflow as tf
import pickle
from model import Model
from utils import build_dict, build_train_draft_dataset, test_batch_iter
import os
with open("args.pickle", "rb") as f:
args = pickle.load(f)
print("Loading dictionary...")
word_dict, reversed_dict, article_max_len, summary_max_len = build_dict("test", args.toy)
print("Loading test dataset...")
title_list, test_x = build_train_draft_dataset(word_dict, article_max_len)
test_x_len = [len([y for y in x if y != 0]) for x in test_x]
with tf.Session() as sess:
print("Loading saved model...")
model = Model(reversed_dict, article_max_len, summary_max_len, args, forward_only=True)
saver = tf.train.Saver(tf.global_variables())
ckpt = tf.train.get_checkpoint_state("./saved_model/")
saver.restore(sess, ckpt.model_checkpoint_path)
story_test_result_list = []
if args.use_atten:
print ("Using Attention")
else:
print ("Not Using Attention")
for index in range(len(test_x)):
print ("testing %d out of %d" % (index, len(test_x)))
inputs = test_x[index]
batches = test_batch_iter(inputs, [0] * len(test_x), args.batch_size, 1)
result = []
for batch_x, _ in batches:
batch_x_len = [len([y for y in x if y != 0]) for x in batch_x]
test_feed_dict = {
model.batch_size: len(batch_x),
model.X: batch_x,
model.X_len: batch_x_len,
}
prediction = sess.run(model.prediction, feed_dict=test_feed_dict)
prediction_output = [[reversed_dict[y] for y in x] for x in prediction[:, 0, :]]
predict_story = ""
for line in prediction_output:
summary = list()
for word in line:
if word == "</s>":
break
if word not in summary:
summary.append(word)
predict_story = predict_story+" ".join(summary)+"<split>"
predict_story = "[{}]{}".format(title_list[index], predict_story)
story_test_result_list.append(predict_story)
if not os.path.exists("result"):
os.mkdir("result")
with open("result/train.txt", "wr") as f:
for story in story_test_result_list:
f.write(story+"\n")
print('Summaries are saved to "train.txt"...')
|
py | 1a530b60322062ff74c54f04d60ee2c948653296 | from sqlalchemy import Column, Integer, String, Boolean
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker
from sqlalchemy import create_engine
Base = declarative_base()
class Knowledge(Base):
__tablename__ = 'knowlenge'
subject_id = Column(Integer, primary_key=True)
student = Column(String)
topic = Column(String)
articale = Column(String)
rating = Column(Integer)
def __repr__(self):
return ("If you want to learn about {}, you should look at the Wikipedia article called {} We gave this article a rating of {} out of 10!").format(
self.topic,
self.articale,
self.rating)
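# Added illustrative sketch (not part of the original module): the unused imports above
# suggest the intended setup; the in-memory SQLite URL is an assumption for demonstration.
if __name__ == "__main__":
    engine = create_engine("sqlite:///:memory:")
    Base.metadata.create_all(engine)
    Session = sessionmaker(bind=engine)
    session = Session()
    session.add(Knowledge(student="Ada", topic="Databases", articale="SQL (Wikipedia)", rating=9))
    session.commit()
    print(session.query(Knowledge).first())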
|
py | 1a530c02a15f9105d14a4fee1bed14ef8e8b7f74 | import importlib
import inspect
import json
import logging
import os
import re
import sys
import zipfile
from pathlib import Path
import click
def add_to_dict_if_exists(options_dict, initial_dict=None):
    # use None as the default to avoid sharing one mutable dict across calls
    if initial_dict is None:
        initial_dict = {}
    for k, v in options_dict.items():
        if v:
            initial_dict[k] = v
    return initial_dict
def convert_to_set(iterable):
if not isinstance(iterable, set):
return set(iterable)
return iterable
def extract_zip(source, dest):
with zipfile.ZipFile(source, 'r') as zip_ref:
zip_ref.extractall(dest)
def generate_path_str(*args):
if not args:
return
path = None
for arg in args:
if not path:
path = Path(arg)
else:
path /= arg
return str(path)
def is_dir(d):
return os.path.isdir(d)
def is_envvar_true(value):
return value in (True, 'True', 'true', '1')
def is_file(f):
return os.path.isfile(f)
def import_all_modules_in_directory(plugins_init_file, existing_commands):
try:
spec = importlib.util.spec_from_file_location('plugins_modules', plugins_init_file)
module = importlib.util.module_from_spec(spec)
sys.modules[spec.name] = module
spec.loader.exec_module(module)
import plugins_modules
from plugins_modules import __all__ as all_plugins_modules
for module in all_plugins_modules:
_module = getattr(plugins_modules, module)
if isinstance(_module, (click.core.Command, click.core.Group)):
existing_commands.add(_module)
except ImportError:
logging.warning(
f'{inspect.stack()[0][3]}; will skip loading plugin: {module}', exc_info=True
)
def make_dirs(path):
if not os.path.exists(path):
try:
os.makedirs(path)
except FileExistsError:
logging.warning(f'{inspect.stack()[0][3]}; will ignore FileExistsError')
def path_exists(file):
if os.path.exists(file):
sys.exit(f'error: {file} already exists')
def paths_exist(files):
for file in files:
path_exists(file)
def read_file(file, type='text'):
with open(file, 'r') as f:
if type == 'json':
return json.loads(f.read())
return f.read()
def remove_file_above_size(file, size_kb=100):
if os.path.getsize(file) > size_kb * 1024:
os.remove(file)
def remove_last_items_from_list(init_list, integer=0):
if integer <= 0:
return init_list
return init_list[:-integer]
def resolve_target_directory(target_directory=None):
if target_directory:
if not os.path.exists(target_directory):
os.makedirs(target_directory)
return str(Path(target_directory).resolve())
return os.getcwd()
def run_func_on_dir_files(dir, func, glob='**/*', args=(), kwargs={}):
state = []
for file_path in Path(resolve_target_directory(dir)).resolve().glob(glob):
_tuple = (str(file_path),)
result = func(*(_tuple + args), **kwargs)
if result:
state.append(result)
return state
def run_func_on_iterable(iterable, func, state_op='append', args=(), kwargs={}):
state = []
for item in iterable:
_tuple = (item,)
result = func(*(_tuple + args), **kwargs)
if result:
getattr(state, state_op)(result)
return state
def show_message(msg):
print(msg)
def split_path(path, delimiter='[/\\\\]'):
return re.split(delimiter, path)
def touch(path):
try:
with open(path, 'x'):
os.utime(path, None)
except FileNotFoundError:
os.makedirs(os.path.split(path)[0])
except FileExistsError:
logging.warning(f'{inspect.stack()[0][3]}; will ignore FileExistsError')
def write_file(content, path, fs_write=True, indent=None, eof=True):
if not fs_write:
return
touch(path)
with open(path, 'w') as f:
if isinstance(content, str):
if eof:
content = f'{content}\n'
f.write(content)
elif isinstance(content, dict) or isinstance(content, list):
if isinstance(indent, int):
content = f'{json.dumps(content, indent=indent)}'
else:
content = f'{json.dumps(content)}'
if eof:
f.write(f'{content}\n')
else:
f.write(content)
def write_zip(file, content):
touch(file)
with open(file, 'wb') as f:
f.write(content)
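# Minimal usage sketch: round-trips a small dict through write_file/read_file.
# The temporary directory and file name below are illustrative assumptions.
if __name__ == "__main__":
    import tempfile
    demo_path = generate_path_str(tempfile.mkdtemp(), "demo", "settings.json")
    write_file({"name": "example", "debug": True}, demo_path, indent=2)
    print(read_file(demo_path, type="json"))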
|
py | 1a530c0dd6bca62c78a9fef6fa8bc0bf39aa784c | from mighty.applications.tenant.models.role import Role
from mighty.applications.tenant.models.tenant import Tenant
from mighty.applications.tenant.models.invitation import TenantInvitation
__all__ = ("Role", "Tenant", "TenantInvitation") |
py | 1a530c28129201398ae8aa3b04d65b2ecc0767d2 | # -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
class TierTypeOracleCloudCredentialsEnum(object):
"""Implementation of the 'TierType_OracleCloudCredentials' enum.
Specifies the storage class of Oracle vault.
OracleTierType specifies the storage class for Oracle.
'kOracleTierStandard' indicates a tier type of Oracle properties that
requires fast, immediate and frequent access.
'kOracleTierArchive' indicates a tier type of Oracle properties that is
rarely accessed and preserved for long periods of time.
Attributes:
KORACLETIERSTANDARD: TODO: type description here.
KORACLETIERARCHIVE: TODO: type description here.
"""
KORACLETIERSTANDARD = 'kOracleTierStandard'
KORACLETIERARCHIVE = 'kOracleTierArchive'
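# Minimal usage sketch (illustrative only): select the storage class constant,
# e.g. when assembling an Oracle vault configuration payload.
if __name__ == "__main__":
    tier = TierTypeOracleCloudCredentialsEnum.KORACLETIERARCHIVE
    print(tier)  # prints 'kOracleTierArchive'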
|
py | 1a530d2750faafed251594d92526622ae1903762 | from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import SGDRegressor
from sklearn.metrics import mean_squared_error
"""
线性回归:梯度下降法
:return:None
"""
# 1.获取数据
data = load_boston()
# 2.数据集划分
x_train, x_test, y_train, y_test = train_test_split(data.data, data.target, random_state=22)
# 3.特征工程-标准化
transfer = StandardScaler()
x_train = transfer.fit_transform(x_train)
x_test = transfer.fit_transform(x_test)
# 4.机器学习-线性回归(特征方程)
estimator = SGDRegressor(max_iter=1000)
estimator.fit(x_train, y_train)
# 5.模型评估
# 5.1 获取系数等值
y_predict = estimator.predict(x_test)
print("预测值为:\n", y_predict)
print("模型中的系数为:\n", estimator.coef_)
print("模型中的偏置为:\n", estimator.intercept_)
# 5.2 评价
# 均方误差
error = mean_squared_error(y_test, y_predict)
print("误差为:\n", error) |
py | 1a5310c04d080d04c562d0b560f050fb4de2b2d2 |
import os.path
import clr
project_dir = os.path.dirname(os.path.abspath(__file__))
import sys
sys.path.append(os.path.join(project_dir, "..", "TestStack.White.0.13.3\\lib\\net40\\"))
sys.path.append(os.path.join(project_dir, "..", "Castle.Core.3.3.0\\lib\\net40-client\\"))
clr.AddReferenceByName('TestStack.White')
from TestStack.White.UIItems.Finders import *
from TestStack.White.InputDevices import Keyboard
from TestStack.White.WindowsAPI import KeyboardInput
clr.AddReferenceByName('UIAutomationTypes, Version=4.0.0.0, Culture=neutral, PublicKeyToken=31bf3856ad364e35')
from System.Windows.Automation import ControlType
class GroupHelper:
def __init__(self, app):
self.app = app
def create(self, name):
modal = self.open_group_editor()
modal.Get(SearchCriteria.ByAutomationId("uxNewAddressButton")).Click()
modal.Get(SearchCriteria.ByControlType(ControlType.Edit)).Enter(name)
Keyboard.Instance.PressSpecialKey(KeyboardInput.SpecialKeys.RETURN)
self.close_group_editor(modal)
def count(self):
self.open_group_editor()
return len(self.get_group_list())
def get_group_list(self):
modal = self.open_group_editor()
tree = modal.Get(SearchCriteria.ByAutomationId("uxAddressTreeView"))
root = tree.Nodes[0]
l = [node.Text for node in root.Nodes]
self.close_group_editor(modal)
return l
def open_group_editor(self):
main_window = self.app.main_window
main_window.Get(SearchCriteria.ByAutomationId("groupButton")).Click()
modal = main_window.ModalWindow("Group editor")
return modal
def close_group_editor(self, modal):
modal.Get(SearchCriteria.ByAutomationId("uxCloseAddressButton")).Click()
def delete_first(self):
modal = self.open_group_editor()
tree = modal.Get(SearchCriteria.ByAutomationId("uxAddressTreeView"))
root = tree.Nodes[0]
root.Nodes[0].Select()
modal.Get(SearchCriteria.ByAutomationId("uxDeleteAddressButton")).Click()
modal.Get(SearchCriteria.ByAutomationId("uxOKAddressButton")).Click()
self.close_group_editor(modal)
|
py | 1a53113c310f1314e5172bab70aec90439c1e013 | # coding: utf-8
# /*##########################################################################
#
# Copyright (C) 2016-2018 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
from __future__ import absolute_import
__authors__ = ["V. Valls"]
__license__ = "MIT"
__date__ = "27/08/2018"
from pyFAI import units
from .AbstractModel import AbstractModel
from .DataModel import DataModel
class IntegrationSettingsModel(AbstractModel):
def __init__(self, parent=None):
super(IntegrationSettingsModel, self).__init__(parent)
self.__radialUnit = DataModel()
self.__radialUnit.setValue(units.TTH_RAD)
self.__radialUnit.changed.connect(self.wasChanged)
self.__nPointsRadial = DataModel()
self.__nPointsRadial.changed.connect(self.wasChanged)
self.__nPointsAzimuthal = DataModel()
self.__nPointsAzimuthal.changed.connect(self.wasChanged)
def isValid(self):
if self.__radialUnit.value() is None:
return False
return True
def radialUnit(self):
return self.__radialUnit
def nPointsRadial(self):
return self.__nPointsRadial
def nPointsAzimuthal(self):
return self.__nPointsAzimuthal
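# Minimal usage sketch (assumption: the surrounding pyFAI GUI package is importable;
# the point counts are illustrative). The helper below is illustration only and is not called.
def _example_configure_integration():
    model = IntegrationSettingsModel()
    model.nPointsRadial().setValue(1000)    # radial bins (illustrative)
    model.nPointsAzimuthal().setValue(360)  # azimuthal bins (illustrative)
    assert model.isValid()  # the radial unit defaults to units.TTH_RAD above
    return model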
|
py | 1a531265acb6dd99a86ca6b0becb5967bf8ab41b | import json
from pyaltherma.const import VALID_RESPONSE_CODES
from pyaltherma.errors import PathException, AlthermaResponseException
def query_object(o, json_path, raise_exception=False, convert_to_none=True):
location_steps = json_path.split('/')
if isinstance(o, str):
o = json.loads(o)
for idx, step in enumerate(location_steps):
if step not in o:
if raise_exception:
raise PathException(f'{json_path} step: {step} not found in object')
if idx == len(location_steps) - 1 and convert_to_none:
return None
o = o.get(step, {})
return o
def assert_response(request, response):
resp_code = query_object(response, 'm2m:rsp/rsc')
if resp_code not in VALID_RESPONSE_CODES:
raise AlthermaResponseException(f'Response code {resp_code} is invalid.')
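# Minimal usage sketch: walking a nested response dict with query_object.
# The sample payload is an illustrative assumption, not a captured device response.
if __name__ == "__main__":
    sample = {"m2m:rsp": {"rsc": 2000, "pc": {"m2m:cin": {"con": '{"value": 21.5}'}}}}
    print(query_object(sample, "m2m:rsp/rsc"))             # 2000
    print(query_object(sample, "m2m:rsp/pc/m2m:cin/con"))  # '{"value": 21.5}'
    print(query_object(sample, "m2m:rsp/missing"))         # None (missing last step)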
|
py | 1a531290e0b48075c8c3d60a2cd3c30eda0c7324 | import copy
from datetime import date, datetime, timedelta
from typing import Any, Callable, List, Optional, Sequence, Tuple, Type, Union
import numpy as np
from polars.utils import _timedelta_to_pl_duration
try:
from polars.polars import PyExpr
_DOCUMENTING = False
except ImportError: # pragma: no cover
_DOCUMENTING = True
import math
from polars import internals as pli
from polars.datatypes import (
DataType,
Date,
Datetime,
Float64,
Int32,
Object,
UInt32,
py_type_to_dtype,
)
def selection_to_pyexpr_list(
exprs: Union[str, "Expr", Sequence[Union[str, "Expr", "pli.Series"]], "pli.Series"]
) -> List["PyExpr"]:
if isinstance(exprs, (str, Expr, pli.Series)):
exprs = [exprs]
return [expr_to_lit_or_expr(e, str_to_lit=False)._pyexpr for e in exprs]
def wrap_expr(pyexpr: "PyExpr") -> "Expr":
return Expr._from_pyexpr(pyexpr)
class Expr:
"""
Expressions that can be used in various contexts.
"""
def __init__(self) -> None:
self._pyexpr: PyExpr # pragma: no cover
def __str__(self) -> str:
return self._pyexpr.to_str()
def _repr_html_(self) -> str:
return self._pyexpr.to_str()
@staticmethod
def _from_pyexpr(pyexpr: "PyExpr") -> "Expr":
self = Expr.__new__(Expr)
self._pyexpr = pyexpr
return self
def __to_pyexpr(self, other: Any) -> "PyExpr":
return self.__to_expr(other)._pyexpr
def __to_expr(self, other: Any) -> "Expr":
if isinstance(other, Expr):
return other
return pli.lit(other)
def __bool__(self) -> "Expr":
raise ValueError(
"Since Expr are lazy, the truthiness of an Expr is ambiguous. \
Hint: use '&' or '|' to chain Expr together, not and/or."
)
def __invert__(self) -> "Expr":
return self.is_not()
def __xor__(self, other: "Expr") -> "Expr":
return wrap_expr(self._pyexpr._xor(self.__to_pyexpr(other)))
def __rxor__(self, other: "Expr") -> "Expr":
return wrap_expr(self._pyexpr._xor(self.__to_pyexpr(other)))
def __and__(self, other: "Expr") -> "Expr":
return wrap_expr(self._pyexpr._and(self.__to_pyexpr(other)))
def __rand__(self, other: Any) -> "Expr":
return wrap_expr(self._pyexpr._and(self.__to_pyexpr(other)))
def __or__(self, other: "Expr") -> "Expr":
return wrap_expr(self._pyexpr._or(self.__to_pyexpr(other)))
def __ror__(self, other: Any) -> "Expr":
return wrap_expr(self.__to_pyexpr(other)._or(self._pyexpr))
def __add__(self, other: Any) -> "Expr":
return wrap_expr(self._pyexpr + self.__to_pyexpr(other))
def __radd__(self, other: Any) -> "Expr":
return wrap_expr(self.__to_pyexpr(other) + self._pyexpr)
def __sub__(self, other: Any) -> "Expr":
return wrap_expr(self._pyexpr - self.__to_pyexpr(other))
def __rsub__(self, other: Any) -> "Expr":
return wrap_expr(self.__to_pyexpr(other) - self._pyexpr)
def __mul__(self, other: Any) -> "Expr":
return wrap_expr(self._pyexpr * self.__to_pyexpr(other))
def __rmul__(self, other: Any) -> "Expr":
return wrap_expr(self.__to_pyexpr(other) * self._pyexpr)
def __truediv__(self, other: Any) -> "Expr":
return wrap_expr(self._pyexpr / self.__to_pyexpr(other))
def __rtruediv__(self, other: Any) -> "Expr":
return wrap_expr(self.__to_pyexpr(other) / self._pyexpr)
def __floordiv__(self, other: Any) -> "Expr":
return wrap_expr(self._pyexpr // self.__to_pyexpr(other))
def __rfloordiv__(self, other: Any) -> "Expr":
return wrap_expr(self.__to_pyexpr(other) // self._pyexpr)
def __mod__(self, other: Any) -> "Expr":
return wrap_expr(self._pyexpr % self.__to_pyexpr(other))
def __rmod__(self, other: Any) -> "Expr":
return wrap_expr(self.__to_pyexpr(other) % self._pyexpr)
def __pow__(self, power: float, modulo: None = None) -> "Expr":
return self.pow(power)
def __ge__(self, other: Any) -> "Expr":
return self.gt_eq(self.__to_expr(other))
def __le__(self, other: Any) -> "Expr":
return self.lt_eq(self.__to_expr(other))
def __eq__(self, other: Any) -> "Expr": # type: ignore[override]
return self.eq(self.__to_expr(other))
def __ne__(self, other: Any) -> "Expr": # type: ignore[override]
return self.neq(self.__to_expr(other))
def __lt__(self, other: Any) -> "Expr":
return self.lt(self.__to_expr(other))
def __gt__(self, other: Any) -> "Expr":
return self.gt(self.__to_expr(other))
def eq(self, other: "Expr") -> "Expr":
return wrap_expr(self._pyexpr.eq(other._pyexpr))
def neq(self, other: "Expr") -> "Expr":
return wrap_expr(self._pyexpr.neq(other._pyexpr))
def gt(self, other: "Expr") -> "Expr":
return wrap_expr(self._pyexpr.gt(other._pyexpr))
def gt_eq(self, other: "Expr") -> "Expr":
return wrap_expr(self._pyexpr.gt_eq(other._pyexpr))
def lt_eq(self, other: "Expr") -> "Expr":
return wrap_expr(self._pyexpr.lt_eq(other._pyexpr))
def lt(self, other: "Expr") -> "Expr":
return wrap_expr(self._pyexpr.lt(other._pyexpr))
def __neg__(self) -> "Expr":
return pli.lit(0) - self
def __array_ufunc__(
self, ufunc: Callable[..., Any], method: str, *inputs: Any, **kwargs: Any
) -> "Expr":
"""
Numpy universal functions.
"""
out_type = ufunc(np.array([1])).dtype
dtype: Optional[Type[DataType]]
if "float" in str(out_type):
dtype = Float64
else:
dtype = None
args = [inp for inp in inputs if not isinstance(inp, Expr)]
def function(s: "pli.Series") -> "pli.Series":
return ufunc(s, *args, **kwargs) # pragma: no cover
if "dtype" in kwargs:
dtype = kwargs["dtype"]
return self.map(function, return_dtype=dtype)
def __getstate__(self): # type: ignore
return self._pyexpr.__getstate__()
def __setstate__(self, state): # type: ignore
# init with a dummy
self._pyexpr = pli.lit(0)._pyexpr
self._pyexpr.__setstate__(state)
def to_physical(self) -> "Expr":
"""
Cast to physical representation of the logical dtype.
Date -> Int32
Datetime -> Int64
Time -> Int64
other -> other
"""
return wrap_expr(self._pyexpr.to_physical())
def any(self) -> "Expr":
"""
Check if any boolean value in the column is `True`
Returns
-------
Boolean literal
"""
return wrap_expr(self._pyexpr.any())
def all(self) -> "Expr":
"""
Check if all boolean values in the column are `True`
Returns
-------
Boolean literal
"""
return wrap_expr(self._pyexpr.all())
def sqrt(self) -> "Expr":
"""
Compute the square root of the elements
"""
return self ** 0.5
def log10(self) -> "Expr":
"""
Return the base 10 logarithm of the input array, element-wise.
"""
return self.log(10.0)
def exp(self) -> "Expr":
"""
Return the exponential element-wise
"""
return np.exp(self) # type: ignore
def alias(self, name: str) -> "Expr":
"""
Rename the output of an expression.
Parameters
----------
name
New name.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [1, 2, 3],
... "b": ["a", "b", None],
... }
... )
>>> df
shape: (3, 2)
┌─────┬──────┐
│ a ┆ b │
│ --- ┆ --- │
│ i64 ┆ str │
╞═════╪══════╡
│ 1 ┆ a │
├╌╌╌╌╌┼╌╌╌╌╌╌┤
│ 2 ┆ b │
├╌╌╌╌╌┼╌╌╌╌╌╌┤
│ 3 ┆ null │
└─────┴──────┘
>>> df.select(
... [
... pl.col("a").alias("bar"),
... pl.col("b").alias("foo"),
... ]
... )
shape: (3, 2)
┌─────┬──────┐
│ bar ┆ foo │
│ --- ┆ --- │
│ i64 ┆ str │
╞═════╪══════╡
│ 1 ┆ a │
├╌╌╌╌╌┼╌╌╌╌╌╌┤
│ 2 ┆ b │
├╌╌╌╌╌┼╌╌╌╌╌╌┤
│ 3 ┆ null │
└─────┴──────┘
"""
return wrap_expr(self._pyexpr.alias(name))
def exclude(
self,
columns: Union[str, List[str], Type[DataType], Sequence[Type[DataType]]],
) -> "Expr":
"""
Exclude certain columns from a wildcard/regex selection.
You may also use regexes in the exclude list. They must start with `^` and end with `$`.
Parameters
----------
columns
Column(s) to exclude from selection.
This can be:
- a column name, or multiple names
- a regular expression starting with `^` and ending with `$`
- a dtype or multiple dtypes
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [1, 2, 3],
... "b": ["a", "b", None],
... "c": [None, 2, 1],
... }
... )
>>> df
shape: (3, 3)
┌─────┬──────┬──────┐
│ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- │
│ i64 ┆ str ┆ i64 │
╞═════╪══════╪══════╡
│ 1 ┆ a ┆ null │
├╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ 2 ┆ b ┆ 2 │
├╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ 3 ┆ null ┆ 1 │
└─────┴──────┴──────┘
>>> df.select(
... pl.col("*").exclude("b"),
... )
shape: (3, 2)
┌─────┬──────┐
│ a ┆ c │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪══════╡
│ 1 ┆ null │
├╌╌╌╌╌┼╌╌╌╌╌╌┤
│ 2 ┆ 2 │
├╌╌╌╌╌┼╌╌╌╌╌╌┤
│ 3 ┆ 1 │
└─────┴──────┘
"""
if isinstance(columns, str):
columns = [columns]
return wrap_expr(self._pyexpr.exclude(columns))
elif not isinstance(columns, list) and issubclass(columns, DataType): # type: ignore
columns = [columns] # type: ignore
return wrap_expr(self._pyexpr.exclude_dtype(columns))
if not all(
[
isinstance(a, str) or issubclass(a, DataType)
for a in columns # type: ignore
]
):
raise ValueError("input should be all string or all DataType")
if isinstance(columns[0], str): # type: ignore
return wrap_expr(self._pyexpr.exclude(columns))
else:
return wrap_expr(self._pyexpr.exclude_dtype(columns))
def keep_name(self) -> "Expr":
"""
Keep the original root name of the expression.
Examples
--------
A groupby aggregation often changes the name of a column.
With `keep_name` we can keep the original name of the column
>>> df = pl.DataFrame(
... {
... "a": [1, 2, 3],
... "b": ["a", "b", None],
... }
... )
>>> df.groupby("a").agg(pl.col("b").list()).sort(by="a")
shape: (3, 2)
┌─────┬────────────┐
│ a ┆ b_agg_list │
│ --- ┆ --- │
│ i64 ┆ list [str] │
╞═════╪════════════╡
│ 1 ┆ ["a"] │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2 ┆ ["b"] │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 3 ┆ [null] │
└─────┴────────────┘
Keep the original column name:
>>> df.groupby("a").agg(pl.col("b").list().keep_name()).sort(by="a")
shape: (3, 2)
┌─────┬────────────┐
│ a ┆ b │
│ --- ┆ --- │
│ i64 ┆ list [str] │
╞═════╪════════════╡
│ 1 ┆ ["a"] │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2 ┆ ["b"] │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 3 ┆ [null] │
└─────┴────────────┘
"""
return wrap_expr(self._pyexpr.keep_name())
def prefix(self, prefix: str) -> "Expr":
"""
Add a prefix to the root column name of the expression.
Examples
--------
>>> df = pl.DataFrame(
... {
... "A": [1, 2, 3, 4, 5],
... "fruits": ["banana", "banana", "apple", "apple", "banana"],
... "B": [5, 4, 3, 2, 1],
... "cars": ["beetle", "audi", "beetle", "beetle", "beetle"],
... }
... )
>>> df
shape: (5, 4)
┌─────┬────────┬─────┬────────┐
│ A ┆ fruits ┆ B ┆ cars │
│ --- ┆ --- ┆ --- ┆ --- │
│ i64 ┆ str ┆ i64 ┆ str │
╞═════╪════════╪═════╪════════╡
│ 1 ┆ banana ┆ 5 ┆ beetle │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 2 ┆ banana ┆ 4 ┆ audi │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 3 ┆ apple ┆ 3 ┆ beetle │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 4 ┆ apple ┆ 2 ┆ beetle │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 5 ┆ banana ┆ 1 ┆ beetle │
└─────┴────────┴─────┴────────┘
>>> df.select(
... [
... pl.all(),
... pl.all().reverse().suffix("_reverse"),
... ]
... )
shape: (5, 8)
┌─────┬────────┬─────┬────────┬───────────┬────────────────┬───────────┬──────────────┐
│ A ┆ fruits ┆ B ┆ cars ┆ reverse_A ┆ reverse_fruits ┆ reverse_B ┆ reverse_cars │
│ --- ┆ --- ┆ --- ┆ --- ┆ --- ┆ --- ┆ --- ┆ --- │
│ i64 ┆ str ┆ i64 ┆ str ┆ i64 ┆ str ┆ i64 ┆ str │
╞═════╪════════╪═════╪════════╪═══════════╪════════════════╪═══════════╪══════════════╡
│ 1 ┆ banana ┆ 5 ┆ beetle ┆ 5 ┆ banana ┆ 1 ┆ beetle │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2 ┆ banana ┆ 4 ┆ audi ┆ 4 ┆ apple ┆ 2 ┆ beetle │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 3 ┆ apple ┆ 3 ┆ beetle ┆ 3 ┆ apple ┆ 3 ┆ beetle │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 4 ┆ apple ┆ 2 ┆ beetle ┆ 2 ┆ banana ┆ 4 ┆ audi │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 5 ┆ banana ┆ 1 ┆ beetle ┆ 1 ┆ banana ┆ 5 ┆ beetle │
└─────┴────────┴─────┴────────┴───────────┴────────────────┴───────────┴──────────────┘
"""
return wrap_expr(self._pyexpr.prefix(prefix))
def suffix(self, suffix: str) -> "Expr":
"""
Add a suffix to the root column name of the expression.
Examples
--------
>>> df = pl.DataFrame(
... {
... "A": [1, 2, 3, 4, 5],
... "fruits": ["banana", "banana", "apple", "apple", "banana"],
... "B": [5, 4, 3, 2, 1],
... "cars": ["beetle", "audi", "beetle", "beetle", "beetle"],
... }
... )
>>> df
shape: (5, 4)
┌─────┬────────┬─────┬────────┐
│ A ┆ fruits ┆ B ┆ cars │
│ --- ┆ --- ┆ --- ┆ --- │
│ i64 ┆ str ┆ i64 ┆ str │
╞═════╪════════╪═════╪════════╡
│ 1 ┆ banana ┆ 5 ┆ beetle │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 2 ┆ banana ┆ 4 ┆ audi │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 3 ┆ apple ┆ 3 ┆ beetle │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 4 ┆ apple ┆ 2 ┆ beetle │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 5 ┆ banana ┆ 1 ┆ beetle │
└─────┴────────┴─────┴────────┘
>>> df.select(
... [
... pl.all(),
... pl.all().reverse().prefix("reverse_"),
... ]
... )
shape: (5, 8)
┌─────┬────────┬─────┬────────┬───────────┬────────────────┬───────────┬──────────────┐
│ A ┆ fruits ┆ B ┆ cars ┆ A_reverse ┆ fruits_reverse ┆ B_reverse ┆ cars_reverse │
│ --- ┆ --- ┆ --- ┆ --- ┆ --- ┆ --- ┆ --- ┆ --- │
│ i64 ┆ str ┆ i64 ┆ str ┆ i64 ┆ str ┆ i64 ┆ str │
╞═════╪════════╪═════╪════════╪═══════════╪════════════════╪═══════════╪══════════════╡
│ 1 ┆ banana ┆ 5 ┆ beetle ┆ 5 ┆ banana ┆ 1 ┆ beetle │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2 ┆ banana ┆ 4 ┆ audi ┆ 4 ┆ apple ┆ 2 ┆ beetle │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 3 ┆ apple ┆ 3 ┆ beetle ┆ 3 ┆ apple ┆ 3 ┆ beetle │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 4 ┆ apple ┆ 2 ┆ beetle ┆ 2 ┆ banana ┆ 4 ┆ audi │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 5 ┆ banana ┆ 1 ┆ beetle ┆ 1 ┆ banana ┆ 5 ┆ beetle │
└─────┴────────┴─────┴────────┴───────────┴────────────────┴───────────┴──────────────┘
"""
return wrap_expr(self._pyexpr.suffix(suffix))
def map_alias(self, f: Callable[[str], str]) -> "Expr":
"""
Rename the output of an expression by mapping a function over the root name.
Parameters
----------
f
function that maps root name to new name
"""
return wrap_expr(self._pyexpr.map_alias(f))
def is_not(self) -> "Expr":
"""
Negate a boolean expression.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [True, False, False],
... "b": ["a", "b", None],
... }
... )
>>> df
shape: (3, 2)
┌───────┬──────┐
│ a ┆ b │
│ --- ┆ --- │
│ bool ┆ str │
╞═══════╪══════╡
│ true ┆ a │
├╌╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ false ┆ b │
├╌╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ false ┆ null │
└───────┴──────┘
>>> df.select(pl.col("a").is_not())
shape: (3, 1)
┌───────┐
│ a │
│ --- │
│ bool │
╞═══════╡
│ false │
├╌╌╌╌╌╌╌┤
│ true │
├╌╌╌╌╌╌╌┤
│ true │
└───────┘
"""
return wrap_expr(self._pyexpr.is_not())
def is_null(self) -> "Expr":
"""
Create a boolean expression returning `True` where the expression contains null values.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [1, 2, None, 1, 5],
... "b": [1.0, 2.0, float("nan"), 1.0, 5.0],
... }
... )
>>> df.with_column(pl.all().is_null().suffix("_isnull")) # nan != null
shape: (5, 4)
┌──────┬─────┬──────────┬──────────┐
│ a ┆ b ┆ a_isnull ┆ b_isnull │
│ --- ┆ --- ┆ --- ┆ --- │
│ i64 ┆ f64 ┆ bool ┆ bool │
╞══════╪═════╪══════════╪══════════╡
│ 1 ┆ 1.0 ┆ false ┆ false │
├╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┤
│ 2 ┆ 2.0 ┆ false ┆ false │
├╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┤
│ null ┆ NaN ┆ true ┆ false │
├╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┤
│ 1 ┆ 1.0 ┆ false ┆ false │
├╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┤
│ 5 ┆ 5.0 ┆ false ┆ false │
└──────┴─────┴──────────┴──────────┘
"""
return wrap_expr(self._pyexpr.is_null())
def is_not_null(self) -> "Expr":
"""
Create a boolean expression returning `True` where the expression does not contain null values.
"""
return wrap_expr(self._pyexpr.is_not_null())
def is_finite(self) -> "Expr":
"""
Create a boolean expression returning `True` where the expression values are finite.
"""
return wrap_expr(self._pyexpr.is_finite())
def is_infinite(self) -> "Expr":
"""
Create a boolean expression returning `True` where the expression values are infinite.
"""
return wrap_expr(self._pyexpr.is_infinite())
def is_nan(self) -> "Expr":
"""
Create a boolean expression returning `True` where the expression values are NaN (Not A Number).
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [1, 2, None, 1, 5],
... "b": [1.0, 2.0, float("nan"), 1.0, 5.0],
... }
... )
>>> df.with_column(pl.all().is_nan().suffix("_isnan")) # nan != null
shape: (5, 4)
┌──────┬─────┬─────────┬─────────┐
│ a ┆ b ┆ a_isnan ┆ b_isnan │
│ --- ┆ --- ┆ --- ┆ --- │
│ i64 ┆ f64 ┆ bool ┆ bool │
╞══════╪═════╪═════════╪═════════╡
│ 1 ┆ 1.0 ┆ false ┆ false │
├╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌┤
│ 2 ┆ 2.0 ┆ false ┆ false │
├╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌┤
│ null ┆ NaN ┆ false ┆ true │
├╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌┤
│ 1 ┆ 1.0 ┆ false ┆ false │
├╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌┤
│ 5 ┆ 5.0 ┆ false ┆ false │
└──────┴─────┴─────────┴─────────┘
"""
return wrap_expr(self._pyexpr.is_nan())
def is_not_nan(self) -> "Expr":
"""
Create a boolean expression returning `True` where the expression values are not NaN (Not A Number).
"""
return wrap_expr(self._pyexpr.is_not_nan())
def agg_groups(self) -> "Expr":
"""
Get the group indexes of the group by operation.
Should be used in aggregation context only.
Examples
--------
>>> df = pl.DataFrame(
... {
... "group": [
... "one",
... "one",
... "one",
... "two",
... "two",
... "two",
... ],
... "value": [94, 95, 96, 97, 97, 99],
... }
... )
>>> df.groupby("group").agg(pl.col("value").agg_groups())
shape: (2, 2)
┌───────┬────────────┐
│ group ┆ value │
│ --- ┆ --- │
│ str ┆ list [u32] │
╞═══════╪════════════╡
│ two ┆ [3, 4, 5] │
├╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ one ┆ [0, 1, 2] │
└───────┴────────────┘
"""
return wrap_expr(self._pyexpr.agg_groups())
def count(self) -> "Expr":
"""Count the number of values in this expression"""
return wrap_expr(self._pyexpr.count())
def len(self) -> "Expr":
"""
Alias for count
Count the number of values in this expression
"""
return self.count()
def slice(self, offset: Union[int, "Expr"], length: Union[int, "Expr"]) -> "Expr":
"""
Slice the Series.
Parameters
----------
offset
Start index.
length
Length of the slice.
"""
if isinstance(offset, int):
offset = pli.lit(offset)
if isinstance(length, int):
length = pli.lit(length)
return wrap_expr(self._pyexpr.slice(offset._pyexpr, length._pyexpr))
def drop_nulls(self) -> "Expr":
"""
Drop null values
"""
return wrap_expr(self._pyexpr.drop_nulls())
def drop_nans(self) -> "Expr":
"""
Drop floating point NaN values
"""
return wrap_expr(self._pyexpr.drop_nans())
def cumsum(self, reverse: bool = False) -> "Expr":
"""
Get an array with the cumulative sum computed at every element.
Parameters
----------
reverse
Reverse the operation.
Notes
-----
Dtypes in {Int8, UInt8, Int16, UInt16} are cast to
Int64 before summing to prevent overflow issues.
"""
return wrap_expr(self._pyexpr.cumsum(reverse))
def cumprod(self, reverse: bool = False) -> "Expr":
"""
Get an array with the cumulative product computed at every element.
Parameters
----------
reverse
Reverse the operation.
Notes
-----
Dtypes in {Int8, UInt8, Int16, UInt16} are cast to
Int64 before summing to prevent overflow issues.
"""
return wrap_expr(self._pyexpr.cumprod(reverse))
def cummin(self, reverse: bool = False) -> "Expr":
"""
Get an array with the cumulative min computed at every element.
Parameters
----------
reverse
Reverse the operation.
"""
return wrap_expr(self._pyexpr.cummin(reverse))
def cummax(self, reverse: bool = False) -> "Expr":
"""
Get an array with the cumulative max computed at every element.
Parameters
----------
reverse
Reverse the operation.
"""
return wrap_expr(self._pyexpr.cummax(reverse))
def cumcount(self, reverse: bool = False) -> "Expr":
"""
Get an array with the cumulative count computed at every element.
Counting from 0 to len
Parameters
----------
reverse
Reverse the operation.
"""
return wrap_expr(self._pyexpr.cumcount(reverse))
def floor(self) -> "Expr":
"""
Floor underlying floating point array to the largest integer smaller than or equal to the float value.
Only works on floating point Series
"""
return wrap_expr(self._pyexpr.floor())
def ceil(self) -> "Expr":
"""
Ceil underlying floating point array to the smallest integer greater than or equal to the float value.
Only works on floating point Series
"""
return wrap_expr(self._pyexpr.ceil())
def round(self, decimals: int) -> "Expr":
"""
Round underlying floating point data by `decimals` digits.
Parameters
----------
decimals
Number of decimals to round by.
"""
return wrap_expr(self._pyexpr.round(decimals))
def dot(self, other: Union["Expr", str]) -> "Expr":
"""
Compute the dot/inner product between two Expressions
Parameters
----------
other
Expression to compute dot product with
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [1, 3, 5],
... "b": [2, 4, 6],
... }
... )
>>> df.select(pl.col("a").dot(pl.col("b")))
shape: (1, 1)
┌─────┐
│ a │
│ --- │
│ i64 │
╞═════╡
│ 44 │
└─────┘
"""
other = expr_to_lit_or_expr(other, str_to_lit=False)
return wrap_expr(self._pyexpr.dot(other._pyexpr))
def mode(self) -> "Expr":
"""
Compute the most occurring value(s). Can return multiple Values
"""
return wrap_expr(self._pyexpr.mode())
def cast(self, dtype: Union[Type[Any], DataType], strict: bool = True) -> "Expr":
"""
Cast between data types.
Parameters
----------
dtype
DataType to cast to
strict
Throw an error if a cast could not be done for instance due to an overflow
Examples
--------
>>> df = pl.DataFrame({"a": [1, 2, 3], "b": ["4", "5", "6"]})
>>> df.with_columns(
... [
... pl.col("a").cast(pl.Float64),
... pl.col("b").cast(pl.Int32),
... ]
... )
shape: (3, 2)
┌─────┬─────┐
│ a ┆ b │
│ --- ┆ --- │
│ f64 ┆ i32 │
╞═════╪═════╡
│ 1.0 ┆ 4 │
├╌╌╌╌╌┼╌╌╌╌╌┤
│ 2.0 ┆ 5 │
├╌╌╌╌╌┼╌╌╌╌╌┤
│ 3.0 ┆ 6 │
└─────┴─────┘
"""
dtype = py_type_to_dtype(dtype) # type: ignore
return wrap_expr(self._pyexpr.cast(dtype, strict))
def sort(self, reverse: bool = False, nulls_last: bool = False) -> "Expr":
"""
Sort this column. In projection/ selection context the whole column is sorted.
If used in a groupby context, the groups are sorted.
Parameters
----------
reverse
False -> order from small to large.
True -> order from large to small.
nulls_last
If True nulls are considered to be larger than any valid value
"""
return wrap_expr(self._pyexpr.sort_with(reverse, nulls_last))
def arg_sort(self, reverse: bool = False) -> "Expr":
"""
Get the index values that would sort this column.
Parameters
----------
reverse
False -> order from small to large.
True -> order from large to small.
Returns
-------
out
Series of type UInt32
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [20, 10, 30],
... }
... )
>>> df.select(pl.col("a").arg_sort())
shape: (3, 1)
┌─────┐
│ a │
│ --- │
│ u32 │
╞═════╡
│ 1 │
├╌╌╌╌╌┤
│ 0 │
├╌╌╌╌╌┤
│ 2 │
└─────┘
"""
return wrap_expr(self._pyexpr.arg_sort(reverse))
def arg_max(self) -> "Expr":
"""
Get the index of the maximal value.
"""
return wrap_expr(self._pyexpr.arg_max())
def arg_min(self) -> "Expr":
"""
Get the index of the minimal value.
"""
return wrap_expr(self._pyexpr.arg_min())
def sort_by(
self,
by: Union["Expr", str, List[Union["Expr", str]]],
reverse: Union[bool, List[bool]] = False,
) -> "Expr":
"""
Sort this column by the ordering of another column, or multiple other columns.
In projection/ selection context the whole column is sorted.
If used in a groupby context, the groups are sorted.
Parameters
----------
by
The column(s) used for sorting.
reverse
False -> order from small to large.
True -> order from large to small.
"""
if not isinstance(by, list):
by = [by]
if not isinstance(reverse, list):
reverse = [reverse]
by = selection_to_pyexpr_list(by)
return wrap_expr(self._pyexpr.sort_by(by, reverse))
def take(self, index: Union[List[int], "Expr", "pli.Series", np.ndarray]) -> "Expr":
"""
Take values by index.
Parameters
----------
index
An expression that leads to a UInt32 dtyped Series.
Returns
-------
Values taken by index
Examples
--------
>>> df = pl.DataFrame(
... {
... "group": [
... "one",
... "one",
... "one",
... "two",
... "two",
... "two",
... ],
... "value": [1, 98, 2, 3, 99, 4],
... }
... )
>>> df.groupby("group").agg(pl.col("value").take(1))
shape: (2, 2)
┌───────┬───────┐
│ group ┆ value │
│ --- ┆ --- │
│ str ┆ i64 │
╞═══════╪═══════╡
│ one ┆ 98 │
├╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ two ┆ 99 │
└───────┴───────┘
"""
if isinstance(index, (list, np.ndarray)):
index_lit = pli.lit(pli.Series("", index, dtype=UInt32))
else:
index_lit = pli.expr_to_lit_or_expr(index, str_to_lit=False)
return pli.wrap_expr(self._pyexpr.take(index_lit._pyexpr))
def shift(self, periods: int = 1) -> "Expr":
"""
Shift the values by a given period and fill the parts that will be empty due to this operation
with nulls.
Parameters
----------
periods
Number of places to shift (may be negative).
"""
return wrap_expr(self._pyexpr.shift(periods))
def shift_and_fill(
self, periods: int, fill_value: Union[int, float, bool, str, "Expr"]
) -> "Expr":
"""
Shift the values by a given period and fill the parts that will be empty due to this operation
with the result of the `fill_value` expression.
Parameters
----------
periods
Number of places to shift (may be negative).
fill_value
Fill None values with the result of this expression.
"""
fill_value = expr_to_lit_or_expr(fill_value, str_to_lit=True)
return wrap_expr(self._pyexpr.shift_and_fill(periods, fill_value._pyexpr))
def fill_null(self, fill_value: Union[int, float, bool, str, "Expr"]) -> "Expr":
"""
Fill null values using a filling strategy, literal, or Expr.
Parameters
----------
fill_value
One of:
- "backward"
- "forward"
- "min"
- "max"
- "mean"
- "one"
- "zero"
Or an expression.
Examples
--------
>>> df = pl.DataFrame({"a": [1, 2, None], "b": [4, None, 6]})
>>> df.fill_null("zero")
shape: (3, 2)
┌─────┬─────┐
│ a ┆ b │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 1 ┆ 4 │
├╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 0 │
├╌╌╌╌╌┼╌╌╌╌╌┤
│ 0 ┆ 6 │
└─────┴─────┘
>>> df.fill_null(99)
shape: (3, 2)
┌─────┬─────┐
│ a ┆ b │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 1 ┆ 4 │
├╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 99 │
├╌╌╌╌╌┼╌╌╌╌╌┤
│ 99 ┆ 6 │
└─────┴─────┘
"""
# we first must check if it is not an expr, as expr does not implement __bool__
# and thus leads to a value error in the second comparison.
if not isinstance(fill_value, Expr) and fill_value in [
"backward",
"forward",
"min",
"max",
"mean",
"zero",
"one",
]:
return wrap_expr(self._pyexpr.fill_null_with_strategy(fill_value))
fill_value = expr_to_lit_or_expr(fill_value, str_to_lit=True)
return wrap_expr(self._pyexpr.fill_null(fill_value._pyexpr))
def fill_nan(self, fill_value: Union[str, int, float, bool, "Expr"]) -> "Expr":
"""
Fill floating point NaN value with a fill value
"""
fill_value = expr_to_lit_or_expr(fill_value, str_to_lit=True)
return wrap_expr(self._pyexpr.fill_nan(fill_value._pyexpr))
def forward_fill(self) -> "Expr":
"""
Fill missing values with the latest seen values
"""
return wrap_expr(self._pyexpr.forward_fill())
def backward_fill(self) -> "Expr":
"""
Fill missing values with the next to be seen values
"""
return wrap_expr(self._pyexpr.backward_fill())
def reverse(self) -> "Expr":
"""
Reverse the selection.
"""
return wrap_expr(self._pyexpr.reverse())
def std(self) -> "Expr":
"""
Get standard deviation.
"""
return wrap_expr(self._pyexpr.std())
def var(self) -> "Expr":
"""
Get variance.
"""
return wrap_expr(self._pyexpr.var())
def max(self) -> "Expr":
"""
Get maximum value.
"""
return wrap_expr(self._pyexpr.max())
def min(self) -> "Expr":
"""
Get minimum value.
"""
return wrap_expr(self._pyexpr.min())
def sum(self) -> "Expr":
"""
Get sum value.
Notes
-----
Dtypes in {Int8, UInt8, Int16, UInt16} are cast to
Int64 before summing to prevent overflow issues.
"""
return wrap_expr(self._pyexpr.sum())
def mean(self) -> "Expr":
"""
Get mean value.
"""
return wrap_expr(self._pyexpr.mean())
def median(self) -> "Expr":
"""
Get median value using linear interpolation.
"""
return wrap_expr(self._pyexpr.median())
def product(self) -> "Expr":
"""
Compute the product of an expression
"""
return wrap_expr(self._pyexpr.product())
def n_unique(self) -> "Expr":
"""Count unique values."""
return wrap_expr(self._pyexpr.n_unique())
def arg_unique(self) -> "Expr":
"""Get index of first unique value."""
return wrap_expr(self._pyexpr.arg_unique())
def unique(self, maintain_order: bool = False) -> "Expr":
"""
Get unique values of this expression.
Parameters
----------
maintain_order
Maintain order of data. This requires more work.
"""
if maintain_order:
return wrap_expr(self._pyexpr.unique_stable())
return wrap_expr(self._pyexpr.unique())
def first(self) -> "Expr":
"""
Get the first value.
"""
return wrap_expr(self._pyexpr.first())
def last(self) -> "Expr":
"""
Get the last value.
"""
return wrap_expr(self._pyexpr.last())
def list(self) -> "Expr":
"""
Aggregate to list.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [1, 2, 3],
... "b": [4, 5, 6],
... }
... )
>>> df.select(pl.all().list())
shape: (1, 2)
┌────────────┬────────────┐
│ a ┆ b │
│ --- ┆ --- │
│ list [i64] ┆ list [i64] │
╞════════════╪════════════╡
│ [1, 2, 3] ┆ [4, 5, 6] │
└────────────┴────────────┘
"""
return wrap_expr(self._pyexpr.list())
def over(self, expr: Union[str, "Expr", List[Union["Expr", str]]]) -> "Expr":
"""
Apply window function over a subgroup.
This is similar to a groupby + aggregation + self join.
Or similar to [window functions in Postgres](https://www.postgresql.org/docs/9.1/tutorial-window.html)
Parameters
----------
expr
Column(s) to group by.
Examples
--------
>>> df = pl.DataFrame(
... {
... "groups": [1, 1, 2, 2, 1, 2, 3, 3, 1],
... "values": [1, 2, 3, 4, 5, 6, 7, 8, 8],
... }
... )
>>> (
... df.lazy()
... .select(
... [
... pl.col("groups").sum().over("groups"),
... ]
... )
... .collect()
... )
shape: (9, 1)
┌────────┐
│ groups │
│ --- │
│ i64 │
╞════════╡
│ 4 │
├╌╌╌╌╌╌╌╌┤
│ 4 │
├╌╌╌╌╌╌╌╌┤
│ 6 │
├╌╌╌╌╌╌╌╌┤
│ 6 │
├╌╌╌╌╌╌╌╌┤
│ ... │
├╌╌╌╌╌╌╌╌┤
│ 6 │
├╌╌╌╌╌╌╌╌┤
│ 6 │
├╌╌╌╌╌╌╌╌┤
│ 6 │
├╌╌╌╌╌╌╌╌┤
│ 4 │
└────────┘
"""
pyexprs = selection_to_pyexpr_list(expr)
return wrap_expr(self._pyexpr.over(pyexprs))
def is_unique(self) -> "Expr":
"""
Get mask of unique values.
"""
return wrap_expr(self._pyexpr.is_unique())
def is_first(self) -> "Expr":
"""
Get a mask of the first unique value.
Returns
-------
Boolean Series
Examples
--------
>>> df = pl.DataFrame(
... {
... "num": [1, 2, 3, 1, 5],
... }
... )
>>> df.with_column(pl.col("num").is_first().alias("is_first"))
shape: (5, 2)
┌─────┬──────────┐
│ num ┆ is_first │
│ --- ┆ --- │
│ i64 ┆ bool │
╞═════╪══════════╡
│ 1 ┆ true │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┤
│ 2 ┆ true │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┤
│ 3 ┆ true │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┤
│ 1 ┆ false │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┤
│ 5 ┆ true │
└─────┴──────────┘
"""
return wrap_expr(self._pyexpr.is_first())
def is_duplicated(self) -> "Expr":
"""
Get mask of duplicated values.
"""
return wrap_expr(self._pyexpr.is_duplicated())
def quantile(self, quantile: float, interpolation: str = "nearest") -> "Expr":
"""
Get quantile value.
Parameters
----------
quantile
quantile between 0.0 and 1.0
interpolation
interpolation type, options: ['nearest', 'higher', 'lower', 'midpoint', 'linear']
"""
return wrap_expr(self._pyexpr.quantile(quantile, interpolation))
def filter(self, predicate: "Expr") -> "Expr":
"""
Filter a single column.
Mostly useful in an aggregation context. If you want to filter on a DataFrame level, use `LazyFrame.filter`.
Parameters
----------
predicate
Boolean expression.
"""
return wrap_expr(self._pyexpr.filter(predicate._pyexpr))
def where(self, predicate: "Expr") -> "Expr":
"""
Alias for filter
Parameters
----------
predicate
Boolean expression.
"""
return self.filter(predicate)
def map(
self,
f: Callable[["pli.Series"], "pli.Series"],
return_dtype: Optional[Type[DataType]] = None,
agg_list: bool = False,
) -> "Expr":
"""
Apply a custom python function. This function must produce a `Series`. Any other value will be stored as
null/missing. If you want to apply a function over single values, consider using `apply`.
[read more in the book](https://pola-rs.github.io/polars-book/user-guide/howcani/apply/udfs.html)
Parameters
----------
f
Lambda/ function to apply.
return_dtype
Dtype of the output Series.
agg_list
"""
if return_dtype is not None:
return_dtype = py_type_to_dtype(return_dtype)
return wrap_expr(self._pyexpr.map(f, return_dtype, agg_list))
def apply(
self,
f: Union[Callable[["pli.Series"], "pli.Series"], Callable[[Any], Any]],
return_dtype: Optional[Type[DataType]] = None,
) -> "Expr":
"""
Apply a custom function in a GroupBy or Projection context.
Depending on the context it has the following behavior:
## Context
* Select/Project
expected type `f`: Callable[[Any], Any]
Applies a python function over each individual value in the column.
* GroupBy
expected type `f`: Callable[[Series], Series]
Applies a python function over each group.
Parameters
----------
f
Lambda/ function to apply.
return_dtype
Dtype of the output Series.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [1, 2, 1, 1],
... "b": ["a", "b", "c", "c"],
... }
... )
>>> (
... df.lazy()
... .groupby("b", maintain_order=True)
... .agg(
... [
... pl.col("a").apply(lambda x: x.sum()),
... ]
... )
... .collect()
... )
shape: (3, 2)
┌─────┬─────┐
│ b ┆ a │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════╪═════╡
│ a ┆ 1 │
├╌╌╌╌╌┼╌╌╌╌╌┤
│ b ┆ 2 │
├╌╌╌╌╌┼╌╌╌╌╌┤
│ c ┆ 2 │
└─────┴─────┘
"""
# input x: Series of type list containing the group values
def wrap_f(x: "pli.Series") -> "pli.Series": # pragma: no cover
return x.apply(f, return_dtype=return_dtype)
return self.map(wrap_f, agg_list=True)
def flatten(self) -> "Expr":
"""
Alias for explode.
Explode a list or utf8 Series. This means that every item is expanded to a new row.
Returns
-------
Exploded Series of same dtype
"""
return wrap_expr(self._pyexpr.explode())
def explode(self) -> "Expr":
"""
Explode a list or utf8 Series. This means that every item is expanded to a new row.
Returns
-------
Exploded Series of same dtype
Examples
--------
>>> df = pl.DataFrame({"b": [[1, 2, 3], [4, 5, 6]]})
>>> df.select(pl.col("b").explode())
shape: (6, 1)
┌─────┐
│ b │
│ --- │
│ i64 │
╞═════╡
│ 1 │
├╌╌╌╌╌┤
│ 2 │
├╌╌╌╌╌┤
│ 3 │
├╌╌╌╌╌┤
│ 4 │
├╌╌╌╌╌┤
│ 5 │
├╌╌╌╌╌┤
│ 6 │
└─────┘
"""
return wrap_expr(self._pyexpr.explode())
def take_every(self, n: int) -> "Expr":
"""
Take every nth value in the Series and return as a new Series.
"""
return wrap_expr(self._pyexpr.take_every(n))
def head(self, n: Optional[Union[int, "Expr"]] = None) -> "Expr":
"""
Take the first n values.
"""
if isinstance(n, Expr):
return self.slice(0, n)
return wrap_expr(self._pyexpr.head(n))
def tail(self, n: Optional[int] = None) -> "Expr":
"""
Take the last n values.
"""
return wrap_expr(self._pyexpr.tail(n))
def pow(self, exponent: float) -> "Expr":
"""
Raise expression to the power of exponent.
"""
return wrap_expr(self._pyexpr.pow(exponent))
def is_in(self, other: Union["Expr", List[Any]]) -> "Expr":
"""
Check if elements of this Series are in the right Series, or List values of the right Series.
Parameters
----------
other
Series of primitive type or List type.
Returns
-------
Expr that evaluates to a Boolean Series.
Examples
--------
>>> df = pl.DataFrame(
... {"sets": [[1, 2, 3], [1, 2], [9, 10]], "optional_members": [1, 2, 3]}
... )
>>> df.select([pl.col("optional_members").is_in("sets").alias("contains")])
shape: (3, 1)
┌──────────┐
│ contains │
│ --- │
│ bool │
╞══════════╡
│ true │
├╌╌╌╌╌╌╌╌╌╌┤
│ true │
├╌╌╌╌╌╌╌╌╌╌┤
│ false │
└──────────┘
"""
if isinstance(other, list):
other = pli.lit(pli.Series(other))
else:
other = expr_to_lit_or_expr(other, str_to_lit=False)
return wrap_expr(self._pyexpr.is_in(other._pyexpr))
def repeat_by(self, by: Union["Expr", str]) -> "Expr":
"""
Repeat the elements in this Series as many times as dictated by the corresponding number in `by`.
The elements are expanded into a `List`
Parameters
----------
by
Numeric column that determines how often the values will be repeated.
The column will be coerced to UInt32. Give this dtype to make the coercion a no-op.
Returns
-------
Series of type List
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": ["x", "y", "z"],
... "n": [1, 2, 3],
... }
... )
>>> df.select(pl.col("a").repeat_by("n"))
shape: (3, 1)
┌─────────────────┐
│ a │
│ --- │
│ list [str] │
╞═════════════════╡
│ ["x"] │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ ["y", "y"] │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ ["z", "z", "z"] │
└─────────────────┘
"""
by = expr_to_lit_or_expr(by, False)
return wrap_expr(self._pyexpr.repeat_by(by._pyexpr))
def is_between(
self,
start: Union["Expr", datetime],
end: Union["Expr", datetime],
include_bounds: Union[bool, Sequence[bool]] = False,
) -> "Expr":
"""
Check if this expression is between start and end.
Parameters
----------
start
Lower bound as primitive type or datetime.
end
Upper bound as primitive type or datetime.
include_bounds
False: Exclude both start and end (default).
True: Include both start and end.
[False, False]: Exclude start and exclude end.
[True, True]: Include start and include end.
[False, True]: Exclude start and include end.
[True, False]: Include start and exclude end.
Returns
-------
Expr that evaluates to a Boolean Series.
Examples
--------
>>> df = pl.DataFrame(
... {
... "num": [1, 2, 3, 4, 5],
... }
... )
>>> df.with_column(pl.col("num").is_between(2, 4))
shape: (5, 2)
┌─────┬────────────┐
│ num ┆ is_between │
│ --- ┆ --- │
│ i64 ┆ bool │
╞═════╪════════════╡
│ 1 ┆ false │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2 ┆ false │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 3 ┆ true │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 4 ┆ false │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 5 ┆ false │
└─────┴────────────┘
"""
cast_to_datetime = False
if isinstance(start, datetime):
start = pli.lit(start)
cast_to_datetime = True
if isinstance(end, datetime):
end = pli.lit(end)
cast_to_datetime = True
if cast_to_datetime:
expr = self.cast(Datetime)
else:
expr = self
if include_bounds is False or include_bounds == [False, False]:
return ((expr > start) & (expr < end)).alias("is_between")
elif include_bounds is True or include_bounds == [True, True]:
return ((expr >= start) & (expr <= end)).alias("is_between")
elif include_bounds == [False, True]:
return ((expr > start) & (expr <= end)).alias("is_between")
elif include_bounds == [True, False]:
return ((expr >= start) & (expr < end)).alias("is_between")
else:
raise ValueError(
"include_bounds should be a boolean or [boolean, boolean]."
)
def hash(self, k0: int = 0, k1: int = 1, k2: int = 2, k3: int = 3) -> "Expr":
"""
Hash the Series.
The hash value is of type `UInt64`
Parameters
----------
k0
seed parameter
k1
seed parameter
k2
seed parameter
k3
seed parameter
"""
return wrap_expr(self._pyexpr.hash(k0, k1, k2, k3))
def reinterpret(self, signed: bool) -> "Expr":
"""
Reinterpret the underlying bits as a signed/unsigned integer.
This operation is only allowed for 64bit integers. For integers with fewer bits,
you can safely use the cast operation.
Parameters
----------
signed
True -> pl.Int64
False -> pl.UInt64
"""
return wrap_expr(self._pyexpr.reinterpret(signed))
def inspect(self, fmt: str = "{}") -> "Expr":
"""
Prints the value that this expression evaluates to and passes on the value.
>>> df = pl.DataFrame({"foo": [1, 1, 2]})
>>> df.select(pl.col("foo").cumsum().inspect("value is: {}").alias("bar"))
value is: shape: (3,)
Series: 'foo' [i64]
[
1
2
4
]
shape: (3, 1)
┌─────┐
│ bar │
│ --- │
│ i64 │
╞═════╡
│ 1 │
├╌╌╌╌╌┤
│ 2 │
├╌╌╌╌╌┤
│ 4 │
└─────┘
"""
def inspect(s: "pli.Series") -> "pli.Series": # pragma: no cover
print(fmt.format(s))
return s
return self.map(inspect, return_dtype=None, agg_list=True)
def interpolate(self) -> "Expr":
"""
Interpolate intermediate values. The interpolation method is linear.
"""
return wrap_expr(self._pyexpr.interpolate())
def rolling_min(
self,
window_size: int,
weights: Optional[List[float]] = None,
min_periods: Optional[int] = None,
center: bool = False,
) -> "Expr":
"""
apply a rolling min (moving min) over the values in this array.
A window of length `window_size` will traverse the array. The values that fill this window
will (optionally) be multiplied with the weights given by the `weight` vector. The resulting
values will be aggregated to their minimum.
Parameters
----------
window_size
The length of the window.
weights
An optional slice with the same length as the window that will be multiplied
elementwise with the values in the window.
min_periods
The number of values in the window that should be non-null before computing a result.
If None, it will be set equal to window size.
center
Set the labels at the center of the window
"""
if min_periods is None:
min_periods = window_size
return wrap_expr(
self._pyexpr.rolling_min(window_size, weights, min_periods, center)
)
def rolling_max(
self,
window_size: int,
weights: Optional[List[float]] = None,
min_periods: Optional[int] = None,
center: bool = False,
) -> "Expr":
"""
Apply a rolling max (moving max) over the values in this array.
A window of length `window_size` will traverse the array. The values that fill this window
will (optionally) be multiplied with the weights given by the `weight` vector. The resulting
values will be aggregated to their maximum.
Parameters
----------
window_size
The length of the window.
weights
An optional slice with the same length as the window that will be multiplied
elementwise with the values in the window.
min_periods
The number of values in the window that should be non-null before computing a result.
If None, it will be set equal to window size.
center
Set the labels at the center of the window
"""
if min_periods is None:
min_periods = window_size
return wrap_expr(
self._pyexpr.rolling_max(window_size, weights, min_periods, center)
)
def rolling_mean(
self,
window_size: int,
weights: Optional[List[float]] = None,
min_periods: Optional[int] = None,
center: bool = False,
) -> "Expr":
"""
Apply a rolling mean (moving mean) over the values in this array.
A window of length `window_size` will traverse the array. The values that fill this window
will (optionally) be multiplied with the weights given by the `weight` vector. The resulting
values will be aggregated to their mean.
Parameters
----------
window_size
The length of the window.
weights
An optional slice with the same length as the window that will be multiplied
elementwise with the values in the window.
min_periods
The number of values in the window that should be non-null before computing a result.
If None, it will be set equal to window size.
center
Set the labels at the center of the window
Examples
--------
>>> df = pl.DataFrame({"A": [1.0, 8.0, 6.0, 2.0, 16.0, 10.0]})
>>> df.select(
... [
... pl.col("A").rolling_mean(window_size=2),
... ]
... )
shape: (6, 1)
┌──────┐
│ A │
│ --- │
│ f64 │
╞══════╡
│ null │
├╌╌╌╌╌╌┤
│ 4.5 │
├╌╌╌╌╌╌┤
│ 7 │
├╌╌╌╌╌╌┤
│ 4 │
├╌╌╌╌╌╌┤
│ 9 │
├╌╌╌╌╌╌┤
│ 13 │
└──────┘
"""
if min_periods is None:
min_periods = window_size
return wrap_expr(
self._pyexpr.rolling_mean(window_size, weights, min_periods, center)
)
def rolling_sum(
self,
window_size: int,
weights: Optional[List[float]] = None,
min_periods: Optional[int] = None,
center: bool = False,
) -> "Expr":
"""
Apply a rolling sum (moving sum) over the values in this array.
A window of length `window_size` will traverse the array. The values that fill this window
will (optionally) be multiplied with the weights given by the `weight` vector. The resulting
values will be aggregated to their sum.
Parameters
----------
window_size
The length of the window.
weights
An optional slice with the same length of the window that will be multiplied
elementwise with the values in the window.
min_periods
The number of values in the window that should be non-null before computing a result.
If None, it will be set equal to window size.
center
Set the labels at the center of the window
"""
if min_periods is None:
min_periods = window_size
return wrap_expr(
self._pyexpr.rolling_sum(window_size, weights, min_periods, center)
)
def rolling_std(
self,
window_size: int,
weights: Optional[List[float]] = None,
min_periods: Optional[int] = None,
center: bool = False,
) -> "Expr":
"""
Compute a rolling std dev
A window of length `window_size` will traverse the array. The values that fill this window
will (optionally) be multiplied with the weights given by the `weight` vector. The resulting
values will be aggregated to their standard deviation.
Parameters
----------
window_size
The length of the window.
weights
An optional slice with the same length as the window that will be multiplied
elementwise with the values in the window.
min_periods
The number of values in the window that should be non-null before computing a result.
If None, it will be set equal to window size.
center
Set the labels at the center of the window
"""
if min_periods is None:
min_periods = window_size
return wrap_expr(
self._pyexpr.rolling_std(window_size, weights, min_periods, center)
)
def rolling_var(
self,
window_size: int,
weights: Optional[List[float]] = None,
min_periods: Optional[int] = None,
center: bool = False,
) -> "Expr":
"""
Compute a rolling variance.
A window of length `window_size` will traverse the array. The values that fill this window
will (optionally) be multiplied with the weights given by the `weight` vector. The resulting
values will be aggregated to their variance.
Parameters
----------
window_size
The length of the window.
weights
An optional slice with the same length as the window that will be multiplied
elementwise with the values in the window.
min_periods
The number of values in the window that should be non-null before computing a result.
If None, it will be set equal to window size.
center
Set the labels at the center of the window
"""
if min_periods is None:
min_periods = window_size
return wrap_expr(
self._pyexpr.rolling_var(window_size, weights, min_periods, center)
)
def rolling_apply(
self,
function: Callable[["pli.Series"], Any],
window_size: int,
weights: Optional[List[float]] = None,
min_periods: Optional[int] = None,
center: bool = False,
) -> "Expr":
"""
Allows a custom rolling window function.
Prefer the specific rolling window functions over this one, as they are faster.
Prefer:
* rolling_min
* rolling_max
* rolling_mean
* rolling_sum
Parameters
----------
function
Aggregation function
window_size
The length of the window.
weights
An optional slice with the same length as the window that will be multiplied
elementwise with the values in the window.
min_periods
The number of values in the window that should be non-null before computing a result.
If None, it will be set equal to window size.
center
Set the labels at the center of the window
Examples
--------
>>> df = pl.DataFrame(
... {
... "A": [1.0, 2.0, 9.0, 2.0, 13.0],
... }
... )
>>> df.select(
... [
... pl.col("A").rolling_apply(lambda s: s.std(), window_size=3),
... ]
... )
shape: (5, 1)
┌────────────────────┐
│ A │
│ --- │
│ f64 │
╞════════════════════╡
│ null │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ null │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 4.358898943540674 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 4.041451884327381 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 5.5677643628300215 │
└────────────────────┘
"""
if min_periods is None:
min_periods = window_size
return wrap_expr(
self._pyexpr.rolling_apply(
function, window_size, weights, min_periods, center
)
)
def rolling_median(
self,
window_size: int,
weights: Optional[List[float]] = None,
min_periods: Optional[int] = None,
center: bool = False,
) -> "Expr":
"""
Compute a rolling median
Parameters
----------
window_size
Size of the rolling window
weights
An optional slice with the same length as the window that will be multiplied
elementwise with the values in the window.
min_periods
The number of values in the window that should be non-null before computing a result.
If None, it will be set equal to window size.
center
Set the labels at the center of the window
"""
if min_periods is None:
min_periods = window_size
return wrap_expr(
self._pyexpr.rolling_median(window_size, weights, min_periods, center)
)
def rolling_quantile(
self,
quantile: float,
interpolation: str = "nearest",
window_size: int = 2,
weights: Optional[List[float]] = None,
min_periods: Optional[int] = None,
center: bool = False,
) -> "Expr":
"""
Compute a rolling quantile
Parameters
----------
quantile
quantile to compute
interpolation
interpolation type, options: ['nearest', 'higher', 'lower', 'midpoint', 'linear']
window_size
The length of the window.
weights
An optional slice with the same length as the window that will be multiplied
elementwise with the values in the window.
min_periods
The number of values in the window that should be non-null before computing a result.
If None, it will be set equal to window size.
center
Set the labels at the center of the window
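Examples
--------
A minimal usage sketch (the column name, quantile and window size are illustrative; output omitted):
>>> df = pl.DataFrame({"A": [1.0, 2.0, 9.0, 2.0, 13.0]})
>>> df.select(pl.col("A").rolling_quantile(quantile=0.5, window_size=3))  # doctest: +SKIP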
"""
if min_periods is None:
min_periods = window_size
return wrap_expr(
self._pyexpr.rolling_quantile(
quantile, interpolation, window_size, weights, min_periods, center
)
)
def rolling_skew(self, window_size: int, bias: bool = True) -> "Expr":
"""
Compute a rolling skew
Parameters
----------
window_size
Size of the rolling window
bias
If False, then the calculations are corrected for statistical bias.
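Examples
--------
A minimal usage sketch (the column name and values are illustrative; output omitted):
>>> df = pl.DataFrame({"A": [1.0, 2.0, 9.0, 2.0, 13.0]})
>>> df.select(pl.col("A").rolling_skew(window_size=3))  # doctest: +SKIP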
"""
return wrap_expr(self._pyexpr.rolling_skew(window_size, bias))
def abs(self) -> "Expr":
"""
Take absolute values
"""
return wrap_expr(self._pyexpr.abs())
def argsort(self, reverse: bool = False) -> "Expr":
"""
alias for `arg_sort`
"""
return self.arg_sort(reverse)
def rank(self, method: str = "average", reverse: bool = False) -> "Expr":
"""
Assign ranks to data, dealing with ties appropriately.
Parameters
----------
method
{'average', 'min', 'max', 'dense', 'ordinal', 'random'}, optional
The method used to assign ranks to tied elements.
The following methods are available (default is 'average'):
- 'average': The average of the ranks that would have been assigned to
all the tied values is assigned to each value.
- 'min': The minimum of the ranks that would have been assigned to all
the tied values is assigned to each value. (This is also
referred to as "competition" ranking.)
- 'max': The maximum of the ranks that would have been assigned to all
the tied values is assigned to each value.
- 'dense': Like 'min', but the rank of the next highest element is
assigned the rank immediately after those assigned to the tied
elements.
- 'ordinal': All values are given a distinct rank, corresponding to
the order that the values occur in `a`.
- 'random': Like 'ordinal', but the rank for ties is not dependent
on the order that the values occur in `a`.
reverse
reverse the operation
Examples
--------
>>> df = pl.DataFrame({"a": [0, 1, 2, 2, 4]})
>>> df.select(pl.col("a").rank())
shape: (5, 1)
┌─────┐
│ a │
│ --- │
│ f32 │
╞═════╡
│ 1.0 │
├╌╌╌╌╌┤
│ 2.0 │
├╌╌╌╌╌┤
│ 3.5 │
├╌╌╌╌╌┤
│ 3.5 │
├╌╌╌╌╌┤
│ 5.0 │
└─────┘
"""
return wrap_expr(self._pyexpr.rank(method, reverse))
def diff(self, n: int = 1, null_behavior: str = "ignore") -> "Expr":
"""
Calculate the n-th discrete difference.
Parameters
----------
n
number of slots to shift
null_behavior
{'ignore', 'drop'}
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [20, 10, 30],
... }
... )
>>> df.select(pl.col("a").diff())
shape: (3, 1)
┌──────┐
│ a │
│ --- │
│ i64 │
╞══════╡
│ null │
├╌╌╌╌╌╌┤
│ -10 │
├╌╌╌╌╌╌┤
│ 20 │
└──────┘
"""
return wrap_expr(self._pyexpr.diff(n, null_behavior))
def pct_change(self, n: int = 1) -> "Expr":
"""
Percentage change (as fraction) between current element and most-recent
non-null element at least n period(s) before the current element.
Computes the change from the previous row by default.
Parameters
----------
n
periods to shift for forming percent change.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [10, 11, 12, None, 12],
... }
... )
>>> df.with_column(pl.col("a").pct_change().alias("pct_change"))
shape: (5, 2)
┌──────┬────────────┐
│ a ┆ pct_change │
│ --- ┆ --- │
│ i64 ┆ f64 │
╞══════╪════════════╡
│ 10 ┆ null │
├╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 11 ┆ 0.1 │
├╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 12 ┆ 0.090909 │
├╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ null ┆ 0.0 │
├╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 12 ┆ 0.0 │
└──────┴────────────┘
"""
return wrap_expr(self._pyexpr.pct_change(n))
def skew(self, bias: bool = True) -> "Expr":
r"""Compute the sample skewness of a data set.
For normally distributed data, the skewness should be about zero. For
unimodal continuous distributions, a skewness value greater than zero means
that there is more weight in the right tail of the distribution. The
function `skewtest` can be used to determine if the skewness value
is close enough to zero, statistically speaking.
See scipy.stats for more information.
Parameters
----------
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
Notes
-----
The sample skewness is computed as the Fisher-Pearson coefficient
of skewness, i.e.
.. math:: g_1=\frac{m_3}{m_2^{3/2}}
where
.. math:: m_i=\frac{1}{N}\sum_{n=1}^N(x[n]-\bar{x})^i
is the biased sample :math:`i\texttt{th}` central moment, and
:math:`\bar{x}` is
the sample mean. If ``bias`` is False, the calculations are
corrected for bias and the value computed is the adjusted
Fisher-Pearson standardized moment coefficient, i.e.
.. math:: G_1=\frac{k_3}{k_2^{3/2}}= \frac{\sqrt{N(N-1)}}{N-2}\frac{m_3}{m_2^{3/2}}
"""
return wrap_expr(self._pyexpr.skew(bias))
def kurtosis(self, fisher: bool = True, bias: bool = True) -> "Expr":
"""Compute the kurtosis (Fisher or Pearson) of a dataset.
Kurtosis is the fourth central moment divided by the square of the
variance. If Fisher's definition is used, then 3.0 is subtracted from
the result to give 0.0 for a normal distribution.
If bias is False then the kurtosis is calculated using k statistics to
eliminate bias coming from biased moment estimators
See scipy.stats for more information
Parameters
----------
fisher : bool, optional
If True, Fisher's definition is used (normal ==> 0.0). If False,
Pearson's definition is used (normal ==> 3.0).
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
"""
return wrap_expr(self._pyexpr.kurtosis(fisher, bias))
def clip(self, min_val: Union[int, float], max_val: Union[int, float]) -> "Expr":
"""
Clip (limit) the values in an array to values that fit in the 64-bit floating point range.
Only works for the following dtypes: {Int32, Int64, Float32, Float64, UInt32}.
If you want to clip other dtypes, consider writing a when -> then -> otherwise expression
Parameters
----------
min_val
Minimum value.
max_val
Maximum value.
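Examples
--------
A minimal usage sketch (the column name and bounds are illustrative; output omitted):
>>> df = pl.DataFrame({"a": [-50, 5, None, 50]})
>>> df.select(pl.col("a").clip(1, 10))  # doctest: +SKIP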
"""
return wrap_expr(self._pyexpr.clip(min_val, max_val))
def lower_bound(self) -> "Expr":
"""
Returns a unit Series with the lowest value possible for the dtype of this expression.
"""
return wrap_expr(self._pyexpr.lower_bound())
def upper_bound(self) -> "Expr":
"""
Returns a unit Series with the highest value possible for the dtype of this expression.
"""
return wrap_expr(self._pyexpr.upper_bound())
def sign(self) -> "Expr":
"""
Returns an element-wise indication of the sign of a number.
"""
return np.sign(self) # type: ignore
def sin(self) -> "Expr":
"""
Compute the element-wise value for Trigonometric sine on an array
Returns
-------
Series of dtype Float64
Examples
--------
>>> df = pl.DataFrame({"a": [0.0]})
>>> df.select(pl.col("a").sin())
shape: (1, 1)
┌─────┐
│ a │
│ --- │
│ f64 │
╞═════╡
│ 0.0 │
└─────┘
"""
return np.sin(self) # type: ignore
def cos(self) -> "Expr":
"""
Compute the element-wise value for Trigonometric cosine on an array
Returns
-------
Series of dtype Float64
Examples
--------
>>> df = pl.DataFrame({"a": [0.0]})
>>> df.select(pl.col("a").cos())
shape: (1, 1)
┌─────┐
│ a │
│ --- │
│ f64 │
╞═════╡
│ 1 │
└─────┘
"""
return np.cos(self) # type: ignore
def tan(self) -> "Expr":
"""
Compute the element-wise value for Trigonometric tangent on an array
Returns
-------
Series of dtype Float64
Examples
--------
>>> df = pl.DataFrame({"a": [1.0]})
>>> df.select(pl.col("a").tan().round(2))
shape: (1, 1)
┌──────┐
│ a │
│ --- │
│ f64 │
╞══════╡
│ 1.56 │
└──────┘
"""
return np.tan(self) # type: ignore
def arcsin(self) -> "Expr":
"""
Compute the element-wise value for Trigonometric sine on an array
Returns
-------
Series of dtype Float64
Examples
--------
>>> df = pl.DataFrame({"a": [1.0]})
>>> df.select(pl.col("a").arcsin())
shape: (1, 1)
┌────────────────────┐
│ a │
│ --- │
│ f64 │
╞════════════════════╡
│ 1.5707963267948966 │
└────────────────────┘
"""
return np.arcsin(self) # type: ignore
def arccos(self) -> "Expr":
"""
Compute the element-wise value for Trigonometric cosine on an array
Returns
-------
Series of dtype Float64
Examples
--------
>>> df = pl.DataFrame({"a": [0.0]})
>>> df.select(pl.col("a").arccos())
shape: (1, 1)
┌────────────────────┐
│ a │
│ --- │
│ f64 │
╞════════════════════╡
│ 1.5707963267948966 │
└────────────────────┘
"""
return np.arccos(self) # type: ignore
def arctan(self) -> "Expr":
"""
Compute the element-wise value for Trigonometric tangent on an array
Returns
-------
Series of dtype Float64
Examples
--------
>>> df = pl.DataFrame({"a": [1.0]})
>>> df.select(pl.col("a").arctan())
shape: (1, 1)
┌────────────────────┐
│ a │
│ --- │
│ f64 │
╞════════════════════╡
│ 0.7853981633974483 │
└────────────────────┘
"""
return np.arctan(self) # type: ignore
def reshape(self, dims: Tuple[int, ...]) -> "Expr":
"""
Reshape this Expr to a flat Series, shape: (len,),
or a List Series, shape: (rows, cols).
If a -1 is used in any of the dimensions, that dimension is inferred.
Parameters
----------
dims
Tuple of the dimension sizes
Returns
-------
Expr
"""
return wrap_expr(self._pyexpr.reshape(dims))
def shuffle(self, seed: Optional[int] = None) -> "Expr":
"""
Shuffle the contents of this expr.
Parameters
----------
seed
Seed initialization. If None is given, a random seed is generated using numpy.
"""
if seed is None:
seed = int(np.random.randint(0, 10000))
return wrap_expr(self._pyexpr.shuffle(seed))
def sample(
self,
fraction: float = 1.0,
with_replacement: bool = True,
seed: Optional[int] = None,
) -> "Expr":
"""
Sample a fraction of the `Series`.
Parameters
----------
fraction
Fraction 0.0 <= value <= 1.0
with_replacement
Allow values to be sampled more than once.
seed
Seed initialization. If None given a random seed is used.
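Examples
--------
A minimal usage sketch (the column name, fraction and seed are illustrative; output omitted):
>>> df = pl.DataFrame({"a": [1, 2, 3, 4]})
>>> df.select(pl.col("a").sample(fraction=0.5, with_replacement=False, seed=0))  # doctest: +SKIP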
"""
return wrap_expr(self._pyexpr.sample_frac(fraction, with_replacement, seed))
def ewm_mean(
self,
com: Optional[float] = None,
span: Optional[float] = None,
half_life: Optional[float] = None,
alpha: Optional[float] = None,
adjust: bool = True,
min_periods: int = 1,
) -> "Expr":
r"""
Exponential moving average.
Parameters
----------
com
Specify decay in terms of center of mass, :math:`alpha = 1/(1 + com) \;for\; com >= 0`.
span
Specify decay in terms of span, :math:`alpha = 2/(span + 1) \;for\; span >= 1`
half_life
Specify decay in terms of half-life, :math:`alpha = 1 - exp(-ln(2) / halflife) \;for\; halflife > 0`
alpha
Specify smoothing factor alpha directly, :math:`0 < alpha < 1`.
adjust
Divide by decaying adjustment factor in beginning periods to account for imbalance in relative weightings
- When adjust = True the EW function is calculated using weights :math:`w_i = (1 - alpha)^i`
- When adjust = False the EW function is calculated recursively.
min_periods
Minimum number of observations in window required to have a value (otherwise result is Null).
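Examples
--------
A minimal usage sketch (the column name and decay parameter are illustrative; output omitted):
>>> df = pl.DataFrame({"a": [1.0, 2.0, 3.0]})
>>> df.select(pl.col("a").ewm_mean(com=1.0))  # doctest: +SKIP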
"""
alpha = _prepare_alpha(com, span, half_life, alpha)
return wrap_expr(self._pyexpr.ewm_mean(alpha, adjust, min_periods))
def ewm_std(
self,
com: Optional[float] = None,
span: Optional[float] = None,
half_life: Optional[float] = None,
alpha: Optional[float] = None,
adjust: bool = True,
min_periods: int = 1,
) -> "Expr":
r"""
Exponential moving standard deviation.
Parameters
----------
com
Specify decay in terms of center of mass, :math:`alpha = 1/(1 + com) \;for\; com >= 0`.
span
Specify decay in terms of span, :math:`alpha = 2/(span + 1) \;for\; span >= 1`
half_life
Specify decay in terms of half-life, :math:`alpha = 1 - exp(-ln(2) / halflife) \;for\; halflife > 0`
alpha
Specify smoothing factor alpha directly, :math:`0 < alpha < 1`.
adjust
Divide by decaying adjustment factor in beginning periods to account for imbalance in relative weightings
- When adjust = True the EW function is calculated using weights :math:`w_i = (1 - alpha)^i`
- When adjust = False the EW function is calculated recursively.
min_periods
Minimum number of observations in window required to have a value (otherwise result is Null).
"""
alpha = _prepare_alpha(com, span, half_life, alpha)
return wrap_expr(self._pyexpr.ewm_std(alpha, adjust, min_periods))
def ewm_var(
self,
com: Optional[float] = None,
span: Optional[float] = None,
half_life: Optional[float] = None,
alpha: Optional[float] = None,
adjust: bool = True,
min_periods: int = 1,
) -> "Expr":
r"""
Exponential moving variance.
Parameters
----------
com
Specify decay in terms of center of mass, :math:`alpha = 1/(1 + com) \;for\; com >= 0`.
span
Specify decay in terms of span, :math:`alpha = 2/(span + 1) \;for\; span >= 1`
half_life
Specify decay in terms of half-life, :math:`alpha = 1 - exp(-ln(2) / halflife) \;for\; halflife > 0`
alpha
Specify smoothing factor alpha directly, :math:`0 < alpha < 1`.
adjust
Divide by decaying adjustment factor in beginning periods to account for imbalance in relative weightings
- When adjust = True the EW function is calculated using weights :math:`w_i = (1 - alpha)^i`
- When adjust = False the EW function is calculated recursively.
min_periods
Minimum number of observations in window required to have a value (otherwise result is Null).
"""
alpha = _prepare_alpha(com, span, half_life, alpha)
return wrap_expr(self._pyexpr.ewm_var(alpha, adjust, min_periods))
def extend_constant(
self, value: Optional[Union[int, float, str, bool]], n: int
) -> "Expr":
"""
Extend the Series with given number of values.
Parameters
----------
value
The value to extend the Series with. This value may be None to fill with nulls.
n
The number of values to extend.
Examples
--------
>>> s = pl.Series([1, 2, 3])
>>> s.extend_constant(99, n=2)
shape: (5,)
Series: '' [i64]
[
1
2
3
99
99
]
"""
return wrap_expr(self._pyexpr.extend_constant(value, n))
def value_counts(self, multithreaded: bool = False) -> "Expr":
"""
Count all unique values and create a struct mapping value to count
Parameters
----------
multithreaded:
Better to turn this off in the aggregation context, as it can lead to contention.
Returns
-------
Dtype Struct
Examples
--------
>>> df = pl.DataFrame(
... {
... "id": ["a", "b", "b", "c", "c", "c"],
... }
... )
>>> df.select(
... [
... pl.col("id").value_counts(),
... ]
... )
shape: (3, 1)
┌─────────────────────────────────────┐
│ id │
│ --- │
│ struct[2]{'id': str, 'counts': u32} │
╞═════════════════════════════════════╡
│ {"c",3} │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ {"b",2} │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ {"a",1} │
└─────────────────────────────────────┘
"""
return wrap_expr(self._pyexpr.value_counts(multithreaded))
def unique_counts(self) -> "Expr":
"""
Returns a count of the unique values in the order of appearance.
This method differs from `value_counts` in that it does not return the
values, only the counts, and it might be faster.
Examples
--------
>>> df = pl.DataFrame(
... {
... "id": ["a", "b", "b", "c", "c", "c"],
... }
... )
>>> df.select(
... [
... pl.col("id").unique_counts(),
... ]
... )
shape: (3, 1)
┌─────┐
│ id │
│ --- │
│ u32 │
╞═════╡
│ 1 │
├╌╌╌╌╌┤
│ 2 │
├╌╌╌╌╌┤
│ 3 │
└─────┘
"""
return wrap_expr(self._pyexpr.unique_counts())
def log(self, base: float = math.e) -> "Expr":
"""
Compute the logarithm to a given base
Parameters
----------
base
Given base, defaults to `e`
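Examples
--------
A minimal usage sketch (the column name and base are illustrative; output omitted):
>>> df = pl.DataFrame({"a": [1.0, 2.0, 4.0]})
>>> df.select(pl.col("a").log(base=2))  # doctest: +SKIP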
"""
return wrap_expr(self._pyexpr.log(base))
def entropy(self, base: float = math.e) -> "Expr":
"""
Compute the entropy as `-sum(pk * log(pk))`.
where `pk` are discrete probabilities.
This routine will normalize pk if they don’t sum to 1.
Parameters
----------
base
Given base, defaults to `e`
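Examples
--------
A minimal usage sketch (the column name and base are illustrative; output omitted):
>>> df = pl.DataFrame({"a": [0.25, 0.25, 0.5]})
>>> df.select(pl.col("a").entropy(base=2))  # doctest: +SKIP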
"""
return wrap_expr(self._pyexpr.entropy(base))
# Below are the namespaces defined. Keep these at the end of the definition of Expr, as to not confuse mypy with
# the type annotation `str` with the namespace "str"
@property
def dt(self) -> "ExprDateTimeNameSpace":
"""
Create an object namespace of all datetime related methods.
"""
return ExprDateTimeNameSpace(self)
@property
def str(self) -> "ExprStringNameSpace":
"""
Create an object namespace of all string related methods.
"""
return ExprStringNameSpace(self)
@property
def arr(self) -> "ExprListNameSpace":
"""
Create an object namespace of all list related methods.
"""
return ExprListNameSpace(self)
@property
def cat(self) -> "ExprCatNameSpace":
"""
Create an object namespace of all categorical related methods.
"""
return ExprCatNameSpace(self)
@property
def struct(self) -> "ExprStructNameSpace":
"""
Create an object namespace of all struct related methods.
"""
return ExprStructNameSpace(self)
class ExprStructNameSpace:
"""
Namespace for struct related expressions
"""
def __init__(self, expr: Expr):
self._pyexpr = expr._pyexpr
def field(self, name: str) -> Expr:
"""
Retrieve one of the fields of this `Struct` as a new Series
Parameters
----------
name
Name of the field
Examples
--------
>>> df = (
... pl.DataFrame(
... {
... "int": [1, 2],
... "str": ["a", "b"],
... "bool": [True, None],
... "list": [[1, 2], [3]],
... }
... )
... .to_struct("my_struct")
... .to_frame()
... )
>>> df.select(pl.col("my_struct").struct.field("str"))
shape: (2, 1)
┌─────┐
│ str │
│ --- │
│ str │
╞═════╡
│ a │
├╌╌╌╌╌┤
│ b │
└─────┘
"""
return wrap_expr(self._pyexpr.struct_field_by_name(name))
def rename_fields(self, names: List[str]) -> Expr:
"""
Rename the fields of the struct
Parameters
----------
names
New names in the order of the struct's fields
Examples
--------
>>> df = (
... pl.DataFrame(
... {
... "int": [1, 2],
... "str": ["a", "b"],
... "bool": [True, None],
... "list": [[1, 2], [3]],
... }
... )
... .to_struct("my_struct")
... .to_frame()
... )
>>> df = df.with_column(
... pl.col("my_struct").struct.rename_fields(["INT", "STR", "BOOL", "LIST"])
... )
# does NOT work anymore:
# df.select(pl.col("my_struct").struct.field("int"))
# PanicException: int not found ^^^
>>> df.select(pl.col("my_struct").struct.field("INT"))
shape: (2, 1)
┌─────┐
│ INT │
│ --- │
│ i64 │
╞═════╡
│ 1 │
├╌╌╌╌╌┤
│ 2 │
└─────┘
"""
return wrap_expr(self._pyexpr.struct_rename_fields(names))
class ExprListNameSpace:
"""
Namespace for list related expressions
"""
def __init__(self, expr: Expr):
self._pyexpr = expr._pyexpr
def lengths(self) -> Expr:
"""
Get the length of the arrays as UInt32.
Examples
--------
>>> df = pl.DataFrame({"foo": [1, 2], "bar": [["a", "b"], ["c"]]})
>>> df.select(pl.col("bar").arr.lengths())
shape: (2, 1)
┌─────┐
│ bar │
│ --- │
│ u32 │
╞═════╡
│ 2 │
├╌╌╌╌╌┤
│ 1 │
└─────┘
"""
return wrap_expr(self._pyexpr.arr_lengths())
def sum(self) -> "Expr":
"""
Sum all the arrays in the list
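Examples
--------
A minimal usage sketch (the column name and values are illustrative; output omitted):
>>> df = pl.DataFrame({"values": [[1, 2, 3], [10, 20]]})
>>> df.select(pl.col("values").arr.sum())  # doctest: +SKIP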
"""
return wrap_expr(self._pyexpr.lst_sum())
def max(self) -> "Expr":
"""
Compute the max value of the arrays in the list
"""
return wrap_expr(self._pyexpr.lst_max())
def min(self) -> "Expr":
"""
Compute the min value of the arrays in the list
"""
return wrap_expr(self._pyexpr.lst_min())
def mean(self) -> "Expr":
"""
Compute the mean value of the arrays in the list
"""
return wrap_expr(self._pyexpr.lst_mean())
def sort(self, reverse: bool = False) -> "Expr":
"""
Sort the arrays in the list
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [[3, 2, 1], [9, 1, 2]],
... }
... )
>>> df.select(pl.col("a").arr.sort())
shape: (2, 1)
┌────────────┐
│ a │
│ --- │
│ list [i64] │
╞════════════╡
│ [1, 2, 3] │
├╌╌╌╌╌╌╌╌╌╌╌╌┤
│ [1, 2, 9] │
└────────────┘
"""
return wrap_expr(self._pyexpr.lst_sort(reverse))
def reverse(self) -> "Expr":
"""
Reverse the arrays in the list
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [[3, 2, 1], [9, 1, 2]],
... }
... )
>>> df.select(pl.col("a").arr.reverse())
shape: (2, 1)
┌────────────┐
│ a │
│ --- │
│ list [i64] │
╞════════════╡
│ [1, 2, 3] │
├╌╌╌╌╌╌╌╌╌╌╌╌┤
│ [2, 1, 9] │
└────────────┘
"""
return wrap_expr(self._pyexpr.lst_reverse())
def unique(self) -> "Expr":
"""
Get the unique/distinct values in the list
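Examples
--------
A minimal usage sketch (the column name and values are illustrative; output omitted):
>>> df = pl.DataFrame({"values": [[1, 1, 2], [3, 3, 3]]})
>>> df.select(pl.col("values").arr.unique())  # doctest: +SKIP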
"""
return wrap_expr(self._pyexpr.lst_unique())
def concat(
self, other: Union[List[Union[Expr, str]], Expr, str, "pli.Series", List[Any]]
) -> "Expr":
"""
Concat the arrays in a Series dtype List in linear time.
Parameters
----------
other
Columns to concat into a List Series
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [["a"], ["x"]],
... "b": [["b", "c"], ["y", "z"]],
... }
... )
>>> df.select(pl.col("a").arr.concat("b"))
shape: (2, 1)
┌─────────────────┐
│ a │
│ --- │
│ list [str] │
╞═════════════════╡
│ ["a", "b", "c"] │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ ["x", "y", "z"] │
└─────────────────┘
"""
if isinstance(other, list) and (
not isinstance(other[0], (Expr, str, pli.Series))
):
return self.concat(pli.Series([other]))
other_list: List[Union[Expr, str, "pli.Series"]]
if not isinstance(other, list):
other_list = [other]
else:
other_list = copy.copy(other) # type: ignore
other_list.insert(0, wrap_expr(self._pyexpr))
return pli.concat_list(other_list)
def get(self, index: int) -> "Expr":
"""
Get the value by index in the sublists.
So index `0` would return the first item of every sublist
and index `-1` would return the last item of every sublist.
If an index is out of bounds, it will return `None`.
Parameters
----------
index
Index to return per sublist
Examples
--------
>>> df = pl.DataFrame({"foo": [[3, 2, 1], [], [1, 2]]})
>>> df.select(pl.col("foo").arr.get(0))
shape: (3, 1)
┌──────┐
│ foo │
│ --- │
│ i64 │
╞══════╡
│ 3 │
├╌╌╌╌╌╌┤
│ null │
├╌╌╌╌╌╌┤
│ 1 │
└──────┘
"""
return wrap_expr(self._pyexpr.lst_get(index))
def first(self) -> "Expr":
"""
Get the first value of the sublists.
Examples
--------
>>> df = pl.DataFrame({"foo": [[3, 2, 1], [], [1, 2]]})
>>> df.select(pl.col("foo").arr.first())
shape: (3, 1)
┌──────┐
│ foo │
│ --- │
│ i64 │
╞══════╡
│ 3 │
├╌╌╌╌╌╌┤
│ null │
├╌╌╌╌╌╌┤
│ 1 │
└──────┘
"""
return self.get(0)
def last(self) -> "Expr":
"""
Get the last value of the sublists.
Examples
--------
>>> df = pl.DataFrame({"foo": [[3, 2, 1], [], [1, 2]]})
>>> df.select(pl.col("foo").arr.last())
shape: (3, 1)
┌──────┐
│ foo │
│ --- │
│ i64 │
╞══════╡
│ 1 │
├╌╌╌╌╌╌┤
│ null │
├╌╌╌╌╌╌┤
│ 2 │
└──────┘
"""
return self.get(-1)
def contains(self, item: Union[float, str, bool, int, date, datetime]) -> "Expr":
"""
Check if sublists contain the given item.
Parameters
----------
item
Item that will be checked for membership
Returns
-------
Boolean mask
Examples
--------
>>> df = pl.DataFrame({"foo": [[3, 2, 1], [], [1, 2]]})
>>> df.select(pl.col("foo").arr.contains(1))
shape: (3, 1)
┌───────┐
│ foo │
│ --- │
│ bool │
╞═══════╡
│ true │
├╌╌╌╌╌╌╌┤
│ false │
├╌╌╌╌╌╌╌┤
│ true │
└───────┘
"""
return wrap_expr(self._pyexpr).map(lambda s: s.arr.contains(item))
def join(self, separator: str) -> "Expr":
"""
Join all string items in a sublist and place a separator between them.
This errors if inner type of list `!= Utf8`.
Parameters
----------
separator
string to separate the items with
Returns
-------
Series of dtype Utf8
Examples
--------
>>> df = pl.DataFrame({"s": [["a", "b", "c"], ["x", "y"]]})
>>> df.select(pl.col("s").arr.join(" "))
shape: (2, 1)
┌───────┐
│ s │
│ --- │
│ str │
╞═══════╡
│ a b c │
├╌╌╌╌╌╌╌┤
│ x y │
└───────┘
"""
return wrap_expr(self._pyexpr.lst_join(separator))
def arg_min(self) -> "Expr":
"""
Retrieve the index of the minimal value in every sublist
Returns
-------
Series of dtype UInt32/UInt64 (depending on compilation)
"""
return wrap_expr(self._pyexpr.lst_arg_min())
def arg_max(self) -> "Expr":
"""
Retrieve the index of the maximum value in every sublist
Returns
-------
Series of dtype UInt32/UInt64 (depending on compilation)
"""
return wrap_expr(self._pyexpr.lst_arg_max())
def diff(self, n: int = 1, null_behavior: str = "ignore") -> "Expr":
"""
Calculate the n-th discrete difference of every sublist.
Parameters
----------
n
number of slots to shift
null_behavior
{'ignore', 'drop'}
Examples
--------
>>> s = pl.Series("a", [[1, 2, 3, 4], [10, 2, 1]])
>>> s.arr.diff()
shape: (2,)
Series: 'a' [list]
[
[null, 1, ... 1]
[null, -8, -1]
]
"""
return wrap_expr(self._pyexpr.lst_diff(n, null_behavior))
def shift(self, periods: int = 1) -> "Expr":
"""
Shift the values by a given period and fill the parts that will be empty due to this operation
with nulls.
Parameters
----------
periods
Number of places to shift (may be negative).
Examples
--------
>>> s = pl.Series("a", [[1, 2, 3, 4], [10, 2, 1]])
>>> s.arr.shift()
shape: (2,)
Series: 'a' [list]
[
[null, 1, ... 3]
[null, 10, 2]
]
"""
return wrap_expr(self._pyexpr.lst_shift(periods))
def slice(self, offset: int, length: int) -> "Expr":
"""
Slice every sublist
Parameters
----------
offset
Take the values from this index offset
length
The length of the slice to take
Examples
--------
>>> s = pl.Series("a", [[1, 2, 3, 4], [10, 2, 1]])
>>> s.arr.slice(1, 2)
shape: (2,)
Series: 'a' [list]
[
[2, 3]
[2, 1]
]
"""
return wrap_expr(self._pyexpr.lst_slice(offset, length))
def head(self, n: int = 5) -> "Expr":
"""
Slice the head of every sublist
Parameters
----------
n
How many values to take in the slice.
Examples
--------
>>> s = pl.Series("a", [[1, 2, 3, 4], [10, 2, 1]])
>>> s.arr.head(2)
shape: (2,)
Series: 'a' [list]
[
[1, 2]
[10, 2]
]
"""
return self.slice(0, n)
def tail(self, n: int = 5) -> "Expr":
"""
Slice the tail of every sublist
Parameters
----------
n
How many values to take in the slice.
Examples
--------
>>> s = pl.Series("a", [[1, 2, 3, 4], [10, 2, 1]])
>>> s.arr.tail(2)
shape: (2,)
Series: 'a' [list]
[
[3, 4]
[2, 1]
]
"""
return self.slice(-n, n)
def eval(self, expr: "Expr", parallel: bool = False) -> "Expr":
"""
Run any polars expression against the lists' elements
Parameters
----------
expr
Expression to run. Note that you can select an element with `pl.first()`, or `pl.col()`
parallel
Run all expression parallel. Don't activate this blindly.
Parallelism is worth it if there is enough work to do per thread.
This likely should not be used in the groupby context, because execution is already parallelized per group.
Examples
--------
>>> df = pl.DataFrame({"a": [1, 8, 3], "b": [4, 5, 2]})
>>> df.with_column(
... pl.concat_list(["a", "b"]).arr.eval(pl.first().rank()).alias("rank")
... )
shape: (3, 3)
┌─────┬─────┬────────────┐
│ a ┆ b ┆ rank │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ list [f32] │
╞═════╪═════╪════════════╡
│ 1 ┆ 4 ┆ [1.0, 2.0] │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 8 ┆ 5 ┆ [2.0, 1.0] │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 3 ┆ 2 ┆ [2.0, 1.0] │
└─────┴─────┴────────────┘
"""
return wrap_expr(self._pyexpr.lst_eval(expr._pyexpr, parallel))
class ExprStringNameSpace:
"""
Namespace for string related expressions
"""
def __init__(self, expr: Expr):
self._pyexpr = expr._pyexpr
def strptime(
self,
datatype: Union[Type[Date], Type[Datetime]],
fmt: Optional[str] = None,
strict: bool = True,
exact: bool = True,
) -> Expr:
"""
Parse a utf8 expression to a Date/Datetime type.
Parameters
----------
datatype
Date | Datetime.
fmt
format to use, see the following link for examples:
https://docs.rs/chrono/latest/chrono/format/strftime/index.html
example: "%y-%m-%d".
strict
raise an error if any conversion fails
exact
- If True, require an exact format match.
- If False, allow the format to match anywhere in the target string.
Examples
--------
Dealing with different formats.
>>> s = pl.Series(
... "date",
... [
... "2021-04-22",
... "2022-01-04 00:00:00",
... "01/31/22",
... "Sun Jul 8 00:34:60 2001",
... ],
... )
>>> (
... s.to_frame().with_column(
... pl.col("date")
... .str.strptime(pl.Date, "%F", strict=False)
... .fill_null(
... pl.col("date").str.strptime(pl.Date, "%F %T", strict=False)
... )
... .fill_null(pl.col("date").str.strptime(pl.Date, "%D", strict=False))
... .fill_null(pl.col("date").str.strptime(pl.Date, "%c", strict=False))
... )
... )
shape: (4, 1)
┌────────────┐
│ date │
│ --- │
│ date │
╞════════════╡
│ 2021-04-22 │
├╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2022-01-04 │
├╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2022-01-31 │
├╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2001-07-08 │
└────────────┘
"""
if not issubclass(datatype, DataType):
raise ValueError(
f"expected: {DataType} got: {datatype}"
) # pragma: no cover
if datatype == Date:
return wrap_expr(self._pyexpr.str_parse_date(fmt, strict, exact))
elif datatype == Datetime:
return wrap_expr(self._pyexpr.str_parse_datetime(fmt, strict, exact))
else:
raise ValueError(
"dtype should be of type {Date, Datetime}"
) # pragma: no cover
def lengths(self) -> Expr:
"""
Get the length of the Strings as UInt32.
"""
return wrap_expr(self._pyexpr.str_lengths())
def concat(self, delimiter: str = "-") -> "Expr":
"""
Vertically concat the values in the Series to a single string value.
Returns
-------
Series of dtype Utf8
Examples
--------
>>> df = pl.DataFrame({"foo": [1, None, 2]})
>>> df = df.select(pl.col("foo").str.concat("-"))
>>> df
shape: (1, 1)
┌──────────┐
│ foo │
│ --- │
│ str │
╞══════════╡
│ 1-null-2 │
└──────────┘
"""
return wrap_expr(self._pyexpr.str_concat(delimiter))
def to_uppercase(self) -> Expr:
"""
Transform to uppercase variant.
"""
return wrap_expr(self._pyexpr.str_to_uppercase())
def to_lowercase(self) -> Expr:
"""
Transform to lowercase variant.
"""
return wrap_expr(self._pyexpr.str_to_lowercase())
def strip(self) -> Expr:
"""
Remove leading and trailing whitespace.
"""
return wrap_expr(self._pyexpr.str_strip())
def lstrip(self) -> Expr:
"""
Remove leading whitespace.
"""
return wrap_expr(self._pyexpr.str_lstrip())
def rstrip(self) -> Expr:
"""
Remove trailing whitespace.
"""
return wrap_expr(self._pyexpr.str_rstrip())
def contains(self, pattern: str) -> Expr:
"""
Check if string contains regex.
Parameters
----------
pattern
Regex pattern.
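Examples
--------
A minimal usage sketch (the column name and pattern are illustrative; output omitted):
>>> df = pl.DataFrame({"s": ["foo", "bar", "spam"]})
>>> df.select(pl.col("s").str.contains(r"ba.?"))  # doctest: +SKIP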
"""
return wrap_expr(self._pyexpr.str_contains(pattern))
def json_path_match(self, json_path: str) -> Expr:
"""
Extract the first match of a JSON string with the provided JSONPath expression.
Throws an error if an invalid JSON string is encountered.
All return values are cast to Utf8 regardless of the original value.
Documentation on JSONPath standard: https://goessner.net/articles/JsonPath/
Parameters
----------
json_path
A valid JSON path query string
Returns
-------
Utf8 array. Contains null if the original value is null or the json_path returns nothing.
Examples
--------
>>> df = pl.DataFrame(
... {"json_val": ['{"a":"1"}', None, '{"a":2}', '{"a":2.1}', '{"a":true}']}
... )
>>> df.select(pl.col("json_val").str.json_path_match("$.a"))
shape: (5, 1)
┌──────────┐
│ json_val │
│ --- │
│ str │
╞══════════╡
│ 1 │
├╌╌╌╌╌╌╌╌╌╌┤
│ null │
├╌╌╌╌╌╌╌╌╌╌┤
│ 2 │
├╌╌╌╌╌╌╌╌╌╌┤
│ 2.1 │
├╌╌╌╌╌╌╌╌╌╌┤
│ true │
└──────────┘
"""
return wrap_expr(self._pyexpr.str_json_path_match(json_path))
def decode(self, encoding: str, strict: bool = False) -> Expr:
"""
Decodes a value using the provided encoding
Parameters
----------
encoding
'hex' or 'base64'
strict
how to handle invalid inputs
- True: method will throw error if unable to decode a value
- False: unhandled values will be replaced with `None`
Examples
--------
>>> df = pl.DataFrame({"encoded": ["666f6f", "626172", None]})
>>> df.select(pl.col("encoded").str.decode("hex"))
shape: (3, 1)
┌─────────┐
│ encoded │
│ --- │
│ str │
╞═════════╡
│ foo │
├╌╌╌╌╌╌╌╌╌┤
│ bar │
├╌╌╌╌╌╌╌╌╌┤
│ null │
└─────────┘
"""
if encoding == "hex":
return wrap_expr(self._pyexpr.str_hex_decode(strict))
elif encoding == "base64":
return wrap_expr(self._pyexpr.str_base64_decode(strict))
else:
raise ValueError("supported encodings are 'hex' and 'base64'")
def encode(self, encoding: str) -> Expr:
"""
Encodes a value using the provided encoding
Parameters
----------
encoding
'hex' or 'base64'
Returns
-------
Utf8 array with values encoded using provided encoding
Examples
--------
>>> df = pl.DataFrame({"strings": ["foo", "bar", None]})
>>> df.select(pl.col("strings").str.encode("hex"))
shape: (3, 1)
┌─────────┐
│ strings │
│ --- │
│ str │
╞═════════╡
│ 666f6f │
├╌╌╌╌╌╌╌╌╌┤
│ 626172 │
├╌╌╌╌╌╌╌╌╌┤
│ null │
└─────────┘
"""
if encoding == "hex":
return wrap_expr(self._pyexpr.str_hex_encode())
elif encoding == "base64":
return wrap_expr(self._pyexpr.str_base64_encode())
else:
raise ValueError("supported encodings are 'hex' and 'base64'")
def extract(self, pattern: str, group_index: int = 1) -> Expr:
r"""
Extract the target capture group from provided patterns.
Parameters
----------
pattern
A valid regex pattern
group_index
Index of the targeted capture group.
Group 0 means the whole pattern; the first group begins at index 1.
Defaults to the first capture group.
Returns
-------
Utf8 array. Contains null if the original value is null or the regex captures nothing.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [
... "http://vote.com/ballon_dor?candidate=messi&ref=polars",
... "http://vote.com/ballon_dor?candidat=jorginho&ref=polars",
... "http://vote.com/ballon_dor?candidate=ronaldo&ref=polars",
... ]
... }
... )
>>> df.select(
... [
... pl.col("a").str.extract(r"candidate=(\w+)", 1),
... ]
... )
shape: (3, 1)
┌─────────┐
│ a │
│ --- │
│ str │
╞═════════╡
│ messi │
├╌╌╌╌╌╌╌╌╌┤
│ null │
├╌╌╌╌╌╌╌╌╌┤
│ ronaldo │
└─────────┘
"""
return wrap_expr(self._pyexpr.str_extract(pattern, group_index))
def split(self, by: str, inclusive: bool = False) -> Expr:
"""
Split the string by a substring.
The return type will be of type List<Utf8>.
Parameters
----------
by
substring
inclusive
Include the split character/string in the results
Examples
--------
>>> df = pl.DataFrame({"s": ["foo bar", "foo-bar", "foo bar baz"]})
>>> df.select(pl.col("s").str.split(by=" "))
shape: (3, 1)
┌───────────────────────┐
│ s │
│ --- │
│ list [str] │
╞═══════════════════════╡
│ ["foo", "bar"] │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ ["foo-bar"] │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ ["foo", "bar", "baz"] │
└───────────────────────┘
"""
if inclusive:
return wrap_expr(self._pyexpr.str_split_inclusive(by))
return wrap_expr(self._pyexpr.str_split(by))
def split_exact(self, by: str, n: int, inclusive: bool = False) -> Expr:
"""
Split the string by a substring into a struct of `n` fields.
The return type will be of type Struct<Utf8>.
If it cannot make `n` splits, the remaining field elements will be null.
Parameters
----------
by
substring
n
Number of splits to make
inclusive
Include the split character/string in the results
Examples
--------
>>> (
... pl.DataFrame({"x": ["a_1", None, "c", "d_4"]}).select(
... [
... pl.col("x").str.split_exact("_", 1).alias("fields"),
... ]
... )
... )
shape: (4, 1)
┌───────────────────────────────────────────┐
│ fields │
│ --- │
│ struct[2]{'field_0': str, 'field_1': str} │
╞═══════════════════════════════════════════╡
│ {"a","1"} │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ {null,null} │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ {"c",null} │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ {"d","4"} │
└───────────────────────────────────────────┘
"""
if inclusive:
return wrap_expr(self._pyexpr.str_split_exact_inclusive(by, n))
return wrap_expr(self._pyexpr.str_split_exact(by, n))
def replace(self, pattern: str, value: str) -> Expr:
"""
Replace first regex match with a string value.
Parameters
----------
pattern
Regex pattern.
value
Replacement string.
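Examples
--------
A minimal usage sketch (the column name, pattern and replacement are illustrative; output omitted):
>>> df = pl.DataFrame({"s": ["abcabc", "123a123"]})
>>> df.select(pl.col("s").str.replace(r"abc", "-"))  # doctest: +SKIP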
"""
return wrap_expr(self._pyexpr.str_replace(pattern, value))
def replace_all(self, pattern: str, value: str) -> Expr:
"""
Replace substring on all regex pattern matches.
Parameters
----------
pattern
Regex pattern.
value
Replacement string.
"""
return wrap_expr(self._pyexpr.str_replace_all(pattern, value))
def slice(self, start: int, length: Optional[int] = None) -> Expr:
"""
Create subslices of the string values of a Utf8 Series.
Parameters
----------
start
Start of the slice (negative indexing may be used).
length
Optional length of the slice.
Returns
-------
Series of Utf8 type
"""
return wrap_expr(self._pyexpr.str_slice(start, length))
class ExprDateTimeNameSpace:
"""
Namespace for datetime related expressions.
"""
def __init__(self, expr: Expr):
self._pyexpr = expr._pyexpr
def truncate(
self,
every: Union[str, timedelta],
offset: Optional[Union[str, timedelta]] = None,
) -> Expr:
"""
.. warning::
This API is experimental and may change without it being considered a breaking change.
Divide the date/datetime range into buckets.
Data must be sorted; if not, the output does not make sense.
The `every` and `offset` arguments are created with
the following string language:
1ns # 1 nanosecond
1us # 1 microsecond
1ms # 1 millisecond
1s # 1 second
1m # 1 minute
1h # 1 hour
1d # 1 day
1w # 1 week
1mo # 1 calendar month
1y # 1 calendar year
3d12h4m25s # 3 days, 12 hours, 4 minutes, and 25 seconds
Parameters
----------
every
Every interval start and period length
offset
Offset the window
Returns
-------
Date/Datetime series
Examples
--------
>>> from datetime import timedelta, datetime
>>> start = datetime(2001, 1, 1)
>>> stop = datetime(2001, 1, 2)
>>> s = pl.date_range(start, stop, timedelta(minutes=30), name="dates")
>>> s
shape: (49,)
Series: 'dates' [datetime[ns]]
[
2001-01-01 00:00:00
2001-01-01 00:30:00
2001-01-01 01:00:00
2001-01-01 01:30:00
2001-01-01 02:00:00
2001-01-01 02:30:00
2001-01-01 03:00:00
2001-01-01 03:30:00
2001-01-01 04:00:00
2001-01-01 04:30:00
2001-01-01 05:00:00
2001-01-01 05:30:00
...
2001-01-01 18:30:00
2001-01-01 19:00:00
2001-01-01 19:30:00
2001-01-01 20:00:00
2001-01-01 20:30:00
2001-01-01 21:00:00
2001-01-01 21:30:00
2001-01-01 22:00:00
2001-01-01 22:30:00
2001-01-01 23:00:00
2001-01-01 23:30:00
2001-01-02 00:00:00
]
>>> s.dt.truncate("1h")
shape: (49,)
Series: 'dates' [datetime[ns]]
[
2001-01-01 00:00:00
2001-01-01 00:00:00
2001-01-01 01:00:00
2001-01-01 01:00:00
2001-01-01 02:00:00
2001-01-01 02:00:00
2001-01-01 03:00:00
2001-01-01 03:00:00
2001-01-01 04:00:00
2001-01-01 04:00:00
2001-01-01 05:00:00
2001-01-01 05:00:00
...
2001-01-01 18:00:00
2001-01-01 19:00:00
2001-01-01 19:00:00
2001-01-01 20:00:00
2001-01-01 20:00:00
2001-01-01 21:00:00
2001-01-01 21:00:00
2001-01-01 22:00:00
2001-01-01 22:00:00
2001-01-01 23:00:00
2001-01-01 23:00:00
2001-01-02 00:00:00
]
>>> assert s.dt.truncate("1h") == s.dt.truncate(timedelta(hours=1))
"""
if offset is None:
offset = "0ns"
if isinstance(every, timedelta):
every = _timedelta_to_pl_duration(every)
if isinstance(offset, timedelta):
offset = _timedelta_to_pl_duration(offset)
return wrap_expr(self._pyexpr.date_truncate(every, offset))
def strftime(self, fmt: str) -> Expr:
"""
Format Date/datetime with a formatting rule: See [chrono strftime/strptime](https://docs.rs/chrono/0.4.19/chrono/format/strftime/index.html).
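Examples
--------
A minimal usage sketch (the column name and format string are illustrative; output omitted):
>>> from datetime import datetime
>>> df = pl.DataFrame({"dates": [datetime(2001, 1, 1), datetime(2001, 1, 2)]})
>>> df.select(pl.col("dates").dt.strftime("%Y-%m-%d"))  # doctest: +SKIP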
"""
return wrap_expr(self._pyexpr.strftime(fmt))
def year(self) -> Expr:
"""
Extract year from underlying Date representation.
Can be performed on Date and Datetime.
Returns the year number in the calendar date.
Returns
-------
Year as Int32
"""
return wrap_expr(self._pyexpr.year())
def month(self) -> Expr:
"""
Extract month from underlying Date representation.
Can be performed on Date and Datetime.
Returns the month number starting from 1.
The return value ranges from 1 to 12.
Returns
-------
Month as UInt32
"""
return wrap_expr(self._pyexpr.month())
def week(self) -> Expr:
"""
Extract the week from the underlying Date representation.
Can be performed on Date and Datetime
Returns the ISO week number starting from 1.
The return value ranges from 1 to 53. (The last week of year differs by years.)
Returns
-------
Week number as UInt32
"""
return wrap_expr(self._pyexpr.week())
def weekday(self) -> Expr:
"""
Extract the week day from the underlying Date representation.
Can be performed on Date and Datetime.
Returns the weekday number where monday = 0 and sunday = 6
Returns
-------
Week day as UInt32
"""
return wrap_expr(self._pyexpr.weekday())
def day(self) -> Expr:
"""
Extract day from underlying Date representation.
Can be performed on Date and Datetime.
Returns the day of month starting from 1.
The return value ranges from 1 to 31. (The last day of month differs by months.)
Returns
-------
Day as UInt32
"""
return wrap_expr(self._pyexpr.day())
def ordinal_day(self) -> Expr:
"""
Extract ordinal day from underlying Date representation.
Can be performed on Date and Datetime.
Returns the day of year starting from 1.
The return value ranges from 1 to 366. (The last day of year differs by years.)
Returns
-------
Day as UInt32
"""
return wrap_expr(self._pyexpr.ordinal_day())
def hour(self) -> Expr:
"""
Extract hour from underlying DateTime representation.
Can be performed on Datetime.
Returns the hour number from 0 to 23.
Returns
-------
Hour as UInt32
"""
return wrap_expr(self._pyexpr.hour())
def minute(self) -> Expr:
"""
Extract minutes from underlying DateTime representation.
Can be performed on Datetime.
Returns the minute number from 0 to 59.
Returns
-------
Minute as UInt32
"""
return wrap_expr(self._pyexpr.minute())
def second(self) -> Expr:
"""
Extract seconds from underlying DateTime representation.
Can be performed on Datetime.
Returns the second number from 0 to 59.
Returns
-------
Second as UInt32
"""
return wrap_expr(self._pyexpr.second())
def nanosecond(self) -> Expr:
"""
Extract nanoseconds from the underlying DateTime representation.
Can be performed on Datetime.
Returns the number of nanoseconds since the whole non-leap second.
The range from 1,000,000,000 to 1,999,999,999 represents the leap second.
Returns
-------
Nanosecond as UInt32
"""
return wrap_expr(self._pyexpr.nanosecond())
def to_python_datetime(self) -> Expr:
"""
Go from Date/Datetime to python DateTime objects
"""
return wrap_expr(self._pyexpr).map(
lambda s: s.dt.to_python_datetime(), return_dtype=Object
)
def epoch(self, tu: str = "us") -> Expr:
"""
Get the time passed since the Unix EPOCH in the given time unit.
Parameters
----------
tu
One of {'ns', 'us', 'ms', 's', 'd'}
"""
if tu in ["ns", "us", "ms"]:
return self.timestamp(tu)
if tu == "s":
return wrap_expr(self._pyexpr.dt_epoch_seconds())
if tu == "d":
return wrap_expr(self._pyexpr).cast(Date).cast(Int32)
else:
raise ValueError(f"time unit {tu} not understood")
def epoch_days(self) -> Expr:
"""
Get the number of days since the unix EPOCH.
If the date is before the unix EPOCH, the number of days will be negative.
.. deprecated:: 0.13.9
Use :func:`epoch` instead.
Returns
-------
Days as Int32
"""
return wrap_expr(self._pyexpr).cast(Date).cast(Int32)
def epoch_milliseconds(self) -> Expr:
"""
Get the number of milliseconds since the unix EPOCH
If the date is before the unix EPOCH, the number of milliseconds will be negative.
.. deprecated:: 0.13.9
Use :func:`epoch` instead.
Returns
-------
Milliseconds as Int64
"""
return self.timestamp("ms")
def epoch_seconds(self) -> Expr:
"""
Get the number of seconds since the unix EPOCH
If the date is before the unix EPOCH, the number of seconds will be negative.
.. deprecated:: 0.13.9
Use :func:`epoch` instead.
Returns
-------
Seconds as Int64
"""
return wrap_expr(self._pyexpr.dt_epoch_seconds())
def timestamp(self, tu: str = "us") -> Expr:
"""
Return a timestamp in the given time unit.
Parameters
----------
tu
One of {'ns', 'us', 'ms'}
"""
return wrap_expr(self._pyexpr.timestamp(tu))
def with_time_unit(self, tu: str) -> Expr:
"""
Set the time unit of a Series of dtype Datetime or Duration. This does not modify underlying data,
and should be used to fix an incorrect time unit.
Parameters
----------
tu
Time unit for the `Datetime` Series: one of {"ns", "us", "ms"}
"""
return wrap_expr(self._pyexpr.dt_with_time_unit(tu))
def cast_time_unit(self, tu: str) -> Expr:
"""
Cast the underlying data to another time unit. This may lose precision.
Parameters
----------
tu
Time unit for the `Datetime` Series: any of {"ns", "us", "ms"}
"""
return wrap_expr(self._pyexpr.dt_cast_time_unit(tu))
def and_time_unit(self, tu: str, dtype: Type[DataType] = Datetime) -> Expr:
"""
Set the time unit of a Series of type Datetime. This does not modify underlying data,
and should be used to fix an incorrect time unit.
.. deprecated::
Use `with_time_unit`
Parameters
----------
tu
Time unit for the `Datetime` Series: any of {"ns", "us", "ms"}
dtype
Output data type.
"""
return self.with_time_unit(tu)
def and_time_zone(self, tz: Optional[str]) -> Expr:
"""
Set time zone for a Series of type Datetime.
.. deprecated::
Use `with_time_zone`
Parameters
----------
tz
Time zone for the `Datetime` Series
"""
return wrap_expr(self._pyexpr).map(
lambda s: s.dt.with_time_zone(tz), return_dtype=Datetime
)
def with_time_zone(self, tz: Optional[str]) -> Expr:
"""
Set time zone for a Series of type Datetime.
Parameters
----------
tz
Time zone for the `Datetime` Series
"""
return wrap_expr(self._pyexpr).map(
lambda s: s.dt.with_time_zone(tz), return_dtype=Datetime
)
def days(self) -> Expr:
"""
Extract the days from a Duration type.
Returns
-------
A series of dtype Int64
"""
return wrap_expr(self._pyexpr.duration_days())
def hours(self) -> Expr:
"""
Extract the hours from a Duration type.
Returns
-------
A series of dtype Int64
"""
return wrap_expr(self._pyexpr.duration_hours())
def minutes(self) -> Expr:
"""
Extract the minutes from a Duration type.
Returns
-------
A series of dtype Int64
"""
return wrap_expr(self._pyexpr.duration_minutes())
def seconds(self) -> Expr:
"""
Extract the seconds from a Duration type.
Returns
-------
A series of dtype Int64
"""
return wrap_expr(self._pyexpr.duration_seconds())
def milliseconds(self) -> Expr:
"""
Extract the milliseconds from a Duration type.
Returns
-------
A series of dtype Int64
"""
return wrap_expr(self._pyexpr.duration_milliseconds())
def nanoseconds(self) -> Expr:
"""
Extract the nanoseconds from a Duration type.
Returns
-------
A series of dtype Int64
"""
return wrap_expr(self._pyexpr.duration_nanoseconds())
def expr_to_lit_or_expr(
expr: Union[Expr, bool, int, float, str, "pli.Series"],
str_to_lit: bool = True,
) -> Expr:
"""
Helper function that converts args to expressions.
Parameters
----------
expr
Any argument.
str_to_lit
If True, a string argument `"foo"` will be converted to `lit("foo")`;
if False, it will be converted to `col("foo")`.
Returns
-------
Expr
"""
if isinstance(expr, str) and not str_to_lit:
return pli.col(expr)
elif (
isinstance(expr, (int, float, str, pli.Series, datetime, date)) or expr is None
):
return pli.lit(expr)
elif isinstance(expr, Expr):
return expr
else:
raise ValueError(
f"did not expect value {expr} of type {type(expr)}, maybe disambiguate with pl.lit or pl.col"
)
class ExprCatNameSpace:
"""
Namespace for categorical related expressions
"""
def __init__(self, expr: Expr):
self._pyexpr = expr._pyexpr
def set_ordering(self, ordering: str) -> "Expr":
"""
Determine how this categorical series should be sorted.
Parameters
----------
ordering
One of:
- 'physical' -> use the physical representation of the categories to determine the order (default)
- 'lexical' -> use the string values to determine the ordering
Examples
--------
>>> df = pl.DataFrame(
... {"cats": ["z", "z", "k", "a", "b"], "vals": [3, 1, 2, 2, 3]}
... ).with_columns(
... [
... pl.col("cats").cast(pl.Categorical).cat.set_ordering("lexical"),
... ]
... )
>>> df.sort(["cats", "vals"])
shape: (5, 2)
┌──────┬──────┐
│ cats ┆ vals │
│ --- ┆ --- │
│ cat ┆ i64 │
╞══════╪══════╡
│ a ┆ 2 │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ b ┆ 3 │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ k ┆ 2 │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ z ┆ 1 │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ z ┆ 3 │
└──────┴──────┘
"""
return wrap_expr(self._pyexpr.cat_set_ordering(ordering))
def _prepare_alpha(
com: Optional[float] = None,
span: Optional[float] = None,
half_life: Optional[float] = None,
alpha: Optional[float] = None,
) -> float:
if com is not None and alpha is None:
assert com >= 0.0
alpha = 1.0 / (1.0 + com)
if span is not None and alpha is None:
assert span >= 1.0
alpha = 2.0 / (span + 1.0)
if half_life is not None and alpha is None:
assert half_life > 0.0
alpha = 1.0 - np.exp(-np.log(2.0) / half_life)
if alpha is None:
raise ValueError("at least one of {com, span, half_life, alpha} should be set")
return alpha
|
py | 1a531338b565f8ce6a172415f976b59850e7fb55 | from yandex_checkout.domain.response.refund_response import RefundResponse
from yandex_checkout.domain.common.base_object import BaseObject
from yandex_checkout.domain.response.payment_response import PaymentResponse
class WebhookNotification(BaseObject):
__type = None
__event = None
__object = None
@property
def type(self):
return self.__type
@type.setter
def type(self, value):
self.__type = value
@property
def event(self):
return self.__event
@event.setter
def event(self, value):
self.__event = value
@property
def object(self):
return self.__object
@object.setter
def object(self, value):
if isinstance(value, dict) and value:
self.__object = PaymentResponse(value)
elif not value:
raise ValueError('Parameter object is empty')
else:
raise TypeError('Invalid object type')
class RefundWebhookNotification(BaseObject):
__type = None
__event = None
__object = None
@property
def type(self):
return self.__type
@type.setter
def type(self, value):
self.__type = value
@property
def event(self):
return self.__event
@event.setter
def event(self, value):
self.__event = value
@property
def object(self):
return self.__object
@object.setter
def object(self, value):
if isinstance(value, dict) and value:
self.__object = RefundResponse(value)
elif not value:
raise ValueError('Parameter object is empty')
else:
raise TypeError('Invalid object type')
|
py | 1a53133af1529332c2d92a521cbc0200729a9aa9 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Experiment utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
import tensorflow as tf
flags.DEFINE_integer("batch_size", 16, "Batch size.")
flags.DEFINE_string("model_dir", None, "Model directory")
flags.DEFINE_integer("tf_random_seed", None,
"Random seed for tensorflow")
flags.DEFINE_integer("num_eval_steps", None,
"Number of steps to take during evaluation.")
flags.DEFINE_integer("num_train_steps", None,
"Number of steps to take during training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"Number of steps between checkpoint saves.")
flags.DEFINE_integer("eval_throttle_secs", 600,
"Minimum number of seconds to wait between evaluations")
flags.DEFINE_integer("eval_start_delay_secs", 120,
"Number of seconds to wait before starting evaluations.")
flags.DEFINE_integer("keep_checkpoint_max", 5,
"Max number of checkpoints to keep")
FLAGS = flags.FLAGS
def run_experiment(model_fn, train_input_fn, eval_input_fn, exporters=None):
"""Run experiment."""
run_config = tf.estimator.RunConfig(
model_dir=FLAGS.model_dir,
tf_random_seed=FLAGS.tf_random_seed,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
keep_checkpoint_max=FLAGS.keep_checkpoint_max)
estimator = tf.estimator.Estimator(
config=run_config,
model_fn=model_fn,
model_dir=FLAGS.model_dir)
train_spec = tf.estimator.TrainSpec(
input_fn=train_input_fn,
max_steps=FLAGS.num_train_steps)
eval_spec = tf.estimator.EvalSpec(
name="default",
input_fn=eval_input_fn,
exporters=exporters,
start_delay_secs=FLAGS.eval_start_delay_secs,
throttle_secs=FLAGS.eval_throttle_secs,
steps=FLAGS.num_eval_steps)
tf.logging.set_verbosity(tf.logging.INFO)
tf.estimator.train_and_evaluate(
estimator=estimator,
train_spec=train_spec,
eval_spec=eval_spec)
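# A hypothetical usage sketch (the my_* functions below are placeholders, not part of
# this module): define the estimator model_fn and input_fns elsewhere, set the flags
# (e.g. --model_dir and --num_train_steps), then hand them to run_experiment.
#
#   run_experiment(
#       model_fn=my_model_fn,
#       train_input_fn=my_train_input_fn,
#       eval_input_fn=my_eval_input_fn,
#   )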
|
py | 1a5314b60e099373317ae6974f2b1e8695b10909 | import spacy
nlp = spacy.load("en_core_web_sm")
# Import the Doc class
from ____ import ____
# The desired text: "Go, get started!"
words = ["Go", ",", "get", "started", "!"]
spaces = [____, ____, ____, ____, ____]
# Create a Doc from the words and the spaces
doc = ____(____, ____=____, ____=____)
print(doc.text)
|
py | 1a5314b8b6391b6838a331daa9084e4bada07537 | import os
import re
from dataclasses import dataclass
from pathlib import PurePath
from typing import Awaitable, List, Optional, Pattern, Set, Union
from urllib.parse import urljoin
from bs4 import BeautifulSoup, Tag
from ..config import Config
from ..logging import ProgressBar, log
from ..output_dir import FileSink
from ..utils import soupify
from .crawler import CrawlError
from .http_crawler import HttpCrawler, HttpCrawlerSection
class KitIpdCrawlerSection(HttpCrawlerSection):
def target(self) -> str:
target = self.s.get("target")
if not target:
self.missing_value("target")
if not target.startswith("https://"):
self.invalid_value("target", target, "Should be a URL")
return target
def link_regex(self) -> Pattern[str]:
regex = self.s.get("link_regex", r"^.*/[^/]*\.(?:pdf|zip|c|java)$")
return re.compile(regex)
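# A hypothetical config sketch for this crawler section (the section header and the
# "type" value are assumptions; only the "target" and "link_regex" keys are read above):
#
#   [crawl:ipd]
#   type = kit-ipd
#   target = https://example.kit.edu/teaching/
#   link_regex = ^.*/[^/]*\.(?:pdf|zip)$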
@dataclass(unsafe_hash=True)
class KitIpdFile:
name: str
url: str
@dataclass
class KitIpdFolder:
name: str
files: List[KitIpdFile]
def explain(self) -> None:
log.explain_topic(f"Folder {self.name!r}")
for file in self.files:
log.explain(f"File {file.name!r}")
def __hash__(self) -> int:
return self.name.__hash__()
class KitIpdCrawler(HttpCrawler):
def __init__(
self,
name: str,
section: KitIpdCrawlerSection,
config: Config,
):
super().__init__(name, section, config)
self._url = section.target()
self._file_regex = section.link_regex()
async def _run(self) -> None:
maybe_cl = await self.crawl(PurePath("."))
if not maybe_cl:
return
tasks: List[Awaitable[None]] = []
async with maybe_cl:
for item in await self._fetch_items():
if isinstance(item, KitIpdFolder):
tasks.append(self._crawl_folder(item))
else:
# Orphan files are placed in the root folder
tasks.append(self._download_file(PurePath("."), item))
await self.gather(tasks)
async def _crawl_folder(self, folder: KitIpdFolder) -> None:
path = PurePath(folder.name)
if not await self.crawl(path):
return
tasks = [self._download_file(path, file) for file in folder.files]
await self.gather(tasks)
async def _download_file(self, parent: PurePath, file: KitIpdFile) -> None:
element_path = parent / file.name
maybe_dl = await self.download(element_path)
if not maybe_dl:
return
async with maybe_dl as (bar, sink):
await self._stream_from_url(file.url, sink, bar)
async def _fetch_items(self) -> Set[Union[KitIpdFile, KitIpdFolder]]:
page = await self.get_page()
elements: List[Tag] = self._find_file_links(page)
items: Set[Union[KitIpdFile, KitIpdFolder]] = set()
for element in elements:
folder_label = self._find_folder_label(element)
if folder_label:
folder = self._extract_folder(folder_label)
if folder not in items:
items.add(folder)
folder.explain()
else:
file = self._extract_file(element)
items.add(file)
log.explain_topic(f"Orphan file {file.name!r}")
log.explain("Attributing it to root folder")
return items
def _extract_folder(self, folder_tag: Tag) -> KitIpdFolder:
files: List[KitIpdFile] = []
name = folder_tag.getText().strip()
container: Tag = folder_tag.findNextSibling(name="table")
for link in self._find_file_links(container):
files.append(self._extract_file(link))
return KitIpdFolder(name, files)
@staticmethod
def _find_folder_label(file_link: Tag) -> Optional[Tag]:
enclosing_table: Tag = file_link.findParent(name="table")
if enclosing_table is None:
return None
return enclosing_table.findPreviousSibling(name=re.compile("^h[1-6]$"))
def _extract_file(self, link: Tag) -> KitIpdFile:
url = self._abs_url_from_link(link)
name = os.path.basename(url)
return KitIpdFile(name, url)
def _find_file_links(self, tag: Union[Tag, BeautifulSoup]) -> List[Tag]:
return tag.findAll(name="a", attrs={"href": self._file_regex})
def _abs_url_from_link(self, link_tag: Tag) -> str:
return urljoin(self._url, link_tag.get("href"))
async def _stream_from_url(self, url: str, sink: FileSink, bar: ProgressBar) -> None:
async with self.session.get(url, allow_redirects=False) as resp:
if resp.status == 403:
raise CrawlError("Received a 403. Are you within the KIT network/VPN?")
if resp.content_length:
bar.set_total(resp.content_length)
async for data in resp.content.iter_chunked(1024):
sink.file.write(data)
bar.advance(len(data))
sink.done()
async def get_page(self) -> BeautifulSoup:
async with self.session.get(self._url) as request:
return soupify(await request.read())
|
py | 1a5314ccff315a4d216658fdd80cab1eb0d57392 | import gc
import sys
import unittest
import collections
import weakref
import operator
import contextlib
import copy
import threading
import time
import random
from test import support
from test.support import script_helper, ALWAYS_EQ
# Used in ReferencesTestCase.test_ref_created_during_del() .
ref_from_del = None
# Used by FinalizeTestCase as a global that may be replaced by None
# when the interpreter shuts down.
_global_var = 'foobar'
class C:
def method(self):
pass
class Callable:
bar = None
def __call__(self, x):
self.bar = x
def create_function():
def f(): pass
return f
def create_bound_method():
return C().method
class Object:
def __init__(self, arg):
self.arg = arg
def __repr__(self):
return "<Object %r>" % self.arg
def __eq__(self, other):
if isinstance(other, Object):
return self.arg == other.arg
return NotImplemented
def __lt__(self, other):
if isinstance(other, Object):
return self.arg < other.arg
return NotImplemented
def __hash__(self):
return hash(self.arg)
def some_method(self):
return 4
def other_method(self):
return 5
class RefCycle:
def __init__(self):
self.cycle = self
class TestBase(unittest.TestCase):
def setUp(self):
self.cbcalled = 0
def callback(self, ref):
self.cbcalled += 1
@contextlib.contextmanager
def collect_in_thread(period=0.0001):
"""
Ensure GC collections happen in a different thread, at a high frequency.
"""
please_stop = False
def collect():
while not please_stop:
time.sleep(period)
gc.collect()
with support.disable_gc():
t = threading.Thread(target=collect)
t.start()
try:
yield
finally:
please_stop = True
t.join()
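# Editor's sketch (not part of the original test suite): how collect_in_thread()
# is meant to be used -- wrap a loop that churns weak containers so the
# background thread keeps running gc.collect() concurrently.  The helper below
# is illustrative only and is never called by the test runner; see
# test_threaded_weak_valued_setdefault() further down for the real tests.
def _editor_example_collect_in_thread():
    d = weakref.WeakValueDictionary()
    with collect_in_thread():
        for _ in range(1000):
            # the value may be collected by the background thread at any point
            d[10] = RefCycle()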
class ReferencesTestCase(TestBase):
def test_basic_ref(self):
self.check_basic_ref(C)
self.check_basic_ref(create_function)
self.check_basic_ref(create_bound_method)
# Just make sure the tp_repr handler doesn't raise an exception.
# Live reference:
o = C()
wr = weakref.ref(o)
repr(wr)
# Dead reference:
del o
repr(wr)
def test_basic_callback(self):
self.check_basic_callback(C)
self.check_basic_callback(create_function)
self.check_basic_callback(create_bound_method)
@support.cpython_only
def test_cfunction(self):
import _testcapi
create_cfunction = _testcapi.create_cfunction
f = create_cfunction()
wr = weakref.ref(f)
self.assertIs(wr(), f)
del f
self.assertIsNone(wr())
self.check_basic_ref(create_cfunction)
self.check_basic_callback(create_cfunction)
def test_multiple_callbacks(self):
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del o
self.assertIsNone(ref1(), "expected reference to be invalidated")
self.assertIsNone(ref2(), "expected reference to be invalidated")
self.assertEqual(self.cbcalled, 2,
"callback not called the right number of times")
def test_multiple_selfref_callbacks(self):
# Make sure all references are invalidated before callbacks are called
#
# What's important here is that we're using the first
# reference in the callback invoked on the second reference
# (the most recently created ref is cleaned up first). This
# tests that all references to the object are invalidated
# before any of the callbacks are invoked, so that we only
# have one invocation of _weakref.c:cleanup_helper() active
# for a particular object at a time.
#
def callback(object, self=self):
self.ref()
c = C()
self.ref = weakref.ref(c, callback)
ref1 = weakref.ref(c, callback)
del c
def test_constructor_kwargs(self):
c = C()
self.assertRaises(TypeError, weakref.ref, c, callback=None)
def test_proxy_ref(self):
o = C()
o.bar = 1
ref1 = weakref.proxy(o, self.callback)
ref2 = weakref.proxy(o, self.callback)
del o
def check(proxy):
proxy.bar
self.assertRaises(ReferenceError, check, ref1)
self.assertRaises(ReferenceError, check, ref2)
self.assertRaises(ReferenceError, bool, weakref.proxy(C()))
self.assertEqual(self.cbcalled, 2)
def check_basic_ref(self, factory):
o = factory()
ref = weakref.ref(o)
self.assertIsNotNone(ref(),
"weak reference to live object should be live")
o2 = ref()
self.assertIs(o, o2,
"<ref>() should return original object if live")
def check_basic_callback(self, factory):
self.cbcalled = 0
o = factory()
ref = weakref.ref(o, self.callback)
del o
self.assertEqual(self.cbcalled, 1,
"callback did not properly set 'cbcalled'")
self.assertIsNone(ref(),
"ref2 should be dead after deleting object reference")
def test_ref_reuse(self):
o = C()
ref1 = weakref.ref(o)
# create a proxy to make sure that there's an intervening creation
# between these two; it should make no difference
proxy = weakref.proxy(o)
ref2 = weakref.ref(o)
self.assertIs(ref1, ref2,
"reference object w/out callback should be re-used")
o = C()
proxy = weakref.proxy(o)
ref1 = weakref.ref(o)
ref2 = weakref.ref(o)
self.assertIs(ref1, ref2,
"reference object w/out callback should be re-used")
self.assertEqual(weakref.getweakrefcount(o), 2,
"wrong weak ref count for object")
del proxy
self.assertEqual(weakref.getweakrefcount(o), 1,
"wrong weak ref count for object after deleting proxy")
def test_proxy_reuse(self):
o = C()
proxy1 = weakref.proxy(o)
ref = weakref.ref(o)
proxy2 = weakref.proxy(o)
self.assertIs(proxy1, proxy2,
"proxy object w/out callback should have been re-used")
def test_basic_proxy(self):
o = C()
self.check_proxy(o, weakref.proxy(o))
L = collections.UserList()
p = weakref.proxy(L)
self.assertFalse(p, "proxy for empty UserList should be false")
p.append(12)
self.assertEqual(len(L), 1)
self.assertTrue(p, "proxy for non-empty UserList should be true")
p[:] = [2, 3]
self.assertEqual(len(L), 2)
self.assertEqual(len(p), 2)
self.assertIn(3, p, "proxy didn't support __contains__() properly")
p[1] = 5
self.assertEqual(L[1], 5)
self.assertEqual(p[1], 5)
L2 = collections.UserList(L)
p2 = weakref.proxy(L2)
self.assertEqual(p, p2)
## self.assertEqual(repr(L2), repr(p2))
L3 = collections.UserList(range(10))
p3 = weakref.proxy(L3)
self.assertEqual(L3[:], p3[:])
self.assertEqual(L3[5:], p3[5:])
self.assertEqual(L3[:5], p3[:5])
self.assertEqual(L3[2:5], p3[2:5])
def test_proxy_unicode(self):
# See bug 5037
class C(object):
def __str__(self):
return "string"
def __bytes__(self):
return b"bytes"
instance = C()
self.assertIn("__bytes__", dir(weakref.proxy(instance)))
self.assertEqual(bytes(weakref.proxy(instance)), b"bytes")
def test_proxy_index(self):
class C:
def __index__(self):
return 10
o = C()
p = weakref.proxy(o)
self.assertEqual(operator.index(p), 10)
def test_proxy_div(self):
class C:
def __floordiv__(self, other):
return 42
def __ifloordiv__(self, other):
return 21
o = C()
p = weakref.proxy(o)
self.assertEqual(p // 5, 42)
p //= 5
self.assertEqual(p, 21)
def test_proxy_matmul(self):
class C:
def __matmul__(self, other):
return 1729
def __rmatmul__(self, other):
return -163
def __imatmul__(self, other):
return 561
o = C()
p = weakref.proxy(o)
self.assertEqual(p @ 5, 1729)
self.assertEqual(5 @ p, -163)
p @= 5
self.assertEqual(p, 561)
# The PyWeakref_* C API is documented as allowing either NULL or
# None as the value for the callback, where either means "no
# callback". The "no callback" ref and proxy objects are supposed
# to be shared by all callers for as long as they are alive.  In
# Python 2.3.3 and earlier, this guarantee
# was not honored, and was broken in different ways for
# PyWeakref_NewRef() and PyWeakref_NewProxy(). (Two tests.)
def test_shared_ref_without_callback(self):
self.check_shared_without_callback(weakref.ref)
def test_shared_proxy_without_callback(self):
self.check_shared_without_callback(weakref.proxy)
def check_shared_without_callback(self, makeref):
o = Object(1)
p1 = makeref(o, None)
p2 = makeref(o, None)
self.assertIs(p1, p2, "both callbacks were None in the C API")
del p1, p2
p1 = makeref(o)
p2 = makeref(o, None)
self.assertIs(p1, p2, "callbacks were NULL, None in the C API")
del p1, p2
p1 = makeref(o)
p2 = makeref(o)
self.assertIs(p1, p2, "both callbacks were NULL in the C API")
del p1, p2
p1 = makeref(o, None)
p2 = makeref(o)
self.assertIs(p1, p2, "callbacks were None, NULL in the C API")
def test_callable_proxy(self):
o = Callable()
ref1 = weakref.proxy(o)
self.check_proxy(o, ref1)
self.assertIs(type(ref1), weakref.CallableProxyType,
"proxy is not of callable type")
ref1('twinkies!')
self.assertEqual(o.bar, 'twinkies!',
"call through proxy not passed through to original")
ref1(x='Splat.')
self.assertEqual(o.bar, 'Splat.',
"call through proxy not passed through to original")
# expect due to too few args
self.assertRaises(TypeError, ref1)
# expect due to too many args
self.assertRaises(TypeError, ref1, 1, 2, 3)
def check_proxy(self, o, proxy):
o.foo = 1
self.assertEqual(proxy.foo, 1,
"proxy does not reflect attribute addition")
o.foo = 2
self.assertEqual(proxy.foo, 2,
"proxy does not reflect attribute modification")
del o.foo
self.assertFalse(hasattr(proxy, 'foo'),
"proxy does not reflect attribute removal")
proxy.foo = 1
self.assertEqual(o.foo, 1,
"object does not reflect attribute addition via proxy")
proxy.foo = 2
self.assertEqual(o.foo, 2,
"object does not reflect attribute modification via proxy")
del proxy.foo
self.assertFalse(hasattr(o, 'foo'),
"object does not reflect attribute removal via proxy")
def test_proxy_deletion(self):
# Test clearing of SF bug #762891
class Foo:
result = None
def __delitem__(self, accessor):
self.result = accessor
g = Foo()
f = weakref.proxy(g)
del f[0]
self.assertEqual(f.result, 0)
def test_proxy_bool(self):
# Test clearing of SF bug #1170766
class List(list): pass
lyst = List()
self.assertEqual(bool(weakref.proxy(lyst)), bool(lyst))
def test_proxy_iter(self):
# Test fails with a debug build of the interpreter
# (see bpo-38395).
obj = None
class MyObj:
def __iter__(self):
nonlocal obj
del obj
return NotImplemented
obj = MyObj()
p = weakref.proxy(obj)
with self.assertRaises(TypeError):
# "blech" in p calls MyObj.__iter__ through the proxy,
# without keeping a reference to the real object, so it
# can be killed in the middle of the call
"blech" in p
def test_proxy_next(self):
arr = [4, 5, 6]
def iterator_func():
yield from arr
it = iterator_func()
class IteratesWeakly:
def __iter__(self):
return weakref.proxy(it)
weak_it = IteratesWeakly()
# Calls proxy.__next__
self.assertEqual(list(weak_it), [4, 5, 6])
def test_proxy_bad_next(self):
# bpo-44720: PyIter_Next() shouldn't be called if the reference
# isn't an iterator.
not_an_iterator = lambda: 0
class A:
def __iter__(self):
return weakref.proxy(not_an_iterator)
a = A()
msg = "Weakref proxy referenced a non-iterator"
with self.assertRaisesRegex(TypeError, msg):
list(a)
def test_proxy_reversed(self):
class MyObj:
def __len__(self):
return 3
def __reversed__(self):
return iter('cba')
obj = MyObj()
self.assertEqual("".join(reversed(weakref.proxy(obj))), "cba")
def test_proxy_hash(self):
class MyObj:
def __hash__(self):
return 42
obj = MyObj()
with self.assertRaises(TypeError):
hash(weakref.proxy(obj))
class MyObj:
__hash__ = None
obj = MyObj()
with self.assertRaises(TypeError):
hash(weakref.proxy(obj))
def test_getweakrefcount(self):
o = C()
ref1 = weakref.ref(o)
ref2 = weakref.ref(o, self.callback)
self.assertEqual(weakref.getweakrefcount(o), 2,
"got wrong number of weak reference objects")
proxy1 = weakref.proxy(o)
proxy2 = weakref.proxy(o, self.callback)
self.assertEqual(weakref.getweakrefcount(o), 4,
"got wrong number of weak reference objects")
del ref1, ref2, proxy1, proxy2
self.assertEqual(weakref.getweakrefcount(o), 0,
"weak reference objects not unlinked from"
" referent when discarded.")
# assumes ints do not support weakrefs
self.assertEqual(weakref.getweakrefcount(1), 0,
"got wrong number of weak reference objects for int")
def test_getweakrefs(self):
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del ref1
self.assertEqual(weakref.getweakrefs(o), [ref2],
"list of refs does not match")
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del ref2
self.assertEqual(weakref.getweakrefs(o), [ref1],
"list of refs does not match")
del ref1
self.assertEqual(weakref.getweakrefs(o), [],
"list of refs not cleared")
# assumes ints do not support weakrefs
self.assertEqual(weakref.getweakrefs(1), [],
"list of refs does not match for int")
def test_newstyle_number_ops(self):
class F(float):
pass
f = F(2.0)
p = weakref.proxy(f)
self.assertEqual(p + 1.0, 3.0)
self.assertEqual(1.0 + p, 3.0) # this used to SEGV
def test_callbacks_protected(self):
# Callbacks protected from already-set exceptions?
# Regression test for SF bug #478534.
class BogusError(Exception):
pass
data = {}
def remove(k):
del data[k]
def encapsulate():
f = lambda : ()
data[weakref.ref(f, remove)] = None
raise BogusError
try:
encapsulate()
except BogusError:
pass
else:
self.fail("exception not properly restored")
try:
encapsulate()
except BogusError:
pass
else:
self.fail("exception not properly restored")
def test_sf_bug_840829(self):
# "weakref callbacks and gc corrupt memory"
# subtype_dealloc erroneously exposed a new-style instance
# already in the process of getting deallocated to gc,
# causing double-deallocation if the instance had a weakref
# callback that triggered gc.
# If the bug exists, there probably won't be an obvious symptom
# in a release build. In a debug build, a segfault will occur
# when the second attempt to remove the instance from the "list
# of all objects" occurs.
import gc
class C(object):
pass
c = C()
wr = weakref.ref(c, lambda ignore: gc.collect())
del c
# There endeth the first part. It gets worse.
del wr
c1 = C()
c1.i = C()
wr = weakref.ref(c1.i, lambda ignore: gc.collect())
c2 = C()
c2.c1 = c1
del c1 # still alive because c2 points to it
# Now when subtype_dealloc gets called on c2, it's not enough just
# that c2 is immune from gc while the weakref callbacks associated
# with c2 execute (there are none in this 2nd half of the test, btw).
# subtype_dealloc goes on to call the base classes' deallocs too,
# so any gc triggered by weakref callbacks associated with anything
# torn down by a base class dealloc can also trigger double
# deallocation of c2.
del c2
def test_callback_in_cycle_1(self):
import gc
class J(object):
pass
class II(object):
def acallback(self, ignore):
self.J
I = II()
I.J = J
I.wr = weakref.ref(J, I.acallback)
# Now J and II are each in a self-cycle (as all new-style class
# objects are, since their __mro__ points back to them). I holds
# both a weak reference (I.wr) and a strong reference (I.J) to class
# J. I is also in a cycle (I.wr points to a weakref that references
# I.acallback). When we del these three, they all become trash, but
# the cycles prevent any of them from getting cleaned up immediately.
# Instead they have to wait for cyclic gc to deduce that they're
# trash.
#
# gc used to call tp_clear on all of them, and the order in which
# it does that is pretty accidental. The exact order in which we
# built up these things manages to provoke gc into running tp_clear
# in just the right order (I last). Calling tp_clear on II leaves
# behind an insane class object (its __mro__ becomes NULL). Calling
# tp_clear on J breaks its self-cycle, but J doesn't get deleted
# just then because of the strong reference from I.J. Calling
# tp_clear on I starts to clear I's __dict__, and just happens to
# clear I.J first -- I.wr is still intact. That removes the last
# reference to J, which triggers the weakref callback. The callback
# tries to do "self.J", and instances of new-style classes look up
# attributes ("J") in the class dict first. The class (II) wants to
# search II.__mro__, but that's NULL. The result was a segfault in
# a release build, and an assert failure in a debug build.
del I, J, II
gc.collect()
def test_callback_in_cycle_2(self):
import gc
# This is just like test_callback_in_cycle_1, except that II is an
# old-style class. The symptom is different then: an instance of an
# old-style class looks in its own __dict__ first. 'J' happens to
# get cleared from I.__dict__ before 'wr', and 'J' was never in II's
# __dict__, so the attribute isn't found. The difference is that
# the old-style II doesn't have a NULL __mro__ (it doesn't have any
# __mro__), so no segfault occurs. Instead it got:
# test_callback_in_cycle_2 (__main__.ReferencesTestCase) ...
# Exception exceptions.AttributeError:
# "II instance has no attribute 'J'" in <bound method II.acallback
# of <?.II instance at 0x00B9B4B8>> ignored
class J(object):
pass
class II:
def acallback(self, ignore):
self.J
I = II()
I.J = J
I.wr = weakref.ref(J, I.acallback)
del I, J, II
gc.collect()
def test_callback_in_cycle_3(self):
import gc
# This one broke the first patch that fixed the last two. In this
# case, the objects reachable from the callback aren't also reachable
# from the object (c1) *triggering* the callback: you can get to
# c1 from c2, but not vice-versa. The result was that c2's __dict__
# got tp_clear'ed by the time the c2.cb callback got invoked.
class C:
def cb(self, ignore):
self.me
self.c1
self.wr
c1, c2 = C(), C()
c2.me = c2
c2.c1 = c1
c2.wr = weakref.ref(c1, c2.cb)
del c1, c2
gc.collect()
def test_callback_in_cycle_4(self):
import gc
# Like test_callback_in_cycle_3, except c2 and c1 have different
# classes. c2's class (C) isn't reachable from c1 then, so protecting
# objects reachable from the dying object (c1) isn't enough to stop
# c2's class (C) from getting tp_clear'ed before c2.cb is invoked.
# The result was a segfault (C.__mro__ was NULL when the callback
# tried to look up self.me).
class C(object):
def cb(self, ignore):
self.me
self.c1
self.wr
class D:
pass
c1, c2 = D(), C()
c2.me = c2
c2.c1 = c1
c2.wr = weakref.ref(c1, c2.cb)
del c1, c2, C, D
gc.collect()
def test_callback_in_cycle_resurrection(self):
import gc
# Do something nasty in a weakref callback: resurrect objects
# from dead cycles. For this to be attempted, the weakref and
# its callback must also be part of the cyclic trash (else the
# objects reachable via the callback couldn't be in cyclic trash
# to begin with -- the callback would act like an external root).
# But gc clears trash weakrefs with callbacks early now, which
# disables the callbacks, so the callbacks shouldn't get called
# at all (and so nothing actually gets resurrected).
alist = []
class C(object):
def __init__(self, value):
self.attribute = value
def acallback(self, ignore):
alist.append(self.c)
c1, c2 = C(1), C(2)
c1.c = c2
c2.c = c1
c1.wr = weakref.ref(c2, c1.acallback)
c2.wr = weakref.ref(c1, c2.acallback)
def C_went_away(ignore):
alist.append("C went away")
wr = weakref.ref(C, C_went_away)
del c1, c2, C # make them all trash
self.assertEqual(alist, []) # del isn't enough to reclaim anything
gc.collect()
# c1.wr and c2.wr were part of the cyclic trash, so should have
# been cleared without their callbacks executing. OTOH, the weakref
# to C is bound to a function local (wr), and wasn't trash, so that
# callback should have been invoked when C went away.
self.assertEqual(alist, ["C went away"])
# The remaining weakref should be dead now (its callback ran).
self.assertEqual(wr(), None)
del alist[:]
gc.collect()
self.assertEqual(alist, [])
def test_callbacks_on_callback(self):
import gc
# Set up weakref callbacks *on* weakref callbacks.
alist = []
def safe_callback(ignore):
alist.append("safe_callback called")
class C(object):
def cb(self, ignore):
alist.append("cb called")
c, d = C(), C()
c.other = d
d.other = c
callback = c.cb
c.wr = weakref.ref(d, callback) # this won't trigger
d.wr = weakref.ref(callback, d.cb) # ditto
external_wr = weakref.ref(callback, safe_callback) # but this will
self.assertIs(external_wr(), callback)
# The weakrefs attached to c and d should get cleared, so that
# C.cb is never called. But external_wr isn't part of the cyclic
# trash, and no cyclic trash is reachable from it, so safe_callback
# should get invoked when the bound method object callback (c.cb)
# -- which is itself a callback, and also part of the cyclic trash --
# gets reclaimed at the end of gc.
del callback, c, d, C
self.assertEqual(alist, []) # del isn't enough to clean up cycles
gc.collect()
self.assertEqual(alist, ["safe_callback called"])
self.assertEqual(external_wr(), None)
del alist[:]
gc.collect()
self.assertEqual(alist, [])
def test_gc_during_ref_creation(self):
self.check_gc_during_creation(weakref.ref)
def test_gc_during_proxy_creation(self):
self.check_gc_during_creation(weakref.proxy)
def check_gc_during_creation(self, makeref):
thresholds = gc.get_threshold()
gc.set_threshold(1, 1, 1)
gc.collect()
class A:
pass
def callback(*args):
pass
referenced = A()
a = A()
a.a = a
a.wr = makeref(referenced)
try:
# now make sure the object and the ref get labeled as
# cyclic trash:
a = A()
weakref.ref(referenced, callback)
finally:
gc.set_threshold(*thresholds)
def test_ref_created_during_del(self):
# Bug #1377858
# A weakref created in an object's __del__() would crash the
# interpreter when the weakref was cleaned up since it would refer to
# non-existent memory. This test should not segfault the interpreter.
class Target(object):
def __del__(self):
global ref_from_del
ref_from_del = weakref.ref(self)
w = Target()
def test_init(self):
# Issue 3634
# <weakref to class>.__init__() doesn't check errors correctly
r = weakref.ref(Exception)
self.assertRaises(TypeError, r.__init__, 0, 0, 0, 0, 0)
# No exception should be raised here
gc.collect()
def test_classes(self):
# Check that classes are weakrefable.
class A(object):
pass
l = []
weakref.ref(int)
a = weakref.ref(A, l.append)
A = None
gc.collect()
self.assertEqual(a(), None)
self.assertEqual(l, [a])
def test_equality(self):
# Alive weakrefs defer equality testing to their underlying object.
x = Object(1)
y = Object(1)
z = Object(2)
a = weakref.ref(x)
b = weakref.ref(y)
c = weakref.ref(z)
d = weakref.ref(x)
# Note how we directly test the operators here, to stress both
# __eq__ and __ne__.
self.assertTrue(a == b)
self.assertFalse(a != b)
self.assertFalse(a == c)
self.assertTrue(a != c)
self.assertTrue(a == d)
self.assertFalse(a != d)
self.assertFalse(a == x)
self.assertTrue(a != x)
self.assertTrue(a == ALWAYS_EQ)
self.assertFalse(a != ALWAYS_EQ)
del x, y, z
gc.collect()
for r in a, b, c:
# Sanity check
self.assertIs(r(), None)
# Dead weakrefs compare by identity: whether `a` and `d` are the
# same weakref object is an implementation detail, since they pointed
# to the same original object and didn't have a callback.
# (see issue #16453).
self.assertFalse(a == b)
self.assertTrue(a != b)
self.assertFalse(a == c)
self.assertTrue(a != c)
self.assertEqual(a == d, a is d)
self.assertEqual(a != d, a is not d)
def test_ordering(self):
# weakrefs cannot be ordered, even if the underlying objects can.
ops = [operator.lt, operator.gt, operator.le, operator.ge]
x = Object(1)
y = Object(1)
a = weakref.ref(x)
b = weakref.ref(y)
for op in ops:
self.assertRaises(TypeError, op, a, b)
# Same when dead.
del x, y
gc.collect()
for op in ops:
self.assertRaises(TypeError, op, a, b)
def test_hashing(self):
# Alive weakrefs hash the same as the underlying object
x = Object(42)
y = Object(42)
a = weakref.ref(x)
b = weakref.ref(y)
self.assertEqual(hash(a), hash(42))
del x, y
gc.collect()
# Dead weakrefs:
# - retain their hash if they were hashed when alive;
# - otherwise, cannot be hashed.
self.assertEqual(hash(a), hash(42))
self.assertRaises(TypeError, hash, b)
def test_trashcan_16602(self):
# Issue #16602: when a weakref's target was part of a long
# deallocation chain, the trashcan mechanism could delay clearing
# of the weakref and make the target object visible from outside
# code even though its refcount had dropped to 0. A crash ensued.
class C:
def __init__(self, parent):
if not parent:
return
wself = weakref.ref(self)
def cb(wparent):
o = wself()
self.wparent = weakref.ref(parent, cb)
d = weakref.WeakKeyDictionary()
root = c = C(None)
for n in range(100):
d[c] = c = C(c)
del root
gc.collect()
def test_callback_attribute(self):
x = Object(1)
callback = lambda ref: None
ref1 = weakref.ref(x, callback)
self.assertIs(ref1.__callback__, callback)
ref2 = weakref.ref(x)
self.assertIsNone(ref2.__callback__)
def test_callback_attribute_after_deletion(self):
x = Object(1)
ref = weakref.ref(x, self.callback)
self.assertIsNotNone(ref.__callback__)
del x
support.gc_collect()
self.assertIsNone(ref.__callback__)
def test_set_callback_attribute(self):
x = Object(1)
callback = lambda ref: None
ref1 = weakref.ref(x, callback)
with self.assertRaises(AttributeError):
ref1.__callback__ = lambda ref: None
def test_callback_gcs(self):
class ObjectWithDel(Object):
def __del__(self): pass
x = ObjectWithDel(1)
ref1 = weakref.ref(x, lambda ref: support.gc_collect())
del x
support.gc_collect()
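# Editor's sketch (not part of the original test suite): a minimal, standalone
# illustration of two guarantees exercised by ReferencesTestCase above -- refs
# created without a callback are shared, and a dead ref keeps the hash it had
# while its referent was alive.  Defined only, never called by the test runner.
def _editor_example_ref_sharing_and_hash():
    o = Object(1)
    r1 = weakref.ref(o)           # no callback (NULL in the C API)
    r2 = weakref.ref(o, None)     # callback explicitly None
    assert r1 is r2               # "no callback" refs are shared
    h = hash(r1)                  # equals hash(o) while o is alive
    del o
    gc.collect()
    assert r1() is None           # referent is gone
    assert hash(r1) == h          # previously computed hash is retained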
class SubclassableWeakrefTestCase(TestBase):
def test_subclass_refs(self):
class MyRef(weakref.ref):
def __init__(self, ob, callback=None, value=42):
self.value = value
super().__init__(ob, callback)
def __call__(self):
self.called = True
return super().__call__()
o = Object("foo")
mr = MyRef(o, value=24)
self.assertIs(mr(), o)
self.assertTrue(mr.called)
self.assertEqual(mr.value, 24)
del o
self.assertIsNone(mr())
self.assertTrue(mr.called)
def test_subclass_refs_dont_replace_standard_refs(self):
class MyRef(weakref.ref):
pass
o = Object(42)
r1 = MyRef(o)
r2 = weakref.ref(o)
self.assertIsNot(r1, r2)
self.assertEqual(weakref.getweakrefs(o), [r2, r1])
self.assertEqual(weakref.getweakrefcount(o), 2)
r3 = MyRef(o)
self.assertEqual(weakref.getweakrefcount(o), 3)
refs = weakref.getweakrefs(o)
self.assertEqual(len(refs), 3)
self.assertIs(r2, refs[0])
self.assertIn(r1, refs[1:])
self.assertIn(r3, refs[1:])
def test_subclass_refs_dont_conflate_callbacks(self):
class MyRef(weakref.ref):
pass
o = Object(42)
r1 = MyRef(o, id)
r2 = MyRef(o, str)
self.assertIsNot(r1, r2)
refs = weakref.getweakrefs(o)
self.assertIn(r1, refs)
self.assertIn(r2, refs)
def test_subclass_refs_with_slots(self):
class MyRef(weakref.ref):
__slots__ = "slot1", "slot2"
def __new__(type, ob, callback, slot1, slot2):
return weakref.ref.__new__(type, ob, callback)
def __init__(self, ob, callback, slot1, slot2):
self.slot1 = slot1
self.slot2 = slot2
def meth(self):
return self.slot1 + self.slot2
o = Object(42)
r = MyRef(o, None, "abc", "def")
self.assertEqual(r.slot1, "abc")
self.assertEqual(r.slot2, "def")
self.assertEqual(r.meth(), "abcdef")
self.assertFalse(hasattr(r, "__dict__"))
def test_subclass_refs_with_cycle(self):
"""Confirm https://bugs.python.org/issue3100 is fixed."""
# An instance of a weakref subclass can have attributes.
# If such a weakref holds the only strong reference to the object,
# deleting the weakref will delete the object. In this case,
# the callback must not be called, because the ref object is
# being deleted.
class MyRef(weakref.ref):
pass
# Use a local callback, for "regrtest -R::"
# to detect refcounting problems
def callback(w):
self.cbcalled += 1
o = C()
r1 = MyRef(o, callback)
r1.o = o
del o
del r1 # Used to crash here
self.assertEqual(self.cbcalled, 0)
# Same test, with two weakrefs to the same object
# (since code paths are different)
o = C()
r1 = MyRef(o, callback)
r2 = MyRef(o, callback)
r1.r = r2
r2.o = o
del o
del r2
del r1 # Used to crash here
self.assertEqual(self.cbcalled, 0)
class WeakMethodTestCase(unittest.TestCase):
def _subclass(self):
"""Return an Object subclass overriding `some_method`."""
class C(Object):
def some_method(self):
return 6
return C
def test_alive(self):
o = Object(1)
r = weakref.WeakMethod(o.some_method)
self.assertIsInstance(r, weakref.ReferenceType)
self.assertIsInstance(r(), type(o.some_method))
self.assertIs(r().__self__, o)
self.assertIs(r().__func__, o.some_method.__func__)
self.assertEqual(r()(), 4)
def test_object_dead(self):
o = Object(1)
r = weakref.WeakMethod(o.some_method)
del o
gc.collect()
self.assertIs(r(), None)
def test_method_dead(self):
C = self._subclass()
o = C(1)
r = weakref.WeakMethod(o.some_method)
del C.some_method
gc.collect()
self.assertIs(r(), None)
def test_callback_when_object_dead(self):
# Test callback behaviour when object dies first.
C = self._subclass()
calls = []
def cb(arg):
calls.append(arg)
o = C(1)
r = weakref.WeakMethod(o.some_method, cb)
del o
gc.collect()
self.assertEqual(calls, [r])
# Callback is only called once.
C.some_method = Object.some_method
gc.collect()
self.assertEqual(calls, [r])
def test_callback_when_method_dead(self):
# Test callback behaviour when method dies first.
C = self._subclass()
calls = []
def cb(arg):
calls.append(arg)
o = C(1)
r = weakref.WeakMethod(o.some_method, cb)
del C.some_method
gc.collect()
self.assertEqual(calls, [r])
# Callback is only called once.
del o
gc.collect()
self.assertEqual(calls, [r])
@support.cpython_only
def test_no_cycles(self):
# A WeakMethod doesn't create any reference cycle to itself.
o = Object(1)
def cb(_):
pass
r = weakref.WeakMethod(o.some_method, cb)
wr = weakref.ref(r)
del r
self.assertIs(wr(), None)
def test_equality(self):
def _eq(a, b):
self.assertTrue(a == b)
self.assertFalse(a != b)
def _ne(a, b):
self.assertTrue(a != b)
self.assertFalse(a == b)
x = Object(1)
y = Object(1)
a = weakref.WeakMethod(x.some_method)
b = weakref.WeakMethod(y.some_method)
c = weakref.WeakMethod(x.other_method)
d = weakref.WeakMethod(y.other_method)
# Objects equal, same method
_eq(a, b)
_eq(c, d)
# Objects equal, different method
_ne(a, c)
_ne(a, d)
_ne(b, c)
_ne(b, d)
# Objects unequal, same or different method
z = Object(2)
e = weakref.WeakMethod(z.some_method)
f = weakref.WeakMethod(z.other_method)
_ne(a, e)
_ne(a, f)
_ne(b, e)
_ne(b, f)
# Compare with different types
_ne(a, x.some_method)
_eq(a, ALWAYS_EQ)
del x, y, z
gc.collect()
# Dead WeakMethods compare by identity
refs = a, b, c, d, e, f
for q in refs:
for r in refs:
self.assertEqual(q == r, q is r)
self.assertEqual(q != r, q is not r)
def test_hashing(self):
# Alive WeakMethods are hashable if the underlying object is
# hashable.
x = Object(1)
y = Object(1)
a = weakref.WeakMethod(x.some_method)
b = weakref.WeakMethod(y.some_method)
c = weakref.WeakMethod(y.other_method)
# Since WeakMethod objects are equal, the hashes should be equal.
self.assertEqual(hash(a), hash(b))
ha = hash(a)
# Dead WeakMethods retain their old hash value
del x, y
gc.collect()
self.assertEqual(hash(a), ha)
self.assertEqual(hash(b), ha)
# If it wasn't hashed when alive, a dead WeakMethod cannot be hashed.
self.assertRaises(TypeError, hash, c)
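# Editor's sketch (not part of the original test suite): the WeakMethod
# behaviour tested above in a few lines -- the weak method rebinds on demand
# and dies once the instance is collected.  Defined only, never called.
def _editor_example_weakmethod():
    o = Object(1)
    wm = weakref.WeakMethod(o.some_method)
    assert wm()() == 4            # bound method is recreated on each call
    del o
    gc.collect()
    assert wm() is None           # dead once the instance is gone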
class MappingTestCase(TestBase):
COUNT = 10
def check_len_cycles(self, dict_type, cons):
N = 20
items = [RefCycle() for i in range(N)]
dct = dict_type(cons(o) for o in items)
# Keep an iterator alive
it = dct.items()
try:
next(it)
except StopIteration:
pass
del items
gc.collect()
n1 = len(dct)
del it
gc.collect()
n2 = len(dct)
# one item may be kept alive inside the iterator
self.assertIn(n1, (0, 1))
self.assertEqual(n2, 0)
def test_weak_keyed_len_cycles(self):
self.check_len_cycles(weakref.WeakKeyDictionary, lambda k: (k, 1))
def test_weak_valued_len_cycles(self):
self.check_len_cycles(weakref.WeakValueDictionary, lambda k: (1, k))
def check_len_race(self, dict_type, cons):
# Extended sanity checks for len() in the face of cyclic collection
self.addCleanup(gc.set_threshold, *gc.get_threshold())
for th in range(1, 100):
N = 20
gc.collect(0)
gc.set_threshold(th, th, th)
items = [RefCycle() for i in range(N)]
dct = dict_type(cons(o) for o in items)
del items
# All items will be collected at next garbage collection pass
it = dct.items()
try:
next(it)
except StopIteration:
pass
n1 = len(dct)
del it
n2 = len(dct)
self.assertGreaterEqual(n1, 0)
self.assertLessEqual(n1, N)
self.assertGreaterEqual(n2, 0)
self.assertLessEqual(n2, n1)
def test_weak_keyed_len_race(self):
self.check_len_race(weakref.WeakKeyDictionary, lambda k: (k, 1))
def test_weak_valued_len_race(self):
self.check_len_race(weakref.WeakValueDictionary, lambda k: (1, k))
def test_weak_values(self):
#
# This exercises d.copy(), d.items(), d[], del d[], len(d).
#
dict, objects = self.make_weak_valued_dict()
for o in objects:
self.assertEqual(weakref.getweakrefcount(o), 1)
self.assertIs(o, dict[o.arg],
"wrong object returned by weak dict!")
items1 = list(dict.items())
items2 = list(dict.copy().items())
items1.sort()
items2.sort()
self.assertEqual(items1, items2,
"cloning of weak-valued dictionary did not work!")
del items1, items2
self.assertEqual(len(dict), self.COUNT)
del objects[0]
self.assertEqual(len(dict), self.COUNT - 1,
"deleting object did not cause dictionary update")
del objects, o
self.assertEqual(len(dict), 0,
"deleting the values did not clear the dictionary")
# regression on SF bug #447152:
dict = weakref.WeakValueDictionary()
self.assertRaises(KeyError, dict.__getitem__, 1)
dict[2] = C()
self.assertRaises(KeyError, dict.__getitem__, 2)
def test_weak_keys(self):
#
# This exercises d.copy(), d.items(), d[] = v, d[], del d[],
# len(d), k in d.
#
dict, objects = self.make_weak_keyed_dict()
for o in objects:
self.assertEqual(weakref.getweakrefcount(o), 1,
"wrong number of weak references to %r!" % o)
self.assertIs(o.arg, dict[o],
"wrong object returned by weak dict!")
items1 = dict.items()
items2 = dict.copy().items()
self.assertEqual(set(items1), set(items2),
"cloning of weak-keyed dictionary did not work!")
del items1, items2
self.assertEqual(len(dict), self.COUNT)
del objects[0]
self.assertEqual(len(dict), (self.COUNT - 1),
"deleting object did not cause dictionary update")
del objects, o
self.assertEqual(len(dict), 0,
"deleting the keys did not clear the dictionary")
o = Object(42)
dict[o] = "What is the meaning of the universe?"
self.assertIn(o, dict)
self.assertNotIn(34, dict)
def test_weak_keyed_iters(self):
dict, objects = self.make_weak_keyed_dict()
self.check_iters(dict)
# Test keyrefs()
refs = dict.keyrefs()
self.assertEqual(len(refs), len(objects))
objects2 = list(objects)
for wr in refs:
ob = wr()
self.assertIn(ob, dict)
self.assertIn(ob, dict)
self.assertEqual(ob.arg, dict[ob])
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
# Test iterkeyrefs()
objects2 = list(objects)
self.assertEqual(len(list(dict.keyrefs())), len(objects))
for wr in dict.keyrefs():
ob = wr()
self.assertIn(ob, dict)
self.assertIn(ob, dict)
self.assertEqual(ob.arg, dict[ob])
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
def test_weak_valued_iters(self):
dict, objects = self.make_weak_valued_dict()
self.check_iters(dict)
# Test valuerefs()
refs = dict.valuerefs()
self.assertEqual(len(refs), len(objects))
objects2 = list(objects)
for wr in refs:
ob = wr()
self.assertEqual(ob, dict[ob.arg])
self.assertEqual(ob.arg, dict[ob.arg].arg)
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
# Test itervaluerefs()
objects2 = list(objects)
self.assertEqual(len(list(dict.itervaluerefs())), len(objects))
for wr in dict.itervaluerefs():
ob = wr()
self.assertEqual(ob, dict[ob.arg])
self.assertEqual(ob.arg, dict[ob.arg].arg)
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
def check_iters(self, dict):
# item iterator:
items = list(dict.items())
for item in dict.items():
items.remove(item)
self.assertFalse(items, "items() did not touch all items")
# key iterator, via __iter__():
keys = list(dict.keys())
for k in dict:
keys.remove(k)
self.assertFalse(keys, "__iter__() did not touch all keys")
# key iterator, via iterkeys():
keys = list(dict.keys())
for k in dict.keys():
keys.remove(k)
self.assertFalse(keys, "iterkeys() did not touch all keys")
# value iterator:
values = list(dict.values())
for v in dict.values():
values.remove(v)
self.assertFalse(values,
"itervalues() did not touch all values")
def check_weak_destroy_while_iterating(self, dict, objects, iter_name):
n = len(dict)
it = iter(getattr(dict, iter_name)())
next(it) # Trigger internal iteration
# Destroy an object
del objects[-1]
gc.collect() # just in case
# We have removed either the first consumed object, or another one
self.assertIn(len(list(it)), [len(objects), len(objects) - 1])
del it
# The removal has been committed
self.assertEqual(len(dict), n - 1)
def check_weak_destroy_and_mutate_while_iterating(self, dict, testcontext):
# Check that we can explicitly mutate the weak dict without
# interfering with delayed removal.
# `testcontext` should create an iterator, destroy one of the
# weakref'ed objects and then return a new key/value pair corresponding
# to the destroyed object.
with testcontext() as (k, v):
self.assertNotIn(k, dict)
with testcontext() as (k, v):
self.assertRaises(KeyError, dict.__delitem__, k)
self.assertNotIn(k, dict)
with testcontext() as (k, v):
self.assertRaises(KeyError, dict.pop, k)
self.assertNotIn(k, dict)
with testcontext() as (k, v):
dict[k] = v
self.assertEqual(dict[k], v)
ddict = copy.copy(dict)
with testcontext() as (k, v):
dict.update(ddict)
self.assertEqual(dict, ddict)
with testcontext() as (k, v):
dict.clear()
self.assertEqual(len(dict), 0)
def check_weak_del_and_len_while_iterating(self, dict, testcontext):
# Check that len() works when both iterating and removing keys
# explicitly through various means (.pop(), .clear()...), while
# implicit mutation is deferred because an iterator is alive.
# (each call to testcontext() should schedule one item for removal
# for this test to work properly)
o = Object(123456)
with testcontext():
n = len(dict)
# Since the underlying dict is ordered, the first item is popped
dict.pop(next(dict.keys()))
self.assertEqual(len(dict), n - 1)
dict[o] = o
self.assertEqual(len(dict), n)
# last item in objects is removed from dict in context shutdown
with testcontext():
self.assertEqual(len(dict), n - 1)
# Then, (o, o) is popped
dict.popitem()
self.assertEqual(len(dict), n - 2)
with testcontext():
self.assertEqual(len(dict), n - 3)
del dict[next(dict.keys())]
self.assertEqual(len(dict), n - 4)
with testcontext():
self.assertEqual(len(dict), n - 5)
dict.popitem()
self.assertEqual(len(dict), n - 6)
with testcontext():
dict.clear()
self.assertEqual(len(dict), 0)
self.assertEqual(len(dict), 0)
def test_weak_keys_destroy_while_iterating(self):
# Issue #7105: iterators shouldn't crash when a key is implicitly removed
dict, objects = self.make_weak_keyed_dict()
self.check_weak_destroy_while_iterating(dict, objects, 'keys')
self.check_weak_destroy_while_iterating(dict, objects, 'items')
self.check_weak_destroy_while_iterating(dict, objects, 'values')
self.check_weak_destroy_while_iterating(dict, objects, 'keyrefs')
dict, objects = self.make_weak_keyed_dict()
@contextlib.contextmanager
def testcontext():
try:
it = iter(dict.items())
next(it)
# Schedule a key/value for removal and recreate it
v = objects.pop().arg
gc.collect() # just in case
yield Object(v), v
finally:
it = None # should commit all removals
gc.collect()
self.check_weak_destroy_and_mutate_while_iterating(dict, testcontext)
# Issue #21173: len() fragile when keys are both implicitly and
# explicitly removed.
dict, objects = self.make_weak_keyed_dict()
self.check_weak_del_and_len_while_iterating(dict, testcontext)
def test_weak_values_destroy_while_iterating(self):
# Issue #7105: iterators shouldn't crash when a key is implicitly removed
dict, objects = self.make_weak_valued_dict()
self.check_weak_destroy_while_iterating(dict, objects, 'keys')
self.check_weak_destroy_while_iterating(dict, objects, 'items')
self.check_weak_destroy_while_iterating(dict, objects, 'values')
self.check_weak_destroy_while_iterating(dict, objects, 'itervaluerefs')
self.check_weak_destroy_while_iterating(dict, objects, 'valuerefs')
dict, objects = self.make_weak_valued_dict()
@contextlib.contextmanager
def testcontext():
try:
it = iter(dict.items())
next(it)
# Schedule a key/value for removal and recreate it
k = objects.pop().arg
gc.collect() # just in case
yield k, Object(k)
finally:
it = None # should commit all removals
gc.collect()
self.check_weak_destroy_and_mutate_while_iterating(dict, testcontext)
dict, objects = self.make_weak_valued_dict()
self.check_weak_del_and_len_while_iterating(dict, testcontext)
def test_make_weak_keyed_dict_from_dict(self):
o = Object(3)
dict = weakref.WeakKeyDictionary({o:364})
self.assertEqual(dict[o], 364)
def test_make_weak_keyed_dict_from_weak_keyed_dict(self):
o = Object(3)
dict = weakref.WeakKeyDictionary({o:364})
dict2 = weakref.WeakKeyDictionary(dict)
self.assertEqual(dict[o], 364)
def make_weak_keyed_dict(self):
dict = weakref.WeakKeyDictionary()
objects = list(map(Object, range(self.COUNT)))
for o in objects:
dict[o] = o.arg
return dict, objects
def test_make_weak_valued_dict_from_dict(self):
o = Object(3)
dict = weakref.WeakValueDictionary({364:o})
self.assertEqual(dict[364], o)
def test_make_weak_valued_dict_from_weak_valued_dict(self):
o = Object(3)
dict = weakref.WeakValueDictionary({364:o})
dict2 = weakref.WeakValueDictionary(dict)
self.assertEqual(dict[364], o)
def test_make_weak_valued_dict_misc(self):
# errors
self.assertRaises(TypeError, weakref.WeakValueDictionary.__init__)
self.assertRaises(TypeError, weakref.WeakValueDictionary, {}, {})
self.assertRaises(TypeError, weakref.WeakValueDictionary, (), ())
# special keyword arguments
o = Object(3)
for kw in 'self', 'dict', 'other', 'iterable':
d = weakref.WeakValueDictionary(**{kw: o})
self.assertEqual(list(d.keys()), [kw])
self.assertEqual(d[kw], o)
def make_weak_valued_dict(self):
dict = weakref.WeakValueDictionary()
objects = list(map(Object, range(self.COUNT)))
for o in objects:
dict[o.arg] = o
return dict, objects
def check_popitem(self, klass, key1, value1, key2, value2):
weakdict = klass()
weakdict[key1] = value1
weakdict[key2] = value2
self.assertEqual(len(weakdict), 2)
k, v = weakdict.popitem()
self.assertEqual(len(weakdict), 1)
if k is key1:
self.assertIs(v, value1)
else:
self.assertIs(v, value2)
k, v = weakdict.popitem()
self.assertEqual(len(weakdict), 0)
if k is key1:
self.assertIs(v, value1)
else:
self.assertIs(v, value2)
def test_weak_valued_dict_popitem(self):
self.check_popitem(weakref.WeakValueDictionary,
"key1", C(), "key2", C())
def test_weak_keyed_dict_popitem(self):
self.check_popitem(weakref.WeakKeyDictionary,
C(), "value 1", C(), "value 2")
def check_setdefault(self, klass, key, value1, value2):
self.assertIsNot(value1, value2,
"invalid test"
" -- value parameters must be distinct objects")
weakdict = klass()
o = weakdict.setdefault(key, value1)
self.assertIs(o, value1)
self.assertIn(key, weakdict)
self.assertIs(weakdict.get(key), value1)
self.assertIs(weakdict[key], value1)
o = weakdict.setdefault(key, value2)
self.assertIs(o, value1)
self.assertIn(key, weakdict)
self.assertIs(weakdict.get(key), value1)
self.assertIs(weakdict[key], value1)
def test_weak_valued_dict_setdefault(self):
self.check_setdefault(weakref.WeakValueDictionary,
"key", C(), C())
def test_weak_keyed_dict_setdefault(self):
self.check_setdefault(weakref.WeakKeyDictionary,
C(), "value 1", "value 2")
def check_update(self, klass, dict):
#
# This exercises d.update(), len(d), d.keys(), k in d,
# d.get(), d[].
#
weakdict = klass()
weakdict.update(dict)
self.assertEqual(len(weakdict), len(dict))
for k in weakdict.keys():
self.assertIn(k, dict, "mysterious new key appeared in weak dict")
v = dict.get(k)
self.assertIs(v, weakdict[k])
self.assertIs(v, weakdict.get(k))
for k in dict.keys():
self.assertIn(k, weakdict, "original key disappeared in weak dict")
v = dict[k]
self.assertIs(v, weakdict[k])
self.assertIs(v, weakdict.get(k))
def test_weak_valued_dict_update(self):
self.check_update(weakref.WeakValueDictionary,
{1: C(), 'a': C(), C(): C()})
# errors
self.assertRaises(TypeError, weakref.WeakValueDictionary.update)
d = weakref.WeakValueDictionary()
self.assertRaises(TypeError, d.update, {}, {})
self.assertRaises(TypeError, d.update, (), ())
self.assertEqual(list(d.keys()), [])
# special keyword arguments
o = Object(3)
for kw in 'self', 'dict', 'other', 'iterable':
d = weakref.WeakValueDictionary()
d.update(**{kw: o})
self.assertEqual(list(d.keys()), [kw])
self.assertEqual(d[kw], o)
def test_weak_valued_union_operators(self):
a = C()
b = C()
c = C()
wvd1 = weakref.WeakValueDictionary({1: a})
wvd2 = weakref.WeakValueDictionary({1: b, 2: a})
wvd3 = wvd1.copy()
d1 = {1: c, 3: b}
pairs = [(5, c), (6, b)]
tmp1 = wvd1 | wvd2 # Between two WeakValueDictionaries
self.assertEqual(dict(tmp1), dict(wvd1) | dict(wvd2))
self.assertIs(type(tmp1), weakref.WeakValueDictionary)
wvd1 |= wvd2
self.assertEqual(wvd1, tmp1)
tmp2 = wvd2 | d1 # Between WeakValueDictionary and mapping
self.assertEqual(dict(tmp2), dict(wvd2) | d1)
self.assertIs(type(tmp2), weakref.WeakValueDictionary)
wvd2 |= d1
self.assertEqual(wvd2, tmp2)
tmp3 = wvd3.copy() # Between WeakValueDictionary and iterable key, value
tmp3 |= pairs
self.assertEqual(dict(tmp3), dict(wvd3) | dict(pairs))
self.assertIs(type(tmp3), weakref.WeakValueDictionary)
tmp4 = d1 | wvd3 # Testing .__ror__
self.assertEqual(dict(tmp4), d1 | dict(wvd3))
self.assertIs(type(tmp4), weakref.WeakValueDictionary)
del a
self.assertNotIn(2, tmp1)
self.assertNotIn(2, tmp2)
self.assertNotIn(1, tmp3)
self.assertNotIn(1, tmp4)
def test_weak_keyed_dict_update(self):
self.check_update(weakref.WeakKeyDictionary,
{C(): 1, C(): 2, C(): 3})
def test_weak_keyed_delitem(self):
d = weakref.WeakKeyDictionary()
o1 = Object('1')
o2 = Object('2')
d[o1] = 'something'
d[o2] = 'something'
self.assertEqual(len(d), 2)
del d[o1]
self.assertEqual(len(d), 1)
self.assertEqual(list(d.keys()), [o2])
def test_weak_keyed_union_operators(self):
o1 = C()
o2 = C()
o3 = C()
wkd1 = weakref.WeakKeyDictionary({o1: 1, o2: 2})
wkd2 = weakref.WeakKeyDictionary({o3: 3, o1: 4})
wkd3 = wkd1.copy()
d1 = {o2: '5', o3: '6'}
pairs = [(o2, 7), (o3, 8)]
tmp1 = wkd1 | wkd2 # Between two WeakKeyDictionaries
self.assertEqual(dict(tmp1), dict(wkd1) | dict(wkd2))
self.assertIs(type(tmp1), weakref.WeakKeyDictionary)
wkd1 |= wkd2
self.assertEqual(wkd1, tmp1)
tmp2 = wkd2 | d1 # Between WeakKeyDictionary and mapping
self.assertEqual(dict(tmp2), dict(wkd2) | d1)
self.assertIs(type(tmp2), weakref.WeakKeyDictionary)
wkd2 |= d1
self.assertEqual(wkd2, tmp2)
tmp3 = wkd3.copy() # Between WeakKeyDictionary and iterable key, value
tmp3 |= pairs
self.assertEqual(dict(tmp3), dict(wkd3) | dict(pairs))
self.assertIs(type(tmp3), weakref.WeakKeyDictionary)
tmp4 = d1 | wkd3 # Testing .__ror__
self.assertEqual(dict(tmp4), d1 | dict(wkd3))
self.assertIs(type(tmp4), weakref.WeakKeyDictionary)
del o1
self.assertNotIn(4, tmp1.values())
self.assertNotIn(4, tmp2.values())
self.assertNotIn(1, tmp3.values())
self.assertNotIn(1, tmp4.values())
def test_weak_valued_delitem(self):
d = weakref.WeakValueDictionary()
o1 = Object('1')
o2 = Object('2')
d['something'] = o1
d['something else'] = o2
self.assertEqual(len(d), 2)
del d['something']
self.assertEqual(len(d), 1)
self.assertEqual(list(d.items()), [('something else', o2)])
def test_weak_keyed_bad_delitem(self):
d = weakref.WeakKeyDictionary()
o = Object('1')
# An attempt to delete an object that isn't there should raise
# KeyError. It didn't before 2.3.
self.assertRaises(KeyError, d.__delitem__, o)
self.assertRaises(KeyError, d.__getitem__, o)
# If a key isn't of a weakly referenceable type, __getitem__ and
# __setitem__ raise TypeError. __delitem__ should too.
self.assertRaises(TypeError, d.__delitem__, 13)
self.assertRaises(TypeError, d.__getitem__, 13)
self.assertRaises(TypeError, d.__setitem__, 13, 13)
def test_weak_keyed_cascading_deletes(self):
# SF bug 742860. For some reason, before 2.3 __delitem__ iterated
# over the keys via self.data.iterkeys(). If things vanished from
# the dict during this (or got added), that caused a RuntimeError.
d = weakref.WeakKeyDictionary()
mutate = False
class C(object):
def __init__(self, i):
self.value = i
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
if mutate:
# Side effect that mutates the dict, by removing the
# last strong reference to a key.
del objs[-1]
return self.value == other.value
objs = [C(i) for i in range(4)]
for o in objs:
d[o] = o.value
del o # now the only strong references to keys are in objs
# Find the order in which iterkeys sees the keys.
objs = list(d.keys())
# Reverse it, so that the iteration implementation of __delitem__
# has to keep looping to find the first object we delete.
objs.reverse()
# Turn on mutation in C.__eq__. The first time through the loop,
# under the iterkeys() business the first comparison will delete
# the last item iterkeys() would see, and that causes a
# RuntimeError: dictionary changed size during iteration
# when the iterkeys() loop goes around to try comparing the next
# key. After this was fixed, it just deletes the last object *our*
# "for o in obj" loop would have gotten to.
mutate = True
count = 0
for o in objs:
count += 1
del d[o]
self.assertEqual(len(d), 0)
self.assertEqual(count, 2)
def test_make_weak_valued_dict_repr(self):
dict = weakref.WeakValueDictionary()
self.assertRegex(repr(dict), '<WeakValueDictionary at 0x.*>')
def test_make_weak_keyed_dict_repr(self):
dict = weakref.WeakKeyDictionary()
self.assertRegex(repr(dict), '<WeakKeyDictionary at 0x.*>')
def test_threaded_weak_valued_setdefault(self):
d = weakref.WeakValueDictionary()
with collect_in_thread():
for i in range(100000):
x = d.setdefault(10, RefCycle())
self.assertIsNot(x, None) # we never put None in there!
del x
def test_threaded_weak_valued_pop(self):
d = weakref.WeakValueDictionary()
with collect_in_thread():
for i in range(100000):
d[10] = RefCycle()
x = d.pop(10, 10)
self.assertIsNot(x, None) # we never put None in there!
def test_threaded_weak_valued_consistency(self):
# Issue #28427: old keys should not remove new values from
# WeakValueDictionary when collecting from another thread.
d = weakref.WeakValueDictionary()
with collect_in_thread():
for i in range(200000):
o = RefCycle()
d[10] = o
# o is still alive, so the dict can't be empty
self.assertEqual(len(d), 1)
o = None # lose ref
def check_threaded_weak_dict_copy(self, type_, deepcopy):
# `type_` should be either WeakKeyDictionary or WeakValueDictionary.
# `deepcopy` should be either True or False.
exc = []
class DummyKey:
def __init__(self, ctr):
self.ctr = ctr
class DummyValue:
def __init__(self, ctr):
self.ctr = ctr
def dict_copy(d, exc):
try:
if deepcopy is True:
_ = copy.deepcopy(d)
else:
_ = d.copy()
except Exception as ex:
exc.append(ex)
def pop_and_collect(lst):
gc_ctr = 0
while lst:
i = random.randint(0, len(lst) - 1)
gc_ctr += 1
lst.pop(i)
if gc_ctr % 10000 == 0:
gc.collect() # just in case
self.assertIn(type_, (weakref.WeakKeyDictionary, weakref.WeakValueDictionary))
d = type_()
keys = []
values = []
# Initialize d with many entries
for i in range(70000):
k, v = DummyKey(i), DummyValue(i)
keys.append(k)
values.append(v)
d[k] = v
del k
del v
t_copy = threading.Thread(target=dict_copy, args=(d, exc,))
if type_ is weakref.WeakKeyDictionary:
t_collect = threading.Thread(target=pop_and_collect, args=(keys,))
else: # weakref.WeakValueDictionary
t_collect = threading.Thread(target=pop_and_collect, args=(values,))
t_copy.start()
t_collect.start()
t_copy.join()
t_collect.join()
# Test exceptions
if exc:
raise exc[0]
def test_threaded_weak_key_dict_copy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(weakref.WeakKeyDictionary, False)
def test_threaded_weak_key_dict_deepcopy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(weakref.WeakKeyDictionary, True)
def test_threaded_weak_value_dict_copy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(weakref.WeakValueDictionary, False)
def test_threaded_weak_value_dict_deepcopy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(weakref.WeakValueDictionary, True)
@support.cpython_only
def test_remove_closure(self):
d = weakref.WeakValueDictionary()
self.assertIsNone(d._remove.__closure__)
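# Editor's sketch (not part of the original test suite): the core
# WeakValueDictionary behaviour exercised by MappingTestCase above -- an entry
# disappears once its value is no longer strongly referenced.  Defined only,
# never called by the test runner.
def _editor_example_weak_value_dictionary():
    d = weakref.WeakValueDictionary()
    v = Object(1)
    d['key'] = v
    assert d['key'] is v
    del v
    gc.collect()
    assert 'key' not in d         # the entry vanished with its value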
from test import mapping_tests
class WeakValueDictionaryTestCase(mapping_tests.BasicTestMappingProtocol):
"""Check that WeakValueDictionary conforms to the mapping protocol"""
__ref = {"key1":Object(1), "key2":Object(2), "key3":Object(3)}
type2test = weakref.WeakValueDictionary
def _reference(self):
return self.__ref.copy()
class WeakKeyDictionaryTestCase(mapping_tests.BasicTestMappingProtocol):
"""Check that WeakKeyDictionary conforms to the mapping protocol"""
__ref = {Object("key1"):1, Object("key2"):2, Object("key3"):3}
type2test = weakref.WeakKeyDictionary
def _reference(self):
return self.__ref.copy()
class FinalizeTestCase(unittest.TestCase):
class A:
pass
def _collect_if_necessary(self):
# we create no ref-cycles so in CPython no gc should be needed
if sys.implementation.name != 'cpython':
support.gc_collect()
def test_finalize(self):
def add(x,y,z):
res.append(x + y + z)
return x + y + z
a = self.A()
res = []
f = weakref.finalize(a, add, 67, 43, z=89)
self.assertEqual(f.alive, True)
self.assertEqual(f.peek(), (a, add, (67,43), {'z':89}))
self.assertEqual(f(), 199)
self.assertEqual(f(), None)
self.assertEqual(f(), None)
self.assertEqual(f.peek(), None)
self.assertEqual(f.detach(), None)
self.assertEqual(f.alive, False)
self.assertEqual(res, [199])
res = []
f = weakref.finalize(a, add, 67, 43, 89)
self.assertEqual(f.peek(), (a, add, (67,43,89), {}))
self.assertEqual(f.detach(), (a, add, (67,43,89), {}))
self.assertEqual(f(), None)
self.assertEqual(f(), None)
self.assertEqual(f.peek(), None)
self.assertEqual(f.detach(), None)
self.assertEqual(f.alive, False)
self.assertEqual(res, [])
res = []
f = weakref.finalize(a, add, x=67, y=43, z=89)
del a
self._collect_if_necessary()
self.assertEqual(f(), None)
self.assertEqual(f(), None)
self.assertEqual(f.peek(), None)
self.assertEqual(f.detach(), None)
self.assertEqual(f.alive, False)
self.assertEqual(res, [199])
def test_arg_errors(self):
def fin(*args, **kwargs):
res.append((args, kwargs))
a = self.A()
res = []
f = weakref.finalize(a, fin, 1, 2, func=3, obj=4)
self.assertEqual(f.peek(), (a, fin, (1, 2), {'func': 3, 'obj': 4}))
f()
self.assertEqual(res, [((1, 2), {'func': 3, 'obj': 4})])
with self.assertRaises(TypeError):
weakref.finalize(a, func=fin, arg=1)
with self.assertRaises(TypeError):
weakref.finalize(obj=a, func=fin, arg=1)
self.assertRaises(TypeError, weakref.finalize, a)
self.assertRaises(TypeError, weakref.finalize)
def test_order(self):
a = self.A()
res = []
f1 = weakref.finalize(a, res.append, 'f1')
f2 = weakref.finalize(a, res.append, 'f2')
f3 = weakref.finalize(a, res.append, 'f3')
f4 = weakref.finalize(a, res.append, 'f4')
f5 = weakref.finalize(a, res.append, 'f5')
# make sure finalizers can keep themselves alive
del f1, f4
self.assertTrue(f2.alive)
self.assertTrue(f3.alive)
self.assertTrue(f5.alive)
self.assertTrue(f5.detach())
self.assertFalse(f5.alive)
f5() # nothing because previously unregistered
res.append('A')
f3() # => res.append('f3')
self.assertFalse(f3.alive)
res.append('B')
f3() # nothing because previously called
res.append('C')
del a
self._collect_if_necessary()
# => res.append('f4')
# => res.append('f2')
# => res.append('f1')
self.assertFalse(f2.alive)
res.append('D')
f2() # nothing because previously called by gc
expected = ['A', 'f3', 'B', 'C', 'f4', 'f2', 'f1', 'D']
self.assertEqual(res, expected)
def test_all_freed(self):
# we want a weakrefable subclass of weakref.finalize
class MyFinalizer(weakref.finalize):
pass
a = self.A()
res = []
def callback():
res.append(123)
f = MyFinalizer(a, callback)
wr_callback = weakref.ref(callback)
wr_f = weakref.ref(f)
del callback, f
self.assertIsNotNone(wr_callback())
self.assertIsNotNone(wr_f())
del a
self._collect_if_necessary()
self.assertIsNone(wr_callback())
self.assertIsNone(wr_f())
self.assertEqual(res, [123])
@classmethod
def run_in_child(cls):
def error():
# Create an atexit finalizer from inside a finalizer called
# at exit. This should be the next to be run.
g1 = weakref.finalize(cls, print, 'g1')
print('f3 error')
1/0
# cls should stay alive till atexit callbacks run
f1 = weakref.finalize(cls, print, 'f1', _global_var)
f2 = weakref.finalize(cls, print, 'f2', _global_var)
f3 = weakref.finalize(cls, error)
f4 = weakref.finalize(cls, print, 'f4', _global_var)
assert f1.atexit == True
f2.atexit = False
assert f3.atexit == True
assert f4.atexit == True
def test_atexit(self):
prog = ('from test.test_weakref import FinalizeTestCase;'+
'FinalizeTestCase.run_in_child()')
rc, out, err = script_helper.assert_python_ok('-c', prog)
out = out.decode('ascii').splitlines()
self.assertEqual(out, ['f4 foobar', 'f3 error', 'g1', 'f1 foobar'])
self.assertTrue(b'ZeroDivisionError' in err)
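# Editor's sketch (not part of the original test suite): weakref.finalize as
# exercised by FinalizeTestCase above.  On CPython the surviving finalizers of
# a collected object run in reverse registration order (see test_order).
# Defined only, never called by the test runner.
def _editor_example_finalize():
    class A:
        pass
    a = A()
    out = []
    weakref.finalize(a, out.append, 'first')
    weakref.finalize(a, out.append, 'second')
    del a
    gc.collect()
    assert out == ['second', 'first']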
libreftest = """ Doctest for examples in the library reference: weakref.rst
>>> import weakref
>>> class Dict(dict):
... pass
...
>>> obj = Dict(red=1, green=2, blue=3) # this object is weakly referenceable
>>> r = weakref.ref(obj)
>>> print(r() is obj)
True
>>> import weakref
>>> class Object:
... pass
...
>>> o = Object()
>>> r = weakref.ref(o)
>>> o2 = r()
>>> o is o2
True
>>> del o, o2
>>> print(r())
None
>>> import weakref
>>> class ExtendedRef(weakref.ref):
... def __init__(self, ob, callback=None, **annotations):
... super().__init__(ob, callback)
... self.__counter = 0
... for k, v in annotations.items():
... setattr(self, k, v)
... def __call__(self):
... '''Return a pair containing the referent and the number of
... times the reference has been called.
... '''
... ob = super().__call__()
... if ob is not None:
... self.__counter += 1
... ob = (ob, self.__counter)
... return ob
...
>>> class A: # not in docs from here, just testing the ExtendedRef
... pass
...
>>> a = A()
>>> r = ExtendedRef(a, foo=1, bar="baz")
>>> r.foo
1
>>> r.bar
'baz'
>>> r()[1]
1
>>> r()[1]
2
>>> r()[0] is a
True
>>> import weakref
>>> _id2obj_dict = weakref.WeakValueDictionary()
>>> def remember(obj):
... oid = id(obj)
... _id2obj_dict[oid] = obj
... return oid
...
>>> def id2obj(oid):
... return _id2obj_dict[oid]
...
>>> a = A() # from here, just testing
>>> a_id = remember(a)
>>> id2obj(a_id) is a
True
>>> del a
>>> try:
... id2obj(a_id)
... except KeyError:
... print('OK')
... else:
... print('WeakValueDictionary error')
OK
"""
__test__ = {'libreftest' : libreftest}
def test_main():
support.run_unittest(
ReferencesTestCase,
WeakMethodTestCase,
MappingTestCase,
WeakValueDictionaryTestCase,
WeakKeyDictionaryTestCase,
SubclassableWeakrefTestCase,
FinalizeTestCase,
)
support.run_doctest(sys.modules[__name__])
if __name__ == "__main__":
test_main()
|
py | 1a5314e3f7bc4552e45d97d191c9b0b6d4940e95 | """
Test basic properties of implemented datasets.
"""
import unittest
import numpy as np
from classicdata import Ionosphere
from classicdata.dataset import Dataset, CitationWarning
class TestLoading(unittest.TestCase):
def test_loading(self):
with self.assertWarns(CitationWarning):
for DatasetImplementation in Dataset.__subclasses__():
with self.subTest(Dataset=DatasetImplementation):
dataset_instance = DatasetImplementation()
# `.loaded` should not be set.
self.assertFalse(dataset_instance.loaded)
# Number of points and number of features should be correctly defined.
self.assertEqual(
dataset_instance.points.shape,
(dataset_instance.n_samples, dataset_instance.n_features),
)
# Number of labels must be defined correctly.
self.assertEqual(
dataset_instance.labels.shape, (dataset_instance.n_samples,)
)
# Convert labels “there and back”.
recoded_labels = dataset_instance.label_encoder.transform(
dataset_instance.decode_labels(dataset_instance.labels)
)
self.assertTrue(np.all(recoded_labels == dataset_instance.labels))
# `.loaded` should be set.
self.assertTrue(dataset_instance.loaded)
# Count random split.
(
train_points,
train_labels,
test_points,
test_labels,
) = dataset_instance.split_for_training()
self.assertEqual(
train_points.shape[0] + test_points.shape[0],
dataset_instance.n_samples,
)
self.assertEqual(
train_labels.shape[0] + test_labels.shape[0],
dataset_instance.n_samples,
)
def test_zero_test_split(self):
dataset = Ionosphere() # Arbitrarily chosen.
dataset.load()
(
train_points,
train_labels,
test_points,
test_labels,
) = dataset.split_for_training(test_size=0)
self.assertEqual(train_points.shape[0], dataset.n_samples)
self.assertEqual(train_labels.shape[0], dataset.n_samples)
self.assertEqual(test_points.shape[0], 0)
self.assertEqual(test_labels.shape[0], 0)
if __name__ == "__main__":
unittest.main()
|
py | 1a5315884024b2a1ee5522fadea468627401e890 | import logging
import os
import re
from shutil import copyfileobj
from tempfile import NamedTemporaryFile
import docutils.core
from django.conf import settings
from django.core.files import File
from django.core.urlresolvers import reverse
from django.core.validators import RegexValidator
from django.db import models
from django.db.models.signals import post_delete
from django.utils.html import escape
from django.utils.translation import ugettext_lazy as _
from docutils.utils import SystemMessage
from model_utils import Choices
from model_utils.fields import AutoCreatedField, AutoLastModifiedField
from model_utils.models import TimeStampedModel
from localshop.apps.packages.utils import delete_files
logger = logging.getLogger(__name__)
class Repository(TimeStampedModel):
name = models.CharField(max_length=250)
slug = models.CharField(max_length=200, unique=True)
description = models.CharField(max_length=500, blank=True)
teams = models.ManyToManyField(
'accounts.Team', related_name='repositories', blank=True)
enable_auto_mirroring = models.BooleanField(default=True)
upstream_pypi_url = models.CharField(
max_length=500,
blank=True,
default='https://pypi.python.org/simple',
help_text=_(
"The upstream pypi URL (default: https://pypi.python.org/simple)"))
upstream_repositories = models.ManyToManyField('self')
@property
def packages_deep(self):
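# Collect packages from this repository and every linked upstream repository.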
repos = set(self.upstream_repositories.all())
repos.add(self)
packages = Package.objects.filter(
repository__in=repos,
)
return packages
def __str__(self):
return self.name
@property
def simple_index_url(self):
return reverse('packages:simple_index', kwargs={
'repo': self.slug
})
def user_has_access(self, user):
if user.is_superuser:
return True
if not user.is_authenticated:
return False
return self.teams.filter(members__user=user).exists()
def check_user_role(self, user, roles):
if user.is_superuser:
return True
if not user.is_authenticated:
return False
return self.teams.filter(
members__user=user,
members__role__in=roles
).exists()
@property
def upstream_pypi_url_api(self):
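# The default simple index URL is mapped to the pypi API root; any custom
# upstream URL is returned unchanged.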
if self.upstream_pypi_url == 'https://pypi.python.org/simple':
return 'https://pypi.python.org/pypi'
return self.upstream_pypi_url
class Classifier(models.Model):
name = models.CharField(max_length=255, unique=True)
def __str__(self):
return self.name
class Package(models.Model):
created = AutoCreatedField(db_index=True)
modified = AutoLastModifiedField()
repository = models.ForeignKey(Repository, related_name='packages')
name = models.CharField(max_length=200, db_index=True, validators=[
RegexValidator(
re.compile(r'^[-a-zA-Z0-9_\.]+\Z'),
_("Enter a valid package name consisting"),
'invalid')
])
#: Indicates whether this package is local (a private package)
is_local = models.BooleanField(default=False)
#: Timestamp when we last retrieved the metadata
update_timestamp = models.DateTimeField(null=True)
owners = models.ManyToManyField(settings.AUTH_USER_MODEL)
class Meta:
ordering = ['name']
unique_together = [
('repository', 'name')
]
permissions = (
("view_package", "Can view package"),
)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('dashboard:package_detail', kwargs={
'repo': self.repository.slug, 'name': self.name
})
def get_all_releases(self):
result = {}
for release in self.releases.all():
files = dict((r.filename, r) for r in release.files.all())
result[release.version] = (release, files)
return result
@property
def last_release(self):
return self.releases.order_by('-created')[0]
class Release(models.Model):
created = AutoCreatedField()
modified = AutoLastModifiedField()
author = models.CharField(max_length=128, blank=True)
author_email = models.CharField(max_length=255, blank=True)
classifiers = models.ManyToManyField(Classifier)
description = models.TextField(blank=True)
download_url = models.CharField(max_length=200, blank=True, null=True)
home_page = models.CharField(max_length=200, blank=True, null=True)
license = models.TextField(blank=True)
metadata_version = models.CharField(max_length=64, default='1.0')
package = models.ForeignKey(Package, related_name="releases")
summary = models.TextField(blank=True)
user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True)
version = models.CharField(max_length=512)
class Meta:
ordering = ['-version']
def __str__(self):
return self.version
@property
def description_html(self):
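# Render the reStructuredText description to HTML; fall back to an escaped
# <pre> block if docutils raises a SystemMessage.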
try:
parts = docutils.core.publish_parts(
self.description, writer_name='html4css1')
return parts['fragment']
except SystemMessage:
desc = escape(self.description)
return '<pre>%s</pre>' % desc
def release_file_upload_to(instance, filename):
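# Storage path layout: <repository slug>/<python version>/<first letter>/<package name>/<filename>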
package = instance.release.package
assert package.name and instance.python_version
return os.path.join(
instance.release.package.repository.slug,
instance.python_version,
package.name[0],
package.name,
filename)
class ReleaseFile(models.Model):
TYPES = Choices(
('sdist', 'Source'),
('bdist_egg', 'Egg'),
('bdist_msi', 'MSI'),
('bdist_dmg', 'DMG'),
('bdist_rpm', 'RPM'),
('bdist_dumb', 'bdist_dumb'),
('bdist_wininst', 'bdist_wininst'),
('bdist_wheel', 'bdist_wheel'),
)
created = AutoCreatedField()
modified = AutoLastModifiedField()
release = models.ForeignKey(Release, related_name="files")
size = models.IntegerField(null=True)
filetype = models.CharField(max_length=25, choices=TYPES)
distribution = models.FileField(upload_to=release_file_upload_to, max_length=512)
filename = models.CharField(max_length=200, blank=True, null=True)
md5_digest = models.CharField(max_length=512)
python_version = models.CharField(max_length=100)
url = models.CharField(max_length=1024, blank=True)
user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True)
class Meta:
unique_together = ('release', 'filetype', 'python_version', 'filename')
def __str__(self):
return self.filename
def get_absolute_url(self):
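# The #md5= fragment allows clients such as pip to verify the downloaded
# file against the stored digest.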
url = reverse('packages:download', kwargs={
'repo': self.release.package.repository.slug,
'name': self.release.package.name,
'pk': self.pk, 'filename': self.filename
})
return '%s#md5=%s' % (url, self.md5_digest)
def save_filecontent(self, filename, fh):
tmp_file = NamedTemporaryFile()
copyfileobj(fh, tmp_file)
self.distribution.save(filename, File(tmp_file))
@property
def file_is_available(self):
return self.distribution and self.distribution.storage.exists(self.distribution.name)
def download(self):
"""Start a celery task to download the release file from pypi.
If `settings.LOCALSHOP_ISOLATED` is True then download the file
in-process.
"""
from .tasks import download_file
if not settings.LOCALSHOP_ISOLATED:
download_file.delay(pk=self.pk)
else:
download_file(pk=self.pk)
if settings.LOCALSHOP_DELETE_FILES:
post_delete.connect(
delete_files, sender=ReleaseFile,
dispatch_uid="localshop.apps.packages.utils.delete_files")
|