blob_id (stringlengths 40-40) | directory_id (stringlengths 40-40) | path (stringlengths 3-616) | content_id (stringlengths 40-40) | detected_licenses (listlengths 0-112) | license_type (2 classes) | repo_name (stringlengths 5-115) | snapshot_id (stringlengths 40-40) | revision_id (stringlengths 40-40) | branch_name (777 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (149 classes) | src_encoding (26 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 3 to 10.2M) | extension (188 classes) | content (stringlengths 3-10.2M) | authors (listlengths 1-1) | author_id (stringlengths 1-132) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6f6e441bbde59763d7fe65221a6f86714e769020 | 2082cd57fa2325a508af5f10bd00e8eca059bc09 | /src/geometry/manifolds/translation_algebra.py | 67b1958cc3594e112a9cdeb471c3976addf27f7f | []
| no_license | efernandez/geometry | 98e5894a83acaa32eefb2187374d4c34801a5600 | ec7fa1308224f3d156c54495bc4b05ce47a41004 | refs/heads/master | 2021-01-18T16:51:13.964917 | 2014-11-04T14:03:59 | 2014-11-04T14:03:59 | 36,390,891 | 0 | 1 | null | 2015-05-27T19:35:14 | 2015-05-27T19:35:14 | null | UTF-8 | Python | false | false | 1,237 | py | from . import MatrixLieAlgebra
from .. import extract_pieces, combine_pieces
from contracts import contract
import numpy as np
class tran(MatrixLieAlgebra):
'''
lie algebra for translation
'''
@contract(n="1|2|3")
def __init__(self, n):
MatrixLieAlgebra.__init__(self, n + 1, dimension=n)
def norm(self, X):
W, v, zero, zero = extract_pieces(X) # @UnusedVariable
return np.linalg.norm(v)
def project(self, X):
W, v, zero, zero = extract_pieces(X) # @UnusedVariable
return combine_pieces(W * 0, v, v * 0, 0)
def __repr__(self):
return 'tr%s' % (self.n - 1)
def interesting_points(self):
points = []
points.append(self.zero())
return points
@contract(a='belongs')
def vector_from_algebra(self, a):
W, v, zero, zero = extract_pieces(a) # @UnusedVariable
if v.shape == ():
v = v.reshape(1)
assert v.size == self.n - 1
return v
@contract(returns='belongs', v='array[K]')
def algebra_from_vector(self, v):
assert v.size == self.n - 1
return combine_pieces(np.zeros((self.n - 1, self.n - 1)), v, v * 0, 0)
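    # Illustrative sketch (added; not in the original file). Assuming
    # combine_pieces/extract_pieces embed translations of R^n as (n+1)x(n+1)
    # homogeneous matrices, as the asserts above suggest, tran(2) behaves like:
    #
    #     t = tran(2)
    #     a = t.algebra_from_vector(np.array([3.0, 4.0]))
    #     t.norm(a)                 # -> 5.0, norm of the translation part
    #     t.vector_from_algebra(a)  # -> array([3., 4.]), round-trips the vector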
| [
"[email protected]"
]
| |
187d5e4a2c3a6f4bda7055934dce787d7d1d0339 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_352/ch6_2020_03_04_10_31_08_746058.py | c84cfa947f1fae70b02bc0450ea33744f1c14660 | []
| no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | def celsius_para_fahrenheit(celsius):
    # The original signature took an unused 'fahrenheit' parameter that was
    # immediately overwritten; the conversion only needs the Celsius value.
    fahrenheit = (celsius * 9) / 5 + 32
    return fahrenheit
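# Example usage (added for illustration):
#   celsius_para_fahrenheit(100) -> 212.0  (boiling point of water)
#   celsius_para_fahrenheit(0)   -> 32.0   (freezing point)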
| [
"[email protected]"
]
| |
2cc09c81e60862cf246d6b8773beb89d120f3c54 | 532e337751c44b89e68f0022966d6295116928a9 | /client/tests/filesystem_test.py | 9c7e531de3f22e663c3dd4952f1d1023c288e5c0 | [
"MIT"
]
| permissive | laashub-soa/pyre-check | 8f1a2717888a22c15a7f6608e0d732e62fa060f9 | cc1a1b5c1007bf3e0e52e7f8b04c8e8fc365db44 | refs/heads/master | 2022-04-13T12:12:46.317095 | 2020-04-11T03:39:21 | 2020-04-11T03:41:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,467 | py | # Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import errno
import fcntl
import os
import pathlib # noqa
import subprocess
import tempfile
import unittest
from contextlib import contextmanager
from unittest.mock import MagicMock, Mock, call, patch
from .. import __name__ as client_name, buck, commands, filesystem
from ..analysis_directory import SharedAnalysisDirectory
from ..commands.command import __name__ as command_name
from ..exceptions import EnvironmentException
from ..filesystem import (
Filesystem,
MercurialBackedFilesystem,
__name__ as filesystem_name,
_delete_symbolic_link,
acquire_lock,
acquire_lock_if_needed,
add_symbolic_link,
find_python_paths,
find_root,
remove_if_exists,
)
class FilesystemTest(unittest.TestCase):
def test_find_python_paths(self) -> None:
root = tempfile.mkdtemp()
# When there are no paths, returns empty list.
self.assertListEqual(find_python_paths(root), [])
def create_file(name: str) -> None:
with open(os.path.join(root, name), "w+"):
pass
def create_symlink(target: str, source: str) -> None:
os.symlink(os.path.join(root, target), os.path.join(root, source))
create_file("a.py")
create_file("b.pyi")
create_file("c.cpp")
create_symlink("a.py", "link1.py")
create_symlink("dangling.py", "link2.py")
create_symlink("c.cpp", "link3.py")
create_symlink("a.py", "link4.cpp")
os.mkdir(os.path.join(root, "mypy"))
os.mkdir(os.path.join(root, "scipyi"))
os.mkdir(os.path.join(root, "spy.py"))
create_symlink("spy.py", "directory_symlink.py")
create_file("mypy/my.py")
create_file("scipyi/sci.pyi")
create_symlink("mypy/my.py", "mypy/another.pyi")
create_symlink("scipyi/sci.pyi", "scipyi/another.py")
actual_paths = sorted(
os.path.relpath(path, root) for path in find_python_paths(root)
)
self.assertEqual(
actual_paths,
[
"a.py",
"b.pyi",
"directory_symlink.py",
"link1.py",
"link2.py",
"link3.py",
"mypy/another.pyi",
"mypy/my.py",
"scipyi/another.py",
"scipyi/sci.pyi",
],
)
def test_remove_if_exists(self) -> None:
# File removal.
with patch("os.remove") as os_remove, patch("shutil.rmtree") as shutil_rmtree:
os_remove.side_effect = OSError()
remove_if_exists("path")
os_remove.assert_called_once_with("path")
shutil_rmtree.assert_called_once_with("path")
# Directory removal.
with patch("os.remove") as os_remove, patch("shutil.rmtree") as shutil_rmtree:
shutil_rmtree.side_effect = OSError()
remove_if_exists("path")
os_remove.assert_called_once_with("path")
shutil_rmtree.assert_called_once_with("path")
# Both throw.
with patch("os.remove") as os_remove, patch("shutil.rmtree") as shutil_rmtree:
os_remove.side_effect = FileNotFoundError()
shutil_rmtree.side_effect = OSError()
remove_if_exists("path")
os_remove.assert_called_once_with("path")
shutil_rmtree.assert_called_once_with("path")
@patch("fcntl.lockf")
def test_acquire_lock(self, lock_file: Mock) -> None:
(_, path) = tempfile.mkstemp()
lockfile_file_descriptor = None
with acquire_lock(path, blocking=False) as file_descriptor:
lockfile_file_descriptor = file_descriptor
with acquire_lock(path, blocking=True):
pass
lock_file.assert_has_calls(
[
call(lockfile_file_descriptor, fcntl.LOCK_EX | fcntl.LOCK_NB),
call(lockfile_file_descriptor, fcntl.LOCK_UN),
call(lockfile_file_descriptor, fcntl.LOCK_EX),
call(lockfile_file_descriptor, fcntl.LOCK_UN),
]
)
def fail_on_exclusive(_, lock_kind):
if lock_kind == fcntl.LOCK_EX | fcntl.LOCK_NB:
raise OSError()
return None
lock_file.side_effect = fail_on_exclusive
with self.assertRaises(OSError):
with acquire_lock(path, blocking=False):
pass
@patch.object(filesystem, "acquire_lock")
def test_acquire_lock_if_needed(self, acquire_lock: MagicMock) -> None:
acquire_lock_if_needed("/some/path", blocking=True, needed=True)
acquire_lock.assert_called_once()
@patch.object(filesystem, "acquire_lock")
def test_acquire_lock_if_needed__not_needed(self, acquire_lock: MagicMock) -> None:
acquire_lock_if_needed("/some/path", blocking=True, needed=False)
acquire_lock.assert_not_called()
@patch("shutil.rmtree")
def test_cleanup(self, rmtree) -> None:
shared_analysis_directory = SharedAnalysisDirectory(["first", "second"], [])
shared_analysis_directory.cleanup()
rmtree.assert_not_called()
shared_analysis_directory = SharedAnalysisDirectory(
["first", "second"], [], isolate=True
)
shared_analysis_directory.cleanup()
rmtree.assert_called_with(shared_analysis_directory.get_root())
def test_filesystem_list_bare(self):
filesystem = Filesystem()
with patch.object(subprocess, "run") as run:
filesystem.list(".", [".pyre_configuration.local"])
run.assert_has_calls(
[
call(
["find", ".", "(", "-path", "./.pyre_configuration.local", ")"],
stdout=subprocess.PIPE,
cwd=".",
),
call().stdout.decode("utf-8"),
call().stdout.decode().split(),
]
)
with patch.object(subprocess, "run") as run:
filesystem.list("/root", ["**/*.py", "foo.cpp"], exclude=["bar/*.py"])
run.assert_has_calls(
[
call(
[
"find",
".",
"(",
"-path",
"./**/*.py",
"-or",
"-path",
"./foo.cpp",
")",
"-and",
"!",
"(",
"-path",
"./bar/*.py",
")",
],
stdout=subprocess.PIPE,
cwd="/root",
),
call().stdout.decode("utf-8"),
call().stdout.decode().split(),
]
)
def fail_command(arguments, **kwargs):
return subprocess.CompletedProcess(
args=[], returncode=1, stdout="".encode("utf-8")
)
with patch.object(subprocess, "run") as run:
run.side_effect = fail_command
self.assertEqual([], filesystem.list(".", [".pyre_configuration.local"]))
run.assert_has_calls(
[
call(
["find", ".", "(", "-path", "./.pyre_configuration.local", ")"],
stdout=subprocess.PIPE,
cwd=".",
)
]
)
def test_filesystem_list_mercurial(self):
filesystem = MercurialBackedFilesystem()
with patch.object(subprocess, "run") as run:
filesystem.list(".", [".pyre_configuration.local"])
run.assert_has_calls(
[
call(
["hg", "files", "--include", ".pyre_configuration.local"],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
cwd=".",
),
call().stdout.decode("utf-8"),
call().stdout.decode().split(),
]
)
with patch.object(subprocess, "run") as run:
filesystem.list("/root", ["**/*.py", "foo.cpp"], exclude=["bar/*.py"])
run.assert_has_calls(
[
call(
[
"hg",
"files",
"--include",
"**/*.py",
"--include",
"foo.cpp",
"--exclude",
"bar/*.py",
],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
cwd="/root",
),
call().stdout.decode("utf-8"),
call().stdout.decode().split(),
]
)
def fail_command(arguments, **kwargs):
return subprocess.CompletedProcess(
args=[], returncode=1, stdout="".encode("utf-8")
)
with patch.object(subprocess, "run") as run:
run.side_effect = fail_command
self.assertEqual([], filesystem.list(".", [".pyre_configuration.local"]))
run.assert_has_calls(
[
call(
["hg", "files", "--include", ".pyre_configuration.local"],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
cwd=".",
)
]
)
@patch.object(filesystem, "_compute_symbolic_link_mapping")
@patch("os.getcwd")
@patch.object(subprocess, "check_output")
def test_get_scratch_directory(self, check_output, getcwd, compute_symbolic_links):
# No scratch, no local configuration
check_output.side_effect = FileNotFoundError
getcwd.return_value = "default"
shared_analysis_directory = SharedAnalysisDirectory(["first", "second"], [])
directory = shared_analysis_directory.get_scratch_directory()
self.assertEqual(directory, "default/.pyre")
root = shared_analysis_directory.get_root()
self.assertEqual(root, "default/.pyre/shared_analysis_directory")
# Scratch, no local configuration
check_output.side_effect = None
check_output.return_value = "/scratch\n".encode("utf-8")
shared_analysis_directory = SharedAnalysisDirectory(["first", "second"], [])
directory = shared_analysis_directory.get_scratch_directory()
self.assertEqual(directory, "/scratch")
root = shared_analysis_directory.get_root()
self.assertEqual(root, "/scratch/shared_analysis_directory")
# No scratch, using local configuration
check_output.side_effect = FileNotFoundError
getcwd.return_value = "default"
shared_analysis_directory = SharedAnalysisDirectory(
["first", "second"],
[],
filter_paths={"path/to/local"},
local_configuration_root="path/to/local",
)
directory = shared_analysis_directory.get_scratch_directory()
self.assertEqual(directory, "default/.pyre")
root = shared_analysis_directory.get_root()
self.assertEqual(root, "default/.pyre/path/to/local")
# Scratch, using local configuration
check_output.side_effect = None
check_output.return_value = "/scratch\n".encode("utf-8")
shared_analysis_directory = SharedAnalysisDirectory(
["first", "second"],
[],
filter_paths={"path/to/local"},
local_configuration_root="path/to/local",
)
directory = shared_analysis_directory.get_scratch_directory()
self.assertEqual(directory, "/scratch")
root = shared_analysis_directory.get_root()
self.assertEqual(root, "/scratch/path/to/local")
@patch.object(tempfile, "mkdtemp", return_value="/tmp/pyre_tmp_xyz")
@patch.object(filesystem, "find_root", return_value="/buck_root")
@patch("os.makedirs")
@patch(filesystem_name + ".acquire_lock")
@patch.object(SharedAnalysisDirectory, "get_root", return_value="/analysis_root")
def test_prepare(self, get_root, acquire_lock, makedirs, find_root, mkdtemp):
@contextmanager
def acquire(*args, **kwargs):
yield
with patch.object(SharedAnalysisDirectory, "_clear") as clear, patch.object(
SharedAnalysisDirectory, "_merge"
) as merge:
shared_analysis_directory = SharedAnalysisDirectory(["first", "second"], [])
acquire_lock.side_effect = acquire
shared_analysis_directory.prepare()
merge.assert_has_calls([call()])
clear.assert_has_calls([call()])
@patch("{}.Path".format(command_name))
@patch("{}.Path.mkdir".format(command_name))
@patch("os.path.realpath", side_effect=lambda path: "realpath({})".format(path))
@patch("os.getcwd", return_value="/root")
@patch("os.path.exists", return_value=True)
@patch("{}.find_project_root".format(command_name), return_value="/root/local")
@patch("{}.find_local_root".format(command_name), return_value=None)
@patch("os.chdir")
def test_resolve_source_directories(
self,
chdir,
find_local_root,
find_project_root,
exists,
cwd,
realpath,
path_mkdir,
path,
) -> None:
arguments = MagicMock()
arguments.source_directories = []
arguments.command = commands.Check
arguments.use_buck_builder = False
arguments.ignore_unbuilt_dependencies = False
arguments.local_configuration = None
arguments.logger = None
configuration = MagicMock()
configuration.source_directories = []
configuration.local_configuration_root = "/root/local"
configuration.use_buck_builder = False
configuration.ignore_unbuilt_dependencies = False
with self.assertRaises(EnvironmentException):
buck_builder = buck.SimpleBuckBuilder()
analysis_directory = SharedAnalysisDirectory(
[],
[],
original_directory="/root",
filter_paths=set(),
buck_builder=buck_builder,
)
analysis_directory._resolve_source_directories()
# Arguments override configuration.
with patch.object(
buck, "generate_source_directories", return_value=[]
) as buck_source_directories:
arguments.source_directories = ["arguments_source_directory"]
configuration.source_directories = ["configuration_source_directory"]
buck_builder = buck.SimpleBuckBuilder()
analysis_directory = SharedAnalysisDirectory(
["some_source_directory"],
["configuration_source_directory"],
original_directory="/root",
filter_paths=set(),
buck_builder=buck_builder,
)
analysis_directory._resolve_source_directories()
buck_source_directories.assert_called_with(
{"configuration_source_directory"}
)
self.assertEqual(
analysis_directory._source_directories, {"some_source_directory"}
)
with patch.object(
buck, "generate_source_directories", return_value=["arguments_target"]
) as buck_source_directories:
cwd.return_value = "/"
original_directory = "/root"
arguments.source_directories = []
arguments.targets = ["arguments_target"]
configuration.source_directories = ["configuration_source_directory"]
command = commands.Check(arguments, original_directory, configuration)
analysis_directory = command._analysis_directory
assert isinstance(analysis_directory, SharedAnalysisDirectory)
analysis_directory._resolve_source_directories()
buck_source_directories.assert_called_with({"arguments_target"})
self.assertEqual(
analysis_directory._source_directories,
{"realpath(root/arguments_target)"},
)
with patch.object(
buck, "generate_source_directories", return_value=["arguments_target"]
) as buck_source_directories:
# same test as above, but Start instead of Check; build should be False
cwd.return_value = "/"
original_directory = "/root"
command = commands.Start(
arguments,
original_directory,
terminal=False,
store_type_check_resolution=False,
use_watchman=True,
incremental_style=commands.command.IncrementalStyle.FINE_GRAINED,
configuration=configuration,
)
analysis_directory = command._analysis_directory
assert isinstance(analysis_directory, SharedAnalysisDirectory)
analysis_directory._resolve_source_directories()
buck_source_directories.assert_called_with({"arguments_target"})
self.assertEqual(
analysis_directory._source_directories,
{"realpath(root/arguments_target)"},
)
# Restart and start always rebuild buck targets
with patch.object(
buck, "generate_source_directories", return_value=["arguments_target"]
) as buck_source_directories:
cwd.side_effect = ["/", "/", "/"]
original_directory = "/root"
command = commands.Start(
arguments,
original_directory,
terminal=False,
store_type_check_resolution=False,
use_watchman=True,
incremental_style=commands.command.IncrementalStyle.FINE_GRAINED,
configuration=configuration,
)
analysis_directory = command._analysis_directory
assert isinstance(analysis_directory, SharedAnalysisDirectory)
analysis_directory._resolve_source_directories()
buck_source_directories.assert_called_with({"arguments_target"})
command = commands.Restart(arguments, original_directory, configuration)
analysis_directory = command._analysis_directory
assert isinstance(analysis_directory, SharedAnalysisDirectory)
analysis_directory._resolve_source_directories()
buck_source_directories.assert_called_with({"arguments_target"})
# Configuration is picked up when no arguments provided.
with patch.object(
buck,
"generate_source_directories",
return_value=["configuration_source_directory"],
) as buck_source_directories:
cwd.return_value = "/"
original_directory = "/root"
arguments.source_directories = []
arguments.targets = []
arguments.command = commands.Check
configuration.targets = ["configuration_target"]
configuration.source_directories = []
command = commands.Check(arguments, original_directory, configuration)
analysis_directory = command._analysis_directory
assert isinstance(analysis_directory, SharedAnalysisDirectory)
analysis_directory._resolve_source_directories()
buck_source_directories.assert_called_with({"configuration_target"})
self.assertEqual(
analysis_directory._source_directories,
{"realpath(root/configuration_source_directory)"},
)
# Files are translated relative to project root
with patch.object(
buck, "generate_source_directories", return_value=["."]
) as buck_source_directories:
cwd.side_effect = ["/", "/"]
original_directory = "/root"
arguments.source_directories = []
arguments.targets = []
configuration.targets = ["."]
command = commands.Check(arguments, original_directory, configuration)
analysis_directory = command._analysis_directory
assert isinstance(analysis_directory, SharedAnalysisDirectory)
analysis_directory._resolve_source_directories()
self.assertEqual(
analysis_directory._source_directories, {"realpath(root/.)"}
)
@patch("os.path.isfile")
def test_find_configuration(self, os_mock_isfile) -> None:
os_mock_isfile.side_effect = [False, False, False, True]
self.assertEqual(find_root("/a/b/c/d", "configuration"), "/a")
os_mock_isfile.side_effect = [True]
self.assertEqual(find_root("/a", "configuration"), "/a")
os_mock_isfile.side_effect = [False, False]
self.assertEqual(find_root("/a/b", "configuration"), None)
@patch("os.unlink")
def test_delete_symbolic_link(self, unlink):
# delete succeeds
unlink.return_value = None
_delete_symbolic_link("exists")
unlink.assert_called_once_with("exists")
# delete fails
unlink.reset_mock()
unlink.side_effect = OSError
self.assertRaises(OSError, _delete_symbolic_link, "exception_occurs")
unlink.assert_called_once_with("exception_occurs")
@patch("os.unlink")
@patch("os.symlink")
@patch("os.makedirs")
def test_add_symbolic_link(self, makedirs, symlink, unlink):
add_symbolic_link("/a/link", "file.py")
# standard use-cases
makedirs.assert_called_once_with("/a")
symlink.assert_called_once_with("file.py", "/a/link")
symlink.reset_mock()
makedirs.reset_mock()
add_symbolic_link("/a/b/c/d/link", "file.py")
makedirs.assert_called_once_with("/a/b/c/d")
symlink.assert_called_once_with("file.py", "/a/b/c/d/link")
# symlink exists
symlink.reset_mock()
makedirs.reset_mock()
error = OSError()
error.errno = errno.EEXIST
symlink.side_effect = [error, None]
add_symbolic_link("/a/b/link", "file.py")
makedirs.assert_called_once_with("/a/b")
symlink.assert_called_with("file.py", "/a/b/link")
unlink.assert_called_once_with("/a/b/link")
# symlink fails
symlink.reset_mock()
makedirs.reset_mock()
unlink.reset_mock()
symlink.side_effect = OSError()
add_symbolic_link("/a/link", "file.py")
makedirs.assert_called_once_with("/a")
symlink.assert_called_once_with("file.py", "/a/link")
unlink.assert_not_called()
@patch.object(filesystem, "find_paths_with_extensions")
@patch.object(
os.path,
"realpath",
side_effect=lambda path: path.replace("ANALYSIS_ROOT", "LOCAL_ROOT"),
)
def test_compute_symbolic_link_mapping(self, realpath, find_paths_with_extensions):
find_paths_with_extensions.return_value = [
"ANALYSIS_ROOT/a.py",
"ANALYSIS_ROOT/b.thrift",
"ANALYSIS_ROOT/subX/d.pyi",
"ANALYSIS_ROOT/subX/e.py",
"ANALYSIS_ROOT/subY/subZ/g.pyi",
]
self.assertDictEqual(
filesystem._compute_symbolic_link_mapping(
"ANALYSIS_ROOT", ["py", "pyi", "thrift"]
),
{
"LOCAL_ROOT/a.py": "ANALYSIS_ROOT/a.py",
"LOCAL_ROOT/b.thrift": "ANALYSIS_ROOT/b.thrift",
"LOCAL_ROOT/subX/d.pyi": "ANALYSIS_ROOT/subX/d.pyi",
"LOCAL_ROOT/subX/e.py": "ANALYSIS_ROOT/subX/e.py",
"LOCAL_ROOT/subY/subZ/g.pyi": "ANALYSIS_ROOT/subY/subZ/g.pyi",
},
)
| [
"[email protected]"
]
| |
006d5216c55a276b30c61478f4da189fc81ca037 | 7bf287e00b35f50afa70e8585f41d1db543d98f2 | /Medium/FindLeavesOfBinaryTree.py | c914ab48ab422fd2ee73e77c175d3f5a5d0fe9c8 | []
| no_license | mangalagb/Leetcode | eac611453de07ffc635265e98c39b46255cf76c6 | fcf6c3d5d60d13706950247d8a2327adc5faf17e | refs/heads/master | 2022-05-14T23:16:28.007044 | 2022-04-29T19:33:24 | 2022-04-29T19:33:24 | 158,616,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,011 | py | # Given the root of a binary tree, collect a tree's nodes as if you were doing this:
#
#   1. Collect all the leaf nodes.
#   2. Remove all the leaf nodes.
#   3. Repeat until the tree is empty.
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution(object):
def findLeaves(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
ans = []
while root is not None:
leaves = []
self.remove_leaves(root, None, None, leaves)
#it is a root node
if len(leaves) == 0:
leaves.append(root.val)
root = None
ans.append(leaves)
return ans
def remove_leaves(self, node, parent, isLeftChild, leaves):
if not node:
return
#If node is a leaf
if not node.left and not node.right:
if isLeftChild is None:
return
if isLeftChild:
parent.left = None
else:
parent.right = None
leaves.append(node.val)
if node.left:
self.remove_leaves(node.left, node, True, leaves)
if node.right:
self.remove_leaves(node.right, node, False, leaves)
def make_tree(self):
root = TreeNode(1)
node2 = TreeNode(2)
node3 = TreeNode(3)
node4 = TreeNode(4)
node5 = TreeNode(5)
root.left = node2
root.right = node3
node2.left = node4
node2.right = node5
return root
def make_tree1(self):
root = TreeNode(1)
return root
my_sol = Solution()
root = my_sol.make_tree()
print(my_sol.findLeaves(root)) #[[4,5,3],[2],[1]]
root = my_sol.make_tree1()
print(my_sol.findLeaves(root)) #[[1]]
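# Note (added): each pass of the while loop re-traverses the remaining tree, so the
# worst case (a degenerate path) is O(n^2); a single post-order pass that buckets
# nodes by height would bring this down to O(n).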
| [
"[email protected]"
]
| |
d2894ba6e632b91ec3412e5b44336eb0e03154d2 | fec261e7717769078dd0044b3ac19e509ff65afa | /python/sort/selection_sort.py | bb4e47579b4fa5b22a4eeda18c29a39cc587698f | []
| no_license | ne7ermore/playground | af94854c6da01b43b1e10ea891129a749ea9d807 | 072406e562e0d33c650ba01bf9ebfbe593f55d5c | refs/heads/master | 2021-06-02T13:19:34.110406 | 2020-05-28T10:49:12 | 2020-05-28T10:49:12 | 108,945,081 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 532 | py | def selection_sort(arr):
for i in range(len(arr)):
cor_index = i
for j in range(i, len(arr)):
if arr[j] < arr[cor_index]:
cor_index = j
arr[i], arr[cor_index] = arr[cor_index], arr[i]
return arr
if __name__ == "__main__":
arr = [10, 20, 5, 9, 3, 8, 12, 14, 90, 0, 60, 40, 23, 35, 95, 18]
assert len(selection_sort(arr)) == len(arr)
assert selection_sort(arr) == [0, 3, 5, 8, 9, 10, 12,
14, 18, 20, 23, 35, 40, 60, 90, 95]
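# Note (added): selection sort makes O(n^2) comparisons but only O(n) swaps and
# sorts in place; the asserts above double as a small usage example.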
| [
"[email protected]"
]
| |
bc871b75ba4a48cd1baa270703581d5d3cbdfaaf | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02646/s014246720.py | f54f26a26b36440b0006e25566aa3480de114870 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | import sys
A,V = map(int, input().split())
B,W = map(int, input().split())
T = int(input())
# This looks solvable in O(1).
# If their speeds are equal, or the one running away is faster, it's impossible.
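# Worked example (added): A=1 V=2, B=3 W=1, T=3 -> gap |A-B| = 2, closing
# speed |V-W| = 1, and 1*3 >= 2, so the chaser catches up in time: "YES".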
if V <= W:
print("NO")
sys.exit()
# Case analysis for when the chaser (oni) is faster
distance_AB = abs(A-B)
speed_AB = abs(V-W)
if speed_AB * T >= distance_AB:
print("YES")
else:
print("NO") | [
"[email protected]"
]
| |
b35629b13b4288e9f2d3b4ff7f9c2a7af9103d2b | 2d055595705582784624c6bde5abf1b3854b34a9 | /tweets/mixins.py | e35d46e2850f0171dca2e7dc62983077149c37d8 | []
| no_license | Anubhav722/twitter_clone | fc36568cb6b32ce1942923ffcf55ebcce714e53f | f76190b8f5f3ac8dfad87d35b2650c5285e5082b | refs/heads/master | 2021-05-01T08:25:42.857828 | 2017-02-07T14:32:09 | 2017-02-07T14:32:09 | 79,710,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 823 | py | from django import forms
from django.forms.utils import ErrorList
class FormUserNeededMixin(object):
def form_valid(self, form):
if self.request.user.is_authenticated():
form.instance.user = self.request.user
return super(FormUserNeededMixin, self).form_valid(form)
else:
form._errors[forms.forms.NON_FIELD_ERRORS] = ErrorList(['User must be logged in to continue.'])
return self.form_invalid(form)
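# Illustrative sketch (added; not part of the original module): attaching the mixin
# to a CreateView so form_valid() stamps the logged-in user onto the new object.
# TweetModelForm and the template path below are assumed, not taken from this repo.
#
#     class TweetCreateView(FormUserNeededMixin, CreateView):
#         form_class = TweetModelForm
#         template_name = 'tweets/create_view.html'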
class UserOwnerMixin(object):
def form_valid(self, form):
if form.instance.user == self.request.user:
return super(UserOwnerMixin, self).form_valid(form)
else:
form._errors[forms.forms.NON_FIELD_ERRORS] = ErrorList(['This user is not allowed to change the data'])
return self.form_invalid(form) | [
"[email protected]"
]
| |
db3779c711392ab68d99c733bcb2d858c18aee3a | f24c35bb0919f9ad75f45e7906691c3189536b33 | /xcb_ws/file/quarotor-master/cv_vision/devel/lib/python2.7/dist-packages/april_pro/msg/__init__.py | 242397c93cfa4f9a9cc60d5c7d837d15d027577f | []
| no_license | mfkiwl/supreme-xcb | 9b941f49bab5a811d23a0cd75790d1e5722aa9f0 | d1287657607bf86d4b1393acf285951760670925 | refs/heads/main | 2023-03-07T12:10:28.288282 | 2021-03-02T11:46:00 | 2021-03-02T11:46:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27 | py | from ._camera_pos import *
| [
"[email protected]"
]
| |
f29241cd5d7f9127aa55a2411375645fc606e2a5 | 46f358b954d2d0067a2093ee9006e222f831a8f8 | /great_expectations/expectations/core/expect_column_max_to_be_between.py | 386953ecb814ea47f4440e4be4ed4e92580474d9 | [
"Apache-2.0"
]
| permissive | dhruvvyas90/great_expectations | b963aa99c683a0da3a9e2b5a1046d2a32f622c7b | fddf5336065c644558c528301e601b9f02be87e2 | refs/heads/main | 2023-01-28T15:26:55.331282 | 2020-12-03T18:52:14 | 2020-12-03T18:52:14 | 319,719,900 | 1 | 0 | Apache-2.0 | 2020-12-08T18:02:33 | 2020-12-08T18:02:32 | null | UTF-8 | Python | false | false | 8,969 | py | from typing import Dict, List, Optional, Union
import numpy as np
import pandas as pd
from great_expectations.core.batch import Batch
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.execution_engine import (
ExecutionEngine,
PandasExecutionEngine,
SparkDFExecutionEngine,
)
from ...execution_engine.sqlalchemy_execution_engine import SqlAlchemyExecutionEngine
from ...render.types import RenderedStringTemplateContent
from ...render.util import (
handle_strict_min_max,
parse_row_condition_string_pandas_engine,
substitute_none_for_missing,
)
try:
import sqlalchemy as sa
except ImportError:
pass
from ...render.renderer.renderer import renderer
from ..expectation import ColumnExpectation, InvalidExpectationConfigurationError
from ..registry import extract_metrics
class ExpectColumnMaxToBeBetween(ColumnExpectation):
"""Expect the column max to be between an min and max value
expect_column_max_to_be_between is a \
:func:`column_aggregate_expectation
<great_expectations.execution_engine.MetaExecutionEngine.column_aggregate_expectation>`.
Args:
column (str): \
The column name
        min_value (comparable type or None): \
            The minimum value allowed for the column max.
        max_value (comparable type or None): \
            The maximum value allowed for the column max.
Keyword Args:
parse_strings_as_datetimes (Boolean or None): \
If True, parse min_value, max_values, and all non-null column values to datetimes before making \
comparisons.
output_strftime_format (str or None): \
            A valid strftime format for datetime output. Only used if parse_strings_as_datetimes=True.
        strict_min (boolean):
            If True, the column max must be strictly larger than min_value, default=False
        strict_max (boolean):
            If True, the column max must be strictly smaller than max_value, default=False
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
These fields in the result object are customized for this expectation:
::
{
"observed_value": (list) The actual column max
}
* min_value and max_value are both inclusive unless strict_min or strict_max are set to True.
* If min_value is None, then max_value is treated as an upper bound
* If max_value is None, then min_value is treated as a lower bound
"""
# Setting necessary computation metric dependencies and defining kwargs, as well as assigning kwargs default values\
metric_dependencies = ("column.max",)
success_keys = ("min_value", "strict_min", "max_value", "strict_max")
# Default values
default_kwarg_values = {
"min_value": None,
"max_value": None,
"strict_min": None,
"strict_max": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
}
""" A Column Map MetricProvider Decorator for the Maximum"""
def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
"""
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
        necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
True if the configuration has been validated successfully. Otherwise, raises an exception
"""
super().validate_configuration(configuration)
self.validate_metric_value_between_configuration(configuration=configuration)
@classmethod
@renderer(renderer_type="renderer.prescriptive")
def _prescriptive_renderer(
cls,
configuration=None,
result=None,
language=None,
runtime_configuration=None,
**kwargs,
):
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name", True)
include_column_name = (
include_column_name if include_column_name is not None else True
)
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
[
"column",
"min_value",
"max_value",
"parse_strings_as_datetimes",
"row_condition",
"condition_parser",
"strict_min",
"strict_max",
],
)
if (params["min_value"] is None) and (params["max_value"] is None):
template_str = "maximum value may have any numerical value."
else:
at_least_str, at_most_str = handle_strict_min_max(params)
if params["min_value"] is not None and params["max_value"] is not None:
template_str = f"maximum value must be {at_least_str} $min_value and {at_most_str} $max_value."
elif params["min_value"] is None:
template_str = f"maximum value must be {at_most_str} $max_value."
elif params["max_value"] is None:
template_str = f"maximum value must be {at_least_str} $min_value."
if params.get("parse_strings_as_datetimes"):
template_str += " Values should be parsed as datetimes."
if include_column_name:
template_str = "$column " + template_str
if params["row_condition"] is not None:
(
conditional_template_str,
conditional_params,
) = parse_row_condition_string_pandas_engine(params["row_condition"])
template_str = conditional_template_str + ", then " + template_str
params.update(conditional_params)
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": template_str,
"params": params,
"styling": styling,
},
}
)
]
@classmethod
@renderer(renderer_type="renderer.descriptive.stats_table.max_row")
def _descriptive_stats_table_max_row_renderer(
cls,
configuration=None,
result=None,
language=None,
runtime_configuration=None,
**kwargs,
):
assert result, "Must pass in result."
return [
{
"content_block_type": "string_template",
"string_template": {
"template": "Maximum",
"tooltip": {"content": "expect_column_max_to_be_between"},
},
},
"{:.2f}".format(result.result["observed_value"]),
]
def _validate(
self,
configuration: ExpectationConfiguration,
metrics: Dict,
runtime_configuration: dict = None,
execution_engine: ExecutionEngine = None,
):
return self._validate_metric_value_between(
metric_name="column.max",
configuration=configuration,
metrics=metrics,
runtime_configuration=runtime_configuration,
execution_engine=execution_engine,
)
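# Illustrative usage sketch (added; not part of this module): once the class is
# registered, the expectation is normally invoked through a validator or dataset
# object; the column name and bounds below are placeholders.
#
#     validator.expect_column_max_to_be_between(
#         column="fare_amount", min_value=0, max_value=500, strict_max=True
#     )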
| [
"[email protected]"
]
| |
a6d6ea3b3be28c17d178c6810e34cf2b263d01b2 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /PTh7tBusAZRgjAWEZ_11.py | 60ec4e9a9fb3660fa0aac2d124fbbae29d276436 | []
| no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 69 | py |
def calc_diff(obj, limit):
return abs(sum(obj.values()) - limit)
| [
"[email protected]"
]
| |
a33b45b686b42f02891fe745b169f339692f91d2 | acdd393c25b32779a637a05b5a5574aaecdda9d6 | /pelican-plugins/more_categories/test_more_categories.py | 41dc4a2241313d4debe74eb3c9a78d38b9c38ad9 | [
"AGPL-3.0-only",
"MIT"
]
| permissive | JN-Blog/jn-blog.com | 51f1b8f9011138b3ebf62b93c2ecaba9e2d514bf | 669bf9a9c6813f2b7980792fb137f6718077aea1 | refs/heads/master | 2020-04-02T10:07:31.569949 | 2018-12-30T14:30:49 | 2018-12-30T14:30:49 | 154,325,262 | 0 | 0 | MIT | 2018-12-30T14:30:50 | 2018-10-23T12:36:12 | Python | UTF-8 | Python | false | false | 1,587 | py | """Unit tests for the more_categories plugin"""
import os
import unittest
from . import more_categories
from pelican.generators import ArticlesGenerator
from pelican.tests.support import get_context, get_settings
class TestArticlesGenerator(unittest.TestCase):
@classmethod
def setUpClass(cls):
more_categories.register()
settings = get_settings()
settings['DEFAULT_CATEGORY'] = 'default'
settings['CACHE_CONTENT'] = False
settings['PLUGINS'] = more_categories
context = get_context(settings)
base_path = os.path.dirname(os.path.abspath(__file__))
test_data_path = os.path.join(base_path, 'test_data')
cls.generator = ArticlesGenerator(
context=context, settings=settings,
path=test_data_path, theme=settings['THEME'], output_path=None)
cls.generator.generate_context()
def test_generate_categories(self):
"""Test whether multiple categories are generated correctly,
including ancestor categories"""
cats_generated = [cat.name for cat, _ in self.generator.categories]
cats_expected = ['default', 'foo', 'foo/bar', 'foo/baz',]
self.assertEqual(sorted(cats_generated), sorted(cats_expected))
def test_assign_articles_to_categories(self):
"""Test whether articles are correctly assigned to categories,
including whether articles are not assigned multiple times to the same
ancestor category"""
for cat, articles in self.generator.categories:
self.assertEqual(len(articles), 1) | [
"[email protected]"
]
| |
03dd33c5872c44c363516af41041b942fc4b82c7 | a6ed990fa4326c625a2a02f0c02eedf758ad8c7b | /meraki/sdk/python/removeNetworkSwitchSwitchStack.py | ea22d146e97d6bbedda21ccbaa78bfaab2c71d73 | []
| no_license | StevenKitavi/Meraki-Dashboard-API-v1-Documentation | cf2352976c6b6c00c17a5f6442cedf0aeed46c22 | 5ed02a7def29a2ce455a3f2cfa185f76f44789f5 | refs/heads/main | 2023-03-02T08:49:34.846055 | 2021-02-05T10:31:25 | 2021-02-05T10:31:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 509 | py | import meraki
# Defining your API key as a variable in source code is not recommended
API_KEY = '6bec40cf957de430a6f1f2baa056b99a4fac9ea0'
# Instead, use an environment variable as shown under the Usage section
# @ https://github.com/meraki/dashboard-api-python/
dashboard = meraki.DashboardAPI(API_KEY)
network_id = 'L_646829496481105433'
switch_stack_id = ''
serial = 'QBZY-XWVU-TSRQ'
response = dashboard.switch.removeNetworkSwitchSwitchStack(
network_id, switch_stack_id, serial
)
print(response) | [
"[email protected]"
]
| |
18eb37c2ffe3434f8bcd511d3c748630d8feec5c | d6458a979207e00da6dc653c278b9bfb818ce18d | /Additional Stuff/Medium Stuff/PythonCrypto/crypto9.py | c0eb73e93ad223663400383fdecbc56cad757bcf | []
| no_license | Hackman9912/05-Python-Programming | 61ce7bb48188b4cd3cd8e585480325fdd02e579b | d03a319c952794b2f298a3ef4ddd09c253e24d36 | refs/heads/master | 2020-08-29T14:28:48.403323 | 2019-12-18T21:30:55 | 2019-12-18T21:30:55 | 218,061,276 | 0 | 0 | null | 2019-10-28T14:07:31 | 2019-10-28T14:07:31 | null | UTF-8 | Python | false | false | 454 | py | alphabets = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
string_input = input("Enter a string: ")
input_length = len(string_input)
string_output = ""
for i in range(input_length):
character = string_input[i]
location_of_character = alphabets.find(character)
    new_location = (location_of_character + 3) % len(alphabets)  # wrap past 'Z' instead of indexing out of range
string_output = string_output + alphabets[new_location]
print("Encrypted text: ", string_output)
# print(string_input)
# print(input_length)
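# Example run (added, with the wrap-around fix above): "HELLO" -> "KHOOR", "XYZ" -> "ABC".
# Note: input must be uppercase A-Z, since find() returns -1 for other characters.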
| [
"[email protected]"
]
| |
b5631a6b34a3cc98324f7486850ba9eb57bafb8b | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_24037.py | 9b3850dd7618c3e9e86acac4668b6393892f6468 | []
| no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | # Multi-pattern regex in python
<a\s+(?=[^>]*?href="((?:(?!css).)*?)")[^>]*?>(?=[^<]*?(?:support|help))
| [
"[email protected]"
]
| |
53c98c8632974b7b1a5f574fe5fb035dda1104df | c4702d1a06640555829b367852138cc93ba4a161 | /dym_res_partner/models/dym_res_partner.py | 31e22a8a0cc57df5c41d49344807163c1b9cd62d | []
| no_license | Rizalimami/dym | 0ecadf9c049b22ebfebf92e4eab6eaad17dd3e26 | af1bcf7b77a3212bc8a8a0e41e6042a134587ed4 | refs/heads/master | 2020-04-08T10:56:43.605698 | 2018-11-27T06:44:08 | 2018-11-27T06:44:08 | 159,287,876 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 31,555 | py | import time
from datetime import datetime
import string
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp import api
from openerp.osv.expression import get_unaccent_wrapper
import re
import phonenumbers
from phonenumbers import carrier
from phonenumbers.phonenumberutil import number_type
class res_partner(osv.osv):
_inherit = 'res.partner'
def _get_payment_term(self, cr, uid, context=None):
obj_payment_term = self.pool.get('account.payment.term')
id_payment_term = obj_payment_term.search(cr, uid, [('name','=','Immediate Payment')])
if id_payment_term :
return id_payment_term[0]
return False
def _get_default_branch(self,cr,uid,ids,context=None):
user_obj = self.pool.get('res.users')
user_browse = user_obj.browse(cr,uid,uid)
branch_ids = False
branch_ids = user_browse.branch_ids and len(user_browse.branch_ids) == 1 and user_browse.branch_ids[0].id or False
return branch_ids
_columns = {
'parent_name': fields.related('parent_id', 'name', type='char', readonly=True, string='Parent name'),
'default_code': fields.char('Partner Code'),
'principle': fields.boolean('Principle'),
'biro_jasa': fields.boolean('Biro Jasa'),
'kas_negara': fields.boolean('Kas Negara'),
'forwarder': fields.boolean('Forwarder'),
'supplier': fields.boolean('General Supplier', help="Check this box if this contact is a supplier. If it's not checked, purchase people will not see it when encoding a purchase order."),
'showroom': fields.boolean('Showroom'),
'ahass': fields.boolean('Ahass'),
'dealer': fields.boolean('Dealer'),
'finance_company': fields.boolean('Finance Company'),
'vat': fields.related('npwp', string="TIN", type="char", help="Tax Identification Number. Check the box if this contact is subjected to taxes. Used by the some of the legal statements.", store=True),
'ahm_code': fields.char('AHM Code'),
'dealer_code': fields.char('Dealer Code'),
'kode_pajak_id':fields.selection([('1','010'),('2','020'),('3','030'),('4','040'),('5','050'),('6','060'),('7','070'),('8','080'),('9','090')],'Kode Transaksi FP'),
'tipe_faktur_pajak' : fields.selection([('tanpa_fp','Tanpa Faktur Pajak'),('satuan','Satuan'),('gabungan','Gabungan')],'Tipe Faktur Pajak'),
'pkp' : fields.boolean('PKP'),
'npwp': fields.char('No.NPWP'),
'tgl_kukuh': fields.date('Tgl Kukuh'),
'mobile_provider': fields.char('Mobile Provider'),
        # Address fields shown in the header
'rt':fields.char('RT', size=3),
'rw':fields.char('RW',size=3),
'zip_id':fields.many2one('dym.kelurahan', 'ZIP Code',domain="[('kecamatan_id','=',kecamatan_id),('state_id','=',state_id),('city_id','=',city_id)]"),
'kelurahan':fields.char('Kelurahan',size=100),
'kecamatan_id':fields.many2one('dym.kecamatan','Kecamatan', size=128,domain="[('state_id','=',state_id),('city_id','=',city_id)]"),
'kecamatan':fields.char('Kecamatan', size=100),
'city_id':fields.many2one('dym.city','City',domain="[('state_id','=',state_id)]"),
        # Address fields shown in the Customer Info tab
        'sama':fields.boolean(''), # set required True in the view
'street_tab': fields.char('Address'),
'street2_tab': fields.char(),
'rt_tab':fields.char('RT', size=3),
'rw_tab':fields.char('RW',size=3),
'zip_tab_id':fields.many2one('dym.kelurahan', 'ZIP Code',domain="[('kecamatan_id','=',kecamatan_tab_id),('state_id','=',state_tab_id),('city_id','=',city_tab_id)]"),
'kelurahan_tab':fields.char('Kelurahan',size=100),
'kecamatan_tab_id':fields.many2one('dym.kecamatan','Kecamatan', size=128,domain="[('state_id','=',state_tab_id),('city_id','=',city_tab_id)]"),
'kecamatan_tab':fields.char('Kecamatan', size=100),
'city_tab_id':fields.many2one('dym.city','City',domain="[('state_id','=',state_tab_id)]"),
'state_tab_id':fields.many2one('res.country.state', 'Province'),
        # Other fields shown in the Customer Info tab
'birthday':fields.date('Date of Birth'),
'hp_status':fields.selection([('aktif','Aktif'),('TidakAktif','Tidak Aktif')],'HP Status'),
'gender':fields.selection([('lakilaki', 'Laki-laki'),('perempuan', 'Perempuan')],'Jenis Kelamin'),
'no_kk':fields.char('No. KK',50),
'religion':fields.selection([('Islam', 'Islam'),('Kristen', 'Kristen'),('Katholik', 'Katholik'),('Hindu', 'Hindu'),('Budha', 'Budha')],'Religion'),
'no_ktp':fields.char('No.KTP',50),
'property_account_payable': fields.property(
type='many2one',
relation='account.account',
string="Account Payable",
domain="[('type', '=', 'payable')]",
help="This account will be used instead of the default one as the payable account for the current partner",
required=False),
'property_account_receivable': fields.property(
type='many2one',
relation='account.account',
string="Account Receivable",
domain="[('type', '=', 'receivable')]",
help="This account will be used instead of the default one as the receivable account for the current partner",
required=False),
'property_account_rounding': fields.property(
type='many2one',
relation='account.account',
string="Account Rounding",
required=False),
'pendidikan':fields.selection([('noSD', 'Tidak Tamat SD'),('sd', 'SD'),('sltp', 'SLTP/SMP'),('slta', 'SLTA/SMA'),('akademik', 'Akademi/Diploma'),('sarjana', 'Sarjana(S1)'),('pascasarjana', 'Pasca Sarjana')],'Pendidikan'),
'pekerjaan':fields.selection([('pNegeri', 'Pegawai Negeri'),('pSwasta', 'Pegawai Swasta'),('ojek', 'Ojek'),('pedagang', 'Pedagang/Wiraswasta'),('pelajar', 'Pelajar/Mahasiswa'),('guru', 'Guru/Dosen'),('tni', 'TNI/Polri'),('irt', 'Ibu Rumah Tangga'),('petani/nelayan', 'Petani/Nelayan'),('pro', 'Profesional(Contoh : Dokter)'),('lain', 'Lainnya')],'Pekerjaan'),
'pengeluaran':fields.selection([('<900', '< Rp.900.000,-'),('900125', 'Rp.900.001,- s/d Rp.1.250.000,-'),('125175', 'Rp.1.250.001,- s/d Rp.1.750.000,-'),('175250', 'Rp.1.750.001,- s/d Rp.2.500.000,-'),('250400', 'Rp.2.500.001,- s/d Rp.4.000.000,-'),('400600', 'Rp.4.000.001,- s/d Rp.6.000.000,-'),('600000', '> Rp.6.000.000,-')],'Pengeluaran /Bulan'),
'rel_code': fields.related('default_code', string='Partner Code', type="char", readonly="True"),
'branch_id':fields.many2one('dym.branch',string='Branch'),
'direct_customer': fields.boolean(string='Direct Customer'),
'branch': fields.boolean(string='Branch (Boolean)'),
'is_customer_depo':fields.boolean('Customer Depo'),
'is_group_customer':fields.boolean('Group Customer'),
'member':fields.char('Member Number'),
'creditur_debitur':fields.boolean('Creditur / Debitur'),
#Forwarder
'driver_lines': fields.one2many('dym.driver.line','partner_id','Driver'),
'plat_number_lines': fields.one2many('dym.plat.number.line','partner_id','Plat Number'),
}
_defaults = {
'tz': api.model(lambda self: self.env.context.get('tz', 'Asia/Jakarta')),
'sama': True,
'default_code': 'BPA/',
'branch_id':_get_default_branch,
}
_sql_constraints = [
('unique_member', 'unique(member)', 'Nomor Member sudah terdaftar!'),
]
# def _unique_no_ktp(self, cr, uid, ids, context=None):
# for l in self.browse(cr, uid, ids, context=context):
# if l.no_ktp:
# if self.search(cr,uid,[('no_ktp','=',l.no_ktp),('id','!=',l.id)]):
# return False
# return True
# _constraints = [
# (_unique_no_ktp, 'No KTP Duplicate!', ['no_ktp']),
# ]
def default_get(self, cr, uid, fields, context=None):
context = context or {}
res = super(res_partner, self).default_get(cr, uid, fields, context=context)
if 'property_payment_term' in fields:
res.update({'property_payment_term': self._get_payment_term(cr, uid)})
return res
def _display_address(self, cr, uid, address, without_company=False, context=None):
'''
The purpose of this function is to build and return an address formatted accordingly to the
standards of the country where it belongs.
:param address: browse record of the res.partner to format
:returns: the address formatted in a display that fit its country habits (or the default ones
if not country is specified)
:rtype: string
'''
'''
<xpath expr="//field[@name='city']" position="before">
<group>
<div>
<field name="street" placeholder="Street..." on_change="onchange_address(street,street2,rt,rw,state_id,city_id,kecamatan_id,kecamatan,zip_id,kelurahan)" />
<div>
<field name="street2" placeholder="Street" style="width: 50%%" on_change="onchange_address(street,street2,rt,rw,state_id,city_id,kecamatan_id,kecamatan,zip_id,kelurahan)" />
<field name="rt" placeholder="RT" style="width: 25%%" on_change="onchange_address(street,street2,rt,rw,state_id,city_id,kecamatan_id,kecamatan,zip_id,kelurahan)" />
<field name="rw" placeholder="RW" style="width: 25%%" on_change="onchange_address(street,street2,rt,rw,state_id,city_id,kecamatan_id,kecamatan,zip_id,kelurahan)" />
<field name="state_id" on_change="onchange_address(street,street2,rt,rw,state_id,city_id,kecamatan_id,kecamatan,zip_id,kelurahan)" class="oe_no_button" placeholder="Province" style="width: 50%%" options='{"no_open": True}' />
<field name="city_id" on_change="onchange_address(street,street2,rt,rw,state_id,city_id,kecamatan_id,kecamatan,zip_id,kelurahan)" placeholder="City" style="width: 50%%" attrs="{'required': ['|','|',('direct_customer','=',True),('is_group_customer','=',True),('customer','=',True)]}" />
<field name="kecamatan_id" on_change="onchange_address(street,street2,rt,rw,state_id,city_id,kecamatan_id,kecamatan,zip_id,kelurahan)" placeholder="Kecamatan" style="width: 50%%" />
<field name="kecamatan" on_change="onchange_address(street,street2,rt,rw,state_id,city_id,kecamatan_id,kecamatan,zip_id,kelurahan)" placeholder="Kecamatan" style="width: 50%%" />
<field name="zip_id" on_change="onchange_address(street,street2,rt,rw,state_id,city_id,kecamatan_id,kecamatan,zip_id,kelurahan)" placeholder="ZIP" style="width: 50%%" options='{"no_open": True}' />
<field name="kelurahan" on_change="onchange_address(street,street2,rt,rw,state_id,city_id,kecamatan_id,kecamatan,zip_id,kelurahan)" class="oe_no_button" placeholder="Kelurahan" style="width: 50%%" />
</div>
</div>
</group>
</xpath>
'''
# get the information that will be injected into the display format
# get the address format
# address_format = address.country_id.address_format or \
# "%(street)s\n%(street2)s\nRT: %(rt)s RW: %(rw)s Desa/Kel:%(kelurahan)s Kec:%(kecamatan)s\nKab/Kota:%(city)s Prov:%(state_code)s %(zip)s\n%(country_name)s"
address_format = "%(street)s\n%(street2)s\nRT: %(rt)s RW: %(rw)s Desa/Kel:%(kelurahan)s Kec:%(kecamatan)s\nKab/Kota:%(city_name)s Prov: %(state_name)s Kode Pos: %(kode_pos)s\n%(country_name)s"
args = {
'state_code': address.state_id.code or '',
'state_name': address.state_id.name or '',
'country_code': address.country_id.code or '',
'country_name': address.country_id.name or '',
'company_name': address.parent_name or '',
'rt': address.rt or '-',
'rw': address.rw or '-',
'kelurahan': address.kelurahan or '-',
'kecamatan': address.kecamatan or '-',
'city_name': address.city_id and address.city_id.name or '-',
'kode_pos': address.zip_id and address.zip_id.zip or '-',
}
for field in self._address_fields(cr, uid, context=context):
args[field] = getattr(address, field) or ''
if without_company:
args['company_name'] = ''
elif address.parent_id:
address_format = '%(company_name)s\n' + address_format
return address_format % args
def npwp_onchange(self,cr,uid,ids,npwp,context=None):
warning = {}
value = {}
result = {}
if npwp:
formatted_npwp = ''
npwp_normalize = npwp.replace(' ', '').upper()
splitted_npwp = re.findall(r'\d+', npwp_normalize)
if len(splitted_npwp) == 6:
if len(splitted_npwp[0]) == 2 and len(splitted_npwp[1]) == 3 and len(splitted_npwp[2]) == 3 and len(splitted_npwp[3]) == 1 and len(splitted_npwp[4]) == 3 and len(splitted_npwp[5]) == 3:
formatted_npwp = splitted_npwp[0] + '.' + splitted_npwp[1] + '.' + splitted_npwp[2] + '.' + splitted_npwp[3] + '-' + splitted_npwp[4] + '.' + splitted_npwp[5]
return {'value':{'npwp':formatted_npwp}}
elif len(splitted_npwp) == 1 and len(splitted_npwp[0]) == 15:
formatted_npwp = splitted_npwp[0][:2] + '.' + splitted_npwp[0][2:-10] + '.' + splitted_npwp[0][5:-7] + '.' + splitted_npwp[0][8:-6] + '-' + splitted_npwp[0][9:-3] + '.' + splitted_npwp[0][-3:]
return {'value':{'npwp':formatted_npwp}}
warning = {
'title': ('Perhatian !'),
'message': (('Format nomor npwp salah, mohon isi nomor npwp dengan format yang benar! (ex. 99.999.999.9-999.999)')),
}
value['npwp'] = self.browse(cr, uid, ids).npwp
result['warning'] = warning
result['value'] = value
return result
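    # Illustrative examples for the normalization above (added for clarity):
    # "013456789012345" and "01.345.678.9-012.345" both format to "01.345.678.9-012.345";
    # any other shape triggers the warning and restores the previously stored value.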
def onchange_mobile(self, cr, uid, ids, mobile, context=None):
value = {}
warning = {}
if mobile:
id_number = phonenumbers.parse(mobile,"ID")
if not carrier._is_mobile(number_type(id_number)):
warning = {
'title': ('Perhatian !'),
'message': (('Masukkan nomor handphone dengan benar, misal: 0817989800')),
}
value['mobile'] = ''
else:
formatted_mobile = phonenumbers.format_number(id_number, phonenumbers.PhoneNumberFormat.E164)
provider_mobile = eval(repr(carrier.name_for_number(id_number, "en")))
value['mobile'] = formatted_mobile
value['mobile_provider'] = provider_mobile
return {
'warning': warning,
'value': value,
}
def onchange_customer(self, cr, uid, ids, customer):
if not customer:
return {
'value':{
'no_ktp':False,
'birthday':False,
'gender':False,
'religion':False,
'no_kk':False,
'pendidikan':False,
'pekerjaan':False,
'pengeluaran':False,
'sama':'',
}
}
return True
def onchange_dealer(self, cr, uid, ids, dealer, finance_company, principle, ahm_code, dealer_code):
def_ahm_code = False
def_dealer_code = False
if dealer:
def_ahm_code = True
def_dealer_code = True
if finance_company:
def_ahm_code = True
if principle:
def_ahm_code = True
return {
'value':{
'ahm_code':ahm_code if def_ahm_code else False,
'dealer_code': dealer_code if def_dealer_code else False,
}
}
def showroom_ahass_change(self, cr, uid, ids, showroom, ahass, dealer, context=None):
value = {}
value['dealer'] = False
if showroom or ahass :
value['dealer'] = True
return {'value':value}
def onchange_pkp(self, cr, uid, ids, pkp, context=None):
if not pkp==False:
return {
'value':{
'npwp':'',
'tgl_kukuh':False,
}
}
return True
def onchange_forwarder(self, cr, uid, ids, forwarder, context=None):
if not forwarder :
return {'value' : {'plat_number_lines':False, 'driver_lines':False}}
return True
def name_get(self, cr, uid, ids, context=None):
if context is None:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
res = []
for record in self.browse(cr, uid, ids, context=context):
name = record.name
if record.parent_id and not record.is_company:
name = "%s, %s" % (record.parent_name, name)
if context.get('show_address_only'):
name = self._display_address(cr, uid, record, without_company=True, context=context)
if context.get('show_address'):
name = name + "\n" + self._display_address(cr, uid, record, without_company=True, context=context)
name = name.replace('\n\n','\n')
name = name.replace('\n\n','\n')
if context.get('show_email') and record.email:
name = "%s <%s>" % (name, record.email)
if record.default_code:
name = "[%s] %s %s" % (record.default_code, name, '(' + record.member + ')' if record.member else '')
res.append((record.id, name))
return res
def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
if name and operator in ('=', 'ilike', '=ilike', 'like', '=like') and len(name) >= 3:
self.check_access_rights(cr, uid, 'read')
where_query = self._where_calc(cr, uid, args, context=context)
self._apply_ir_rules(cr, uid, where_query, 'read', context=context)
from_clause, where_clause, where_clause_params = where_query.get_sql()
where_str = where_clause and (" WHERE %s AND " % where_clause) or ' WHERE '
# search on the name of the contacts and of its company
search_name = name
operator = 'like'
if operator in ('ilike', 'like'):
search_name = '%%%s%%' % name
if operator in ('=ilike', '=like'):
operator = operator[1:]
unaccent = get_unaccent_wrapper(cr)
where_str = where_str.replace('"res_partner"','p')
query = """SELECT p.id
FROM res_partner p
{where} (upper(p.{display_name}) {operator} {percent}
OR upper(p.{default_code}) {operator} {percent}
OR upper(p.{member}) {operator} {percent})
ORDER BY p.{display_name}, p.{default_code}
""".format(where=where_str, operator=operator,
display_name=unaccent('display_name'),
default_code=unaccent('default_code'),
member=unaccent('member'),
percent=unaccent('%s'))
where_clause_params += [search_name.upper(), search_name.upper(), search_name.upper()]
if limit:
query += ' limit %s'
where_clause_params.append(limit)
cr.execute(query, where_clause_params)
ids = map(lambda x: x[0], cr.fetchall())
if ids:
return self.name_get(cr, uid, ids, context)
else:
return []
return []
def create(self, cr, uid, vals, context=None):
if vals.get('default_code','BPA/') == 'BPA/' :
vals['default_code'] = self.pool.get('ir.sequence').get_sequence(cr, uid, 'BPA', division=False, padding=6)
partner_id = super(res_partner, self).create(cr, uid, vals, context=context)
self.write(cr, uid, partner_id, {'company_id':False})
return partner_id
def onchange_letter(self,cr,uid,ids,sama,street=None,street2=None,rt=None,rw=None,state_id=None,city_id=None,kecamatan_id=None,kecamatan=None,zip_id=None,kelurahan=None,context=None):
value ={}
if not sama :
value = {
'street_tab':False,
'street2_tab':False,
'rt_tab':False,
'rw_tab':False,
'state_tab_id':False,
'city_tab_id':False,
'kecamatan_tab_id':False,
'kecamatan_tab':False,
'zip_tab_id':False,
'kelurahan_tab':False,
}
if sama :
value = {
'street_tab':street,
'street2_tab':street2,
'rt_tab':rt,
'rw_tab':rw,
'state_tab_id':state_id,
'city_tab_id':city_id,
'kecamatan_tab_id':kecamatan_id,
'kecamatan_tab':kecamatan,
'zip_tab_id':zip_id,
'kelurahan_tab':kelurahan,
}
return {'value':value}
def _onchange_kecamatan_tab(self, cr, uid, ids, kecamatan_id):
if kecamatan_id:
kec = self.pool.get("dym.kecamatan").browse(cr, uid, kecamatan_id)
return {'value' : {'kecamatan_tab':kec.name}}
else:
return {'value' : {'kecamatan_tab':False}}
return True
def _onchange_zip_tab(self, cr, uid, ids, zip_id):
if zip_id:
kel = self.pool.get("dym.kelurahan").browse(cr, uid, zip_id)
return {'value' : {'kelurahan_tab':kel.name,}}
else:
return {'value' : {'kelurahan_tab':False,}}
return True
def onchange_address(self,cr,uid,ids,street=None,street2=None,rt=None,rw=None,state_id=None,city_id=None,kecamatan_id=None,kecamatan=None,zip_id=None,kelurahan=None,context=None):
value ={}
warning = {}
if street :
value['street_tab'] = street
if street2 :
value['street2_tab'] = street2
        if rt :
            # validate as one chain so the street/street2 entries collected
            # above are not discarded by rebuilding the value dict
            if len(rt) > 3 :
                warning = {
                    'title': ('Perhatian !'),
                    'message': (('RT tidak boleh lebih dari 3 digit ! ')),
                }
                value['rt'] = False
            elif not rt.isdigit() :
                warning = {
                    'title': ('Perhatian !'),
                    'message': (('RT hanya boleh angka ! ')),
                }
                value['rt'] = False
            else :
                value['rt_tab'] = rt
        if rw :
            if len(rw) > 3 :
                warning = {
                    'title': ('Perhatian !'),
                    'message': (('RW tidak boleh lebih dari 3 digit ! ')),
                }
                value['rw'] = False
            elif not rw.isdigit() :
                warning = {
                    'title': ('Perhatian !'),
                    'message': (('RW hanya boleh angka ! ')),
                }
                value['rw'] = False
            else :
                value['rw_tab'] = rw
if state_id :
value['state_tab_id'] = state_id
if city_id :
value['city_tab_id'] = city_id
if kecamatan_id :
kec = self.pool.get("dym.kecamatan").browse(cr, uid, kecamatan_id)
value['kecamatan_tab_id'] = kecamatan_id
value['kecamatan_tab'] = kec.name
value['kecamatan'] = kec.name
if zip_id :
kel = self.pool.get("dym.kelurahan").browse(cr, uid, zip_id)
value['zip_tab_id'] = zip_id
value['kelurahan_tab'] = kel.name
value['kelurahan'] = kel.name
return {'value':value,'warning':warning}
def change_nomor(self,cr,uid,ids,nohp,notelp,context=None):
value = {}
warning = {}
        # The digit-count / numeric-only validation for no_hp (max 13 digits)
        # and no_telp (max 11 digits) was disabled here; the handler currently
        # returns empty value/warning dicts unchanged.
return {'warning':warning,'value':value}
def onchange_punctuation(self,cr,uid,ids,no_ktp,context=None):
value = {}
warning = {}
if no_ktp:
if no_ktp == '0':
value = {
'no_ktp':no_ktp
}
elif no_ktp != '0' and len(no_ktp) == 16:
# if no_ktp :
ktp = self.search(cr,uid,[('no_ktp','=',no_ktp)])
if ktp :
warning = {
'title': ('Perhatian !'),
'message': (('No KTP %s sudah pernah dibuat ! ')%(no_ktp)),
}
value = {
'no_ktp':False
}
if not warning :
no_ktp = "".join(l for l in no_ktp if l not in string.punctuation)
value = {
'no_ktp':no_ktp
}
            elif no_ktp != '0' and len(no_ktp) != 16:
warning = {
'title': ('Perhatian !'),
'message': (('No KTP harus 16 digit ! ')),
}
value = {
'no_ktp':False
}
return {'value':value,'warning':warning}
class dym_driver_line(osv.osv):
_name = "dym.driver.line"
_rec_name = 'driver'
_columns = {
'partner_id': fields.many2one('res.partner', 'Forwarder'),
'driver': fields.char('Driver'),
}
def driver_change(self, cr, uid, ids, driver, context=None):
value = {}
if driver :
driver = driver.upper()
value['driver'] = driver
return {'value':value}
class dym_plat_number_line(osv.osv):
_name = "dym.plat.number.line"
_rec_name = 'plat_number'
_columns = {
'partner_id': fields.many2one('res.partner', 'Forwarder'),
'plat_number': fields.char('Plat Number'),
}
def plat_number_change(self, cr, uid, ids, plat_number, context=None):
value = {}
warning = {}
if plat_number :
plat_number = plat_number.upper()
plat_number = plat_number.replace(' ','')
value['plat_number'] = plat_number
for x in plat_number :
if x in string.punctuation :
warning = {'title': 'Perhatian', 'message': 'Plat Number hanya boleh huruf dan angka !'}
value['plat_number'] = False
return {'value':value, 'warning':warning}
| [
"[email protected]"
]
| |
6dbf65dea55f3575b84c21c3e7a60a933815fa0e | 87b4c1e282782ddfa22df95d8f494322bf2f2fb9 | /Flower Classification with Image Histogram/dataset.py | 1b47f756b06a5dd1afd718f35f291a0afe4c1872 | []
| no_license | janFrancoo/Python-Projects | 34e9515ae167bdca2f8e601c3ccc4bd4a6cb48cb | 875ed126e4adb7cd4c2884660f24d6515086995c | refs/heads/master | 2021-06-26T17:40:47.740967 | 2021-01-31T15:27:25 | 2021-01-31T15:27:25 | 199,189,125 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 931 | py | import os
import cv2
labels = ["Daffodil", "Snowdrop", "Lilly Valley", "Bluebell", "Crocus", "Iris", "Tigerlily", "Tulip", "Fritillary",
"Sunflower", "Daisy", "Colts' Foot", "Dandelion", "Cowslip", "Buttercup", "Windflower", "Pansy"]
def get_flowers(flowers_path, masks_path):
count = -1
masks = []
flowers = []
classes = []
for i, file_name in enumerate(os.listdir(flowers_path)):
if i % 80 == 0:
count += 1
raw_file_name = file_name.split(".")[0]
file_name_for_mask = raw_file_name + ".png"
if os.path.exists(os.path.join(masks_path, file_name_for_mask)):
mask = cv2.imread(os.path.join(masks_path, file_name_for_mask))
masks.append(cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY))
flowers.append(cv2.imread(os.path.join(flowers_path, file_name)))
classes.append(labels[count])
return flowers, masks, classes
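# Minimal usage sketch -- the directory paths are assumptions, not part of this
# repo; the label index advances every 80 files (17 classes x 80 images each):
#   flowers, masks, classes = get_flowers('17flowers/jpg', '17flowers/masks')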
| [
"[email protected]"
]
| |
4aaad55843e277a02646a91c6816ac641bb76a96 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_171/ch4_2019_04_03_14_50_06_906813.py | 13c4f9117194d74ac4dc2b5209ab49e9cc9ef2fc | []
| no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | def classifica_idade (x):
if x<=11:
print('crianca')
return x
elif x>11 and x<=17:
print('adolescente')
return x
else:
print('adulto')
return x
| [
"[email protected]"
]
| |
15f466d20b51d0e199a6bca4759d7a97d12b9d39 | e1aeede7cecf2bdb3317954e042f41810745b980 | /winston/commands/__init__.py | 879f94514a8039ff04a915527499ca075f99746c | []
| no_license | G10DRAS/winston | b0f50822af077d374e864f2eefa559275c673fef | c72c7f77a89f77d1de31cd0f401b3dc836338b36 | refs/heads/master | 2021-01-15T16:04:40.719122 | 2014-02-27T22:31:56 | 2014-02-27T22:31:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,365 | py | import re
class Command(object):
"""
Stores a command that is executed by external events such as a voice command,
a change of state or a notification.
"""
# The name with which all commands begin. Can be a word or a regex.
# Example: jenkins, alfred, robot. "Jenkins! Turn on the lights!"
signal = "winston"
def on_event(self, event, sender):
"""
Handles events from the interpreter and other sources
"""
# Do something here.
class RegexCommand(Command):
"""
Command that matches against a regex string
Set `polite` to True if the regex should match "could you", "please" and other
command decorations.
"""
# Command prefixes and suffixes. Can be a tuple of words or a regex
prefixes = "( can you| could you)?( please)?"
suffixes = "( please)?"
def __init__(self, regex, polite=False):
super(RegexCommand, self).__init__()
if polite:
final_regex = "{signal}{prefix} {command}{suffix}".format(
signal = self.signal,
command = regex,
prefix = self.prefixes,
suffix = self.suffixes,
)
self.regex = re.compile(final_regex)
else:
self.regex = re.compile(regex)
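    # Illustrative example (not from the original project): with polite=True
    # the compiled pattern becomes
    #   winston( can you| could you)?( please)? turn on the lights( please)?
    # so RegexCommand("turn on the lights", polite=True).match(
    #     "winston could you please turn on the lights") returns a match object.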
def match(self, text):
return self.regex.match(text) | [
"[email protected]"
]
| |
8acc9bc358a8f92477e4d4014cb1f0dd864c69da | 375c87462c4ed200cecce0aeab09c6161ac10dcd | /pwg_ls2/RV/dict_2_changes.py | dd80bde0aefbda7ab8b5fe3967bd41d33ad19f5b | []
| no_license | sanskrit-lexicon/PWG | 2e7ab371ec7e4da43d81d50663b06fa2e2b44806 | d32d701366cff1156b7f7bb0aea8ea27cd7fb7dd | refs/heads/master | 2023-02-07T02:49:53.179915 | 2023-02-03T19:53:25 | 2023-02-03T19:53:25 | 15,903,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,421 | py | #-*- coding:utf-8 -*-
""" dict_2_changes.py
"""
import sys,re,codecs
## https:##stackoverflow.com/questions/27092833/unicodeencodeerror-charmap-codec-cant-encode-characters
## This required by git bash to avoid error
## UnicodeEncodeError: 'charmap' codec cannot encode characters
## when run in a git bash script.
sys.stdout.reconfigure(encoding='utf-8')
class Change(object):
def __init__(self,metaline,iline,old,new):
self.metaline = metaline
self.iline = iline
self.old = old
self.new = new
def init_changes(lines1,lines2):
changes = [] # array of Change objects
metaline = None
imetaline1 = None
page = None
for iline,line1 in enumerate(lines1):
line2 = lines2[iline]
if iline == 0: # %***This File is E:\\APTE.ALL, Last update 11.09.06
continue #
if line1.startswith('<L>'):
metaline = line1
imetaline1 = iline+1
if line1 == line2:
continue
# generate a change
change = Change(metaline,iline,line1,line2)
changes.append(change)
print(len(changes),'changes found')
return changes
def change_out(change,ichange):
outarr = []
case = ichange + 1
#outarr.append('; TODO Case %s: (reason = %s)' % (case,change.reason))
try:
ident = change.metaline
except:
print('ERROR:',change.iline,change.old)
exit(1)
if ident == None:
ident = 'No metaline available'
outarr.append('; ' + ident)
# change for iline
lnum = change.iline + 1
line = change.old
new = change.new
outarr.append('%s old %s' % (lnum,line))
outarr.append('%s new %s' % (lnum,new))
outarr.append(';')
return outarr
def write_changes(fileout,changes,filein1,filein2):
with codecs.open(fileout,"w","utf-8") as f:
for ichange,change in enumerate(changes):
outarr = change_out(change,ichange)
for out in outarr:
f.write(out+'\n')
print(len(changes),"changes written to",fileout)
if __name__=="__main__":
filein1 = sys.argv[1] # xxx.txt (first version)
filein2 = sys.argv[2] # xxx.txt (second version)
fileout = sys.argv[3] # possible change transactions
with codecs.open(filein1,"r","utf-8") as f:
lines1 = [x.rstrip('\r\n') for x in f]
with codecs.open(filein2,"r","utf-8") as f:
lines2 = [x.rstrip('\r\n') for x in f]
if len(lines1) != len(lines2):
print('ERROR: require same number of lines in the two input files')
exit(1)
print(len(lines1),'lines compared')
changes = init_changes(lines1,lines2)
write_changes(fileout,changes,filein1,filein2)
| [
"[email protected]"
]
| |
a879df24d86dc8af1ae7633235f859be1a1e0509 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit_QC1759.py | bcb20cc7d58111256fe3f74a18f02994896b444e | [
"BSD-3-Clause"
]
| permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,797 | py | # qubit number=5
# total number=60
import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.h(controls[n])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
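# Illustrative (mirrors the lambda used under __main__ below): an oracle that
# flips the phase of |11> for n=2 could be built as
#   oracle = build_oracle(2, lambda rep: str(int(rep == '11')))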
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0]) # number=3
prog.x(input_qubit[4]) # number=53
prog.cx(input_qubit[2],input_qubit[0]) # number=45
prog.z(input_qubit[2]) # number=46
prog.h(input_qubit[0]) # number=54
prog.cz(input_qubit[2],input_qubit[0]) # number=55
prog.h(input_qubit[0]) # number=56
prog.h(input_qubit[1]) # number=4
prog.rx(2.664070570244145,input_qubit[1]) # number=39
prog.h(input_qubit[2]) # number=5
prog.h(input_qubit[3]) # number=6
prog.h(input_qubit[2]) # number=49
prog.cz(input_qubit[3],input_qubit[2]) # number=50
prog.h(input_qubit[2]) # number=51
prog.h(input_qubit[4]) # number=21
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
for i in range(repeat):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[3]) # number=40
prog.y(input_qubit[4]) # number=35
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=25
prog.cz(input_qubit[1],input_qubit[0]) # number=26
prog.h(input_qubit[0]) # number=27
prog.h(input_qubit[0]) # number=36
prog.cz(input_qubit[1],input_qubit[0]) # number=37
prog.h(input_qubit[0]) # number=38
prog.cx(input_qubit[1],input_qubit[0]) # number=41
prog.x(input_qubit[0]) # number=42
prog.cx(input_qubit[1],input_qubit[0]) # number=43
prog.cx(input_qubit[1],input_qubit[0]) # number=34
prog.cx(input_qubit[1],input_qubit[0]) # number=24
prog.cx(input_qubit[0],input_qubit[1]) # number=29
prog.cx(input_qubit[2],input_qubit[3]) # number=44
prog.x(input_qubit[1]) # number=30
prog.h(input_qubit[1]) # number=57
prog.cz(input_qubit[0],input_qubit[1]) # number=58
prog.h(input_qubit[1]) # number=59
prog.x(input_qubit[2]) # number=11
prog.x(input_qubit[3]) # number=12
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.x(input_qubit[0]) # number=13
prog.x(input_qubit[1]) # number=14
prog.x(input_qubit[2]) # number=15
prog.x(input_qubit[3]) # number=16
prog.h(input_qubit[0]) # number=17
prog.h(input_qubit[1]) # number=18
prog.h(input_qubit[2]) # number=19
prog.h(input_qubit[3]) # number=20
prog.z(input_qubit[1]) # number=52
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))
sample_shot =7924
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_QC1759.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| [
"[email protected]"
]
| |
7f95dc0c757ee5c602eda0c84f0a8b39f5e022ba | bc181d3e95743e498a1ec0cfbdac369a01d95218 | /apps/accounts/migrations/0001_initial.py | 7daca24efb48c0d09b39887195357f9e09d5df77 | []
| no_license | roman-oxenuk/welltory_test | 09bbbd8502735adb3662318affa3df10ef47f5af | 853dff24bbf38d5c2d6dce75dd5713ab6347a00d | refs/heads/master | 2021-01-21T23:23:55.809175 | 2017-06-23T18:50:54 | 2017-06-23T18:50:54 | 95,241,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,255 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('auth', '0006_require_contenttypes_0002'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(null=True, verbose_name='last login', blank=True)),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(unique=True, max_length=255, verbose_name='email')),
('first_name', models.CharField(max_length=255, null=True, verbose_name='\u0438\u043c\u044f', blank=True)),
('last_name', models.CharField(max_length=255, null=True, verbose_name='\u0444\u0430\u043c\u0438\u043b\u0438\u044f', blank=True)),
('is_active', models.BooleanField(default=True, verbose_name='\u0430\u043a\u0442\u0438\u0432\u043d\u044b\u0439')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='\u0434\u0430\u0442\u0430 \u0440\u0435\u0433\u0438\u0441\u0442\u0440\u0430\u0446\u0438\u0438')),
('is_staff', models.BooleanField(default=False, verbose_name='is staff')),
('groups', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', verbose_name='groups')),
('user_permissions', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Permission', blank=True, help_text='Specific permissions for this user.', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
},
),
]
| [
"[email protected]"
]
| |
f575acf5003fca4bb2f4ca047e2c00e3da6ca5bf | d1dc4dcd113acc3a954dc1fcadb584acb2135dbe | /adia/sequence.py | 810d445953712affb878b649ef818cabc74830f6 | [
"MIT"
]
| permissive | denysmiller/adia | 24a881b551def89b2e69bc1cef215d93174f71d5 | 86dc0c96c9b0bd804dff208e91c71a1958df56b0 | refs/heads/master | 2023-08-27T15:01:46.537505 | 2021-09-14T21:43:11 | 2021-09-14T21:43:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,955 | py | from io import StringIO
from .lazyattr import LazyAttribute
from .container import Container
from .interpreter import Interpreter, Consume, Final, FinalConsume, New, \
Ignore, Goto, Switch
from .token import NAME, NEWLINE, EVERYTHING, RARROW, COLON, AT, HASH, EOF, \
DOT, DEDENT, INDENT, MULTILINE, TILDA
class Module:
title = None
type = 'module'
def __init__(self, title):
self.title = title
class Item(Interpreter):
kind = None
args = None
text = None
multiline = None
def __init__(self, *args, **kw):
super().__init__('start', *args, **kw)
def _complete(self, kind, *args, text=None, multiline=False):
self.kind = kind
self.args = args
self.text = text.strip() if text else None
self.multiline = multiline
def _finish_multiline(self, kind, *args):
return self._finish(kind, *args, multiline=True)
def _finish(self, kind, *args, **kw):
args = list(args)
nargs = []
while args:
a = args.pop(0)
if a == ':':
break
nargs.append(a)
if args:
text = args[0]
else:
text = None
return self._complete(kind, *nargs, text=text, **kw)
@property
def left(self):
return self.kind
@property
def right(self):
return self.text
def __repr__(self):
return f'SequenceItem: {self.left}'
def dumps(self):
f = StringIO()
f.write(self.left)
if self.right:
f.write(': ')
if self.multiline:
f.write('|\n')
for line in self.right.splitlines():
f.write(f' {line}\n')
else:
f.write(f'{self.right}')
return f.getvalue()
statemap = {
'start': {
NAME: Goto(nextstate='name'),
},
'name': {
NAME: Goto(nextstate='name'),
TILDA: Goto(nextstate='name'),
NEWLINE: FinalConsume(_finish, alltokens=True),
COLON: Goto(nextstate=':'),
},
':': {
MULTILINE: FinalConsume(_finish_multiline, alltokens=True),
EVERYTHING: {
NEWLINE: FinalConsume(_finish, alltokens=True)
}
},
}
class Note(Item):
multiline = False
@LazyAttribute
def modules(self):
result = []
for m in self.args:
if m == '~':
continue
result.append(m)
return result
@LazyAttribute
def left(self):
result = self.kind
if self.args:
result += f'{" ".join(self.args)}'
return result
def _finish(self, *args, **kw):
super()._finish('@', *args, **kw)
statemap = {
'start': {NAME: {
TILDA: {
COLON: Goto(nextstate=':'),
NAME: {
COLON: Goto(nextstate=':'),
},
},
COLON: Goto(nextstate=':'),
}},
':': {
MULTILINE: FinalConsume(Item._finish_multiline, alltokens=True),
EVERYTHING: {
NEWLINE: FinalConsume(_finish, alltokens=True)
}
},
}
class ContainerItem(Item, Container):
def dumps(self):
f = StringIO()
f.write(super().dumps())
if len(self):
f.write('\n')
for c in self:
for line in c.dumps().splitlines():
f.write(f' {line}\n')
return f.getvalue().rstrip('\n')
class Call(ContainerItem):
caller = None
callee = None
returntext = None
returnsign = '=>'
@LazyAttribute
def left(self):
return f'{self.caller} -> {self.callee}'
@LazyAttribute
def right(self):
if not self.text:
return
f = StringIO()
f.write(self.text)
if self.returntext:
f.write(f' {self.returnsign} {self.returntext}')
return f.getvalue()
def _complete(self, caller, callee, text=None):
self.caller = caller
self.callee = callee
if text and self.returnsign in text:
text, returntext = text.rsplit(self.returnsign, 1)
self.returntext = returntext.strip()
super()._complete('call', text=text)
statemap = {
'start': {NAME: {RARROW: {NAME: Goto(nextstate='name -> name')}}},
'name -> name': {
NEWLINE: FinalConsume(_complete),
EOF: FinalConsume(_complete),
COLON: Goto(nextstate=':'),
},
':': {EVERYTHING: {
NEWLINE: FinalConsume(_complete)
}}
}
class Loop(ContainerItem):
pass
class Condition(ContainerItem):
pass
class SequenceDiagram(Interpreter, Container):
"""Represents a sequence diagram.
The :class:`adia.diagram` class creates an instance of this class for
each sequence diagram section.
"""
title = 'Untitled Sequence Diagram'
description = None
tags = None
def __init__(self, *args, **kwargs):
super().__init__('title', *args, **kwargs)
self.modules = {}
self.modules_order = []
self._callstack = []
def __repr__(self):
return f'SequenceDiagram: {self.title}'
def dumps(self):
f = StringIO()
f.write('sequence:')
if self.title:
f.write(f' {self.title}')
f.write('\n')
if self.description:
f.write(f'description: {self.description}\n')
if self.tags:
f.write(f'tags: {self.tags}\n')
modattrs = []
for k, v in sorted(self.modules.items()):
if k != v.title:
modattrs.append((k, 'title', v.title))
if 'module' != v.type:
modattrs.append((k, 'type', v.type))
if modattrs:
f.write('\n# Modules\n')
for m, a, v in modattrs:
f.write(f'{m}.{a}: {v}\n')
if len(self):
f.write('\n')
for c in self:
f.write(f'{c.dumps()}\n')
return f.getvalue()
def _ensuremodule(self, name, visible=False):
if name not in self.modules:
self.modules[name] = Module(name)
if visible and name not in self.modules_order:
self.modules_order.append(name)
@property
def current(self):
if self._callstack:
return self._callstack[-1]
return self
def _indent(self):
if len(self.current):
self._callstack.append(self.current[-1])
def _dedent(self):
if self._callstack:
self._callstack.pop()
def _new_call(self, call):
self._ensuremodule(call.caller, visible=True)
self._ensuremodule(call.callee, visible=True)
self.current.append(call)
def _new_note(self, note):
for m in note.modules:
self._ensuremodule(m, visible=False)
self.current.append(note)
def _new_loop(self, loop):
self.current.append(loop)
def _new_condition(self, condition):
self.current.append(condition)
def _attr(self, attr, value):
value = value.strip()
if attr == 'description':
self.description = value
elif attr == 'tags':
self.tags = value
else:
raise AttributeError(attr)
def _set_title(self, value):
self.title = value.strip()
def _module_attr(self, module, attr, value):
if not hasattr(Module, attr):
raise AttributeError(module, attr)
self._ensuremodule(module)
setattr(self.modules[module], attr, value.strip())
_keywords = {
'sequence': Final(nextstate='sequence'),
'state': Final(nextstate='start'),
'class': Final(nextstate='start'),
'for': New(Loop, callback=_new_loop, nextstate='start'),
'while': New(Loop, callback=_new_loop, nextstate='start'),
'loop': New(Loop, callback=_new_loop, nextstate='start'),
'if': New(Condition, callback=_new_condition, nextstate='start'),
'alt': New(Condition, callback=_new_condition, nextstate='start'),
'elif': New(Condition, callback=_new_condition, nextstate='start'),
'else': New(Condition, callback=_new_condition, nextstate='start'),
}
statemap = {
'title': {
EVERYTHING: {
NEWLINE: Consume(_set_title, nextstate='start')
}
},
'start': {
HASH: {EVERYTHING: {NEWLINE: Ignore(nextstate='start')}},
NEWLINE: Ignore(nextstate='start'),
INDENT: Ignore(callback=_indent, nextstate='indent'),
DEDENT: Ignore(callback=_dedent, nextstate='start'),
EOF: Final(nextstate='start'),
NAME: Switch(default=Goto(nextstate='name'), **_keywords),
AT: Ignore(nextstate='@'),
},
'indent': {
HASH: {EVERYTHING: {NEWLINE: Ignore(nextstate='start')}},
NAME: Switch(default=Goto(nextstate=' name'), **_keywords),
AT: Ignore(nextstate='@'),
NEWLINE: Ignore(nextstate='start'),
INDENT: Ignore(callback=_indent, nextstate='indent'),
},
'name': {
RARROW: New(Call, callback=_new_call, nextstate='start'),
COLON: Goto(nextstate='attr:'),
DOT: {NAME: {COLON: Goto(nextstate='mod.attr:')}},
},
' name': {
RARROW: New(Call, callback=_new_call, nextstate='start')
},
'attr:': {
EVERYTHING: {NEWLINE: Consume(_attr, nextstate='start')}
},
'mod.attr:': {
EVERYTHING: {NEWLINE: Consume(_module_attr, nextstate='start')}
},
'@': {
NAME: New(Note, callback=_new_note, nextstate='start'),
}
}
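# Illustrative source text this class is designed to parse and round-trip
# through dumps(); the exact grammar lives in the statemaps above, so treat
# this as a sketch rather than a spec:
#
#   sequence: Greeting
#   description: a tiny example
#   foo.title: Foo Service
#   foo -> bar: hello => world
#       @bar: a note attached to the call above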
| [
"[email protected]"
]
| |
3cad6f2a85c33cca3170787570280831ebd1325a | 075d3661100eb7d247a23ca9c37f3b252f9318d9 | /test_readFromJson.py | 113c90cbcce836f17f6ab447f1a4bf909d22f5a1 | []
| no_license | phizaz/timeseries-clustering-using-color-histrogram | 60ce6c45d8cad96caee0535bd098a6c84bf65adb | 1be88df32383f819dc1af09bdd6744f8a40a27b3 | refs/heads/master | 2021-01-10T06:17:16.447599 | 2015-10-15T15:50:18 | 2015-10-15T15:50:18 | 44,231,475 | 12 | 2 | null | null | null | null | UTF-8 | Python | false | false | 79 | py | import File
histograms = File.File.open('_histograms.json')
print(histograms) | [
"[email protected]"
]
| |
35d4289b0d5b7197676570e63cb452d1d2bfd5cb | be5b91588f198a665160a574e2eba2dd0be84783 | /database/write_nlu.py | 5a4e3c8356059d789dcec0516b42e509f4a727a7 | []
| no_license | swqsd218219/rasa_uncertain_slot | f60f9162cc629552f2deef0fb6cd6ea8cb93ae42 | ec7a0912b9058e3b19acce6ae05b8e871d720141 | refs/heads/main | 2023-03-08T18:13:26.853865 | 2021-03-01T01:23:20 | 2021-03-01T01:23:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,783 | py |
'''
Template definitions:
query attribute:
1. what is the cpu?
- what kind of entity do you want to ask?
- server
- please tell me about the ip of the entity
- 1.2.3.4
- 4 cores
2. what is the cpu of the 1.1.1.1?
- please tell me about the entity of the ip
- server
- 4 cores
3. what is the cpu of the server 1.2.3.1
- 5 cores
query relation
1. list all the server host in ?
- what kind of entity do you ask?(datacenter, cluster)
- cluster
- please tell me about the ip of entity
- 1.1.1.1
- dataframe of servers
2. list all the server host in datacenter?
- please tell me about the ip of entity
- 1.1.1.1
- dataframe of servers
3. list all the server host in datacenter 1.1.1.1
- dataframe of servers
'''
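# Illustrative lines written to nlu.md by write_query_attribute below, assuming
# a server CSV whose header exposes a "cpu" column and one row with ip 1.2.3.4:
#
#   ## intent: query_attribute
#   - what is the [cpu](attribute) ?
#   - what is the [cpu](attribute) of the [1.2.3.4](ip) ?
#   - what is the [cpu](attribute) of the [server](entity) [1.2.3.4](ip) ?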
with open('cluster.csv','r',encoding='utf-8') as f1:
cluster = f1.readlines()
with open('datacenter.csv','r',encoding='utf-8') as f2:
datacenter = f2.readlines()
with open('server.csv','r',encoding='utf-8') as f3:
server = f3.readlines()
entity2attribute = {}
entity2ip = {}
entity2ip['cluster'] = []
entity2ip['datacenter'] = []
entity2ip['server'] = []
for index,line in enumerate(cluster):
if index == 0:
line = line.strip()
line = line.split(',')
ip = line[0]
name = line[1]
business = line[2]
city = line[3]
datacenter_ip = line[4]
entity2attribute['cluster'] = [name,business,city,datacenter_ip]
else:
line = line.strip()
line = line.split(',')
# print(line)
ip = line[0]
entity2ip['cluster'].append(ip)
for index,line in enumerate(datacenter):
if index == 0:
line = line.strip()
line = line.split(',')
ip = line[0]
name = line[1]
longitude = line[2]
latitude = line[3]
region = line[4]
cpu = line[5]
entity2attribute['datacenter'] = [name, longitude, latitude, region,cpu]
else:
line = line.strip()
line = line.split(',')
ip = line[0]
entity2ip['datacenter'].append(ip)
for index,line in enumerate(server):
if index == 0:
line = line.strip()
line = line.split(',')
ip = line[0]
name = line[1]
cpu = line[2]
memory = line[3]
disk = line[4]
server_ip = line[5]
datacenter_ip = line[6]
entity2attribute['server'] = [name, cpu, memory, disk,server_ip,datacenter_ip]
else:
line = line.strip()
line = line.split(',')
ip = line[0]
entity2ip['server'].append(ip)
relation2entity = {
'host in':{'server':['cluster','datacenter'],'cluster':['datacenter']},
'configuration by':{'datacenter':['cluster','server'],'cluster':['server']}
}
def write_query_attribute(f):
f.write('## intent: query_attribute' + '\n')
for entity,value in entity2attribute.items():
ips = entity2ip[entity]
for attribute in value:
for ip in ips:
temp1 = '- what is the ['+attribute+'](attribute) ?'
temp2 = '- what is the ['+attribute+'](attribute) of the ['+ip+'](ip) ?'
temp3 = '- what is the ['+attribute+'](attribute) of the [' +entity+'](entity) ['+ip+'](ip) ?'
f.write(temp1 + '\n')
f.write(temp2 + '\n')
f.write(temp3 + '\n')
def write_query_ralation(f):
for relation,entities in relation2entity.items():
relation_ = relation.replace(' ','_')
f.write('## intent: query_'+relation_ + '\n')
for s_entity,o_entities in entities.items():
for o_entity in o_entities:
ips = entity2ip[o_entity]
for ip in ips:
temp1 = '- list all the ['+s_entity+'](s_entity) '+relation + ' ?'
temp2 = '- list all the ['+s_entity+'](s_entity) '+relation+' ['+o_entity+'](o_entity) ?'
temp3 = '- list all the ['+s_entity+'](s_entity) '+relation+' ['+o_entity+'](o_entity) ['+ip+'](ip) ?'
f.write(temp1 + '\n')
f.write(temp2 + '\n')
f.write(temp3 + '\n')
def write_lookup(f):
f.write('## lookup:entity' + '\n')
f.write(' data/lookup/entity.txt' + '\n')
f.write('## lookup:attribute' + '\n')
f.write(' data/lookup/attribute.txt' + '\n')
f.write('## lookup:s_entity' + '\n')
f.write(' data/lookup/s_entity.txt' + '\n')
f.write('## lookup:o_entity' + '\n')
f.write(' data/lookup/o_entity.txt' + '\n')
f.write('## lookup:ip' + '\n')
f.write(' data/lookup/ip.txt' + '\n')
if __name__ == '__main__':
f = open('./nlu.md','a',encoding='utf-8')
write_query_attribute(f)
write_query_ralation(f)
write_lookup(f)
| [
"[email protected]"
]
| |
545da2d80571e4c8539199e79b3b92fa018cd91d | 8629b45d5cec27fa701c76644db2a1ac9a090b07 | /016/16.py | e848effd4500e3781e5281f0b148d840ea536535 | [
"MIT"
]
| permissive | bsamseth/project-euler | 96e3a7a94cc605ded3edf7176a93147f9836350e | 60d70b117960f37411935bc18eab5bb2fca220e2 | refs/heads/master | 2021-04-06T06:16:23.425225 | 2018-11-05T09:50:21 | 2018-11-05T09:50:21 | 59,105,649 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | """
2^15 = 32768 and the sum of its digits is 3 + 2 + 7 + 6 + 8 = 26.
What is the sum of the digits of the number 2^1000?
"""
print sum([int(char) for char in str(2**1000)])
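# Under Python 2 this prints 1366, the digit sum of 2^1000.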
| [
"[email protected]"
]
| |
852bcee70e02a31eea4fdda750582f430f99ea17 | 11ca0c393c854fa7212e783a34269f9dae84e8c7 | /Python/226. 翻转二叉树.py | 38463da19db06e4efb8634aea7b35a3f18030818 | []
| no_license | VictoriqueCQ/LeetCode | dc84d81163eed26fa9dbc2114bba0b5c2ea881f4 | a77b3ead157f97f5d9599badb4d4c5da69de44ba | refs/heads/master | 2021-06-05T06:40:24.659909 | 2021-03-31T08:31:51 | 2021-03-31T08:31:51 | 97,978,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def invertTree(self, root: TreeNode) -> TreeNode:
if not root:
return None
def dfs(root):
if not root:
return
root.left, root.right = root.right, root.left
dfs(root.left)
dfs(root.right)
dfs(root)
return root
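# Example (LeetCode 226): inverting [4,2,7,1,3,6,9] yields [4,7,2,9,6,3,1];
# every left/right child pair is swapped recursively.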
| [
"1997Victorique0317"
]
| 1997Victorique0317 |
0a57942b9958442ababf76cf5c5edea1a6dacd8a | 13f4a06cd439f579e34bf38406a9d5647fe7a0f3 | /nn_ns/parse/MyLL1L/ProcessMatchResult_MyLL1L_of_SRRTL.py | 0aef1359eb2e3c4617306d57faaef7e442c70f50 | []
| no_license | edt-yxz-zzd/python3_src | 43d6c2a8ef2a618f750b59e207a2806132076526 | 41f3a506feffb5f33d4559e5b69717d9bb6303c9 | refs/heads/master | 2023-05-12T01:46:28.198286 | 2023-05-01T13:46:32 | 2023-05-01T13:46:32 | 143,530,977 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,673 | py |
from .tools_for_id2infoID_SRRTL import *
from .SRRTL_in_MyLL1L import mainID_MyLL1L_of_SRRTL
from .ProcessMatchResult_MyLL1L import ProcessMatchResult_MyLL1L
from .raw_tokenize_SRRTL import RawTokenizer_SRRTL
#from .id2infoID_MyLL1L_of_MyLL1L import tIDDict_MyLL1L_of_MyLL1L
class ProcessMatchResult_MyLL1L_of_SRRTL(ProcessMatchResult_MyLL1L):
def __init__(self, tIDDict_MyLL1L_of_SRRTL, tokens, pos2rc = None):
super().__init__(tIDDict_MyLL1L_of_SRRTL, tokens, pos2rc)
return
def to_id2infoID(self, match_result):
info_ls = self.process(match_result)
id2infoID = {info.ID : info for info in info_ls}
return id2infoID
def to_raw_tokenizer(self, mainID, match_result):
id2infoID = self.to_id2infoID(match_result)
assert mainID in id2infoID
return RawTokenizer_SRRTL(mainID, id2infoID)
def _pre_process(self, match_result):pass
def _process_leaf(self, match_result):pass
# match_result2raw_id2info
def _get_result(self, match_result):
ns = match_result[-1]
info_ls = ns.data
return info_ls
def _post_process(self, match_result):
        e = self.explain(match_result)
tID = e.tID
ID, *rID = tID
ns = e.ns
case = e.define_type
self.gen_ns_data(e)
self.outbox_optional_Item(e)
if case == 'Token':
if ID == 'string':
ns.data = eval(ns.data)
assert type(ns.data) == str
elif ID == 'idstring':
ns.data = eval(ns.data[2:])
assert type(ns.data) == str
elif tID == ('define', 'otherwise'):
ns.data = None # rex anything
elif ID == 'state_op':
assert tID == ('state_op', 'return')
ns.data = InfoReturn()
elif case == 'Item':
pass
elif case == 'Block':
pass
#print(ID, repr(ns.data))
elif ID == 'strings':
ns.data = ''.join(ns.data)
elif ID == 'name':
ID, = rID
assert ID == 'idstrings'
ns.data = ''.join(ns.data)
ns.data = repr(ns.data)
elif ID == 'if_clause':
assert not rID
ns.data = ns.data[1]
elif ID == 'state_op':
ID, = rID
#print(tID)
if ID == 'goto':
state_id = ns.data[1]
ns.data = InfoGoto(state_id)
elif ID == 'call':
state_id = ns.data[1]
ns.data = InfoCall(state_id)
else:
assert ID == 'error'
err = ns.data[1]
ns.data = InfoError(err)
## elif ID == 'define':
## ID, = rID
## assert ID == 'rex'
## rex, = e[0].ns.data
## ns.data = rex
elif ID == 'define_body':
ID, = rID
if ID == 'normal_define':
rex, _, children = ns.data
if not children:
children = []
ns.data = InfoNormalDefine(rex, children)
else:
assert ID == 'define_if_clause'
rex, state_op, _ = ns.data # rex - None - match all
#print(state_op, rex)
ns.data = InfoDefineIfClause(state_op, rex)
elif ID == 'name_eq':
assert not rID
ns.data, _ = ns.data
elif ID == 'define_token_type':
ID, = rID
_id = None
if ID == 'named_define':
_id, body = ns.data
else:
body = ns.data
ns.data = InfoDefineTypeID(_id, body)
elif ID == 'sub_define_block':
assert not rID
_, ns.data, _ = ns.data
elif ID == 'define_state':
assert not rID
_id, _, children = ns.data
ns.data = InfoDefineStateID(_id, children)
## elif ID in {'rex', 'state_id', 'type_id', 'id'}:
## assert not rID
## ns.data, = e[0].ns.data
## #print(ID, ns.data)
## elif ID in {mainID_MyLL1L_of_SRRTL, 'define_block'}:
## assert not rID
## ns.data = e[0].ns.data
## #print(ID, ns.data)
#def lang_text2raw_id2info():
def test_ProcessMatchResult_MyLL1L_of_SRRTL():
from .parser_MyLL1L_of_SRRTL import parser_MyLL1L_of_SRRTL
from .SRRTL_in_MyLL1L import SRRTL_in_MyLL1L, mainID_MyLL1L_of_SRRTL
from .raw_tokenize_SRRTL import raw_tokenize_SRRTL
from .id2infoID_SRRTL_of_SRRTL import id2infoID_SRRTL_of_SRRTL
from .SRRTL_in_SRRTL import mainID_SRRTL_of_SRRTL, SRRTL_in_SRRTL
raw_tokens = list(raw_tokenize_SRRTL(SRRTL_in_SRRTL, \
mainID_SRRTL_of_SRRTL, id2infoID_SRRTL_of_SRRTL))
_tokenize = parser_MyLL1L_of_SRRTL.tokenize
_parse = parser_MyLL1L_of_SRRTL.parse_tokens
tIDDict = parser_MyLL1L_of_SRRTL.tIDDict
tokens = _tokenize(SRRTL_in_SRRTL)
_match_result = _parse(tokens)
raw_tokenizer = ProcessMatchResult_MyLL1L_of_SRRTL(tIDDict, tokens)\
.to_raw_tokenizer(mainID_SRRTL_of_SRRTL, _match_result)
_raw_tokens = list(raw_tokenizer.raw_tokenize(SRRTL_in_SRRTL))
if not _raw_tokens == raw_tokens:
assert repr(_raw_tokens) == repr(raw_tokens)
print(_raw_tokens)
print(raw_tokens)
assert _raw_tokens == raw_tokens
if __name__ == '__main__':
test_ProcessMatchResult_MyLL1L_of_SRRTL()
| [
"[email protected]"
]
| |
1ac9526b04e496e36c8caa591056247ab113c9a8 | fea444217851a92510651da2b60035b73344d7da | /todo/setup.py | ee4284355e4449097dd3991ca5c42f45b5f04dbb | []
| no_license | fuzzygwalchmei/scratchingPost | c70d4f3f37d3d4d6490edfbbae603305b2bb5764 | b232c54aac975aebb0945d66a841db3f241b7cd2 | refs/heads/master | 2023-01-29T13:02:22.615813 | 2020-12-15T00:47:56 | 2020-12-15T00:47:56 | 176,823,898 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 567 | py | from sqlalchemy.orm import sessionmaker
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
engine = create_engine('sqlite:///todo.db')
Session = sessionmaker(bind=engine)
session = Session()
Base = declarative_base()
class ToDo(Base):
__tablename__ = 'todos'
id = Column(Integer, primary_key=True)
subject = Column(String)
note = Column(String)
def __repr__(self):
        return f'<ToDo(id: {self.id} - note: {self.note})>'
Base.metadata.create_all(engine)
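# Minimal usage sketch (illustrative values, not part of the original script):
#   session.add(ToDo(subject='groceries', note='milk and eggs'))
#   session.query(ToDo).all()  # e.g. [<ToDo(id: 1 - note: milk and eggs)>]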
session.commit() | [
"[email protected]"
]
| |
ba801aca965089f72776e5998d259a23802b74e6 | 8f3336bbf7cd12485a4c52daa831b5d39749cf9b | /Python/sliding-puzzle.py | 22a02e080a64c277d48f26a549604f17dc5dba51 | []
| no_license | black-shadows/LeetCode-Topicwise-Solutions | 9487de1f9a1da79558287b2bc2c6b28d3d27db07 | b1692583f7b710943ffb19b392b8bf64845b5d7a | refs/heads/master | 2022-05-30T22:16:38.536678 | 2022-05-18T09:18:32 | 2022-05-18T09:18:32 | 188,701,704 | 240 | 110 | null | 2020-05-08T13:04:36 | 2019-05-26T15:41:03 | C++ | UTF-8 | Python | false | false | 4,140 | py | # Time: O((m * n) * (m * n)!)
# Space: O((m * n) * (m * n)!)
import heapq
import itertools
# A* Search Algorithm
class Solution(object):
def slidingPuzzle(self, board):
"""
:type board: List[List[int]]
:rtype: int
"""
def dot(p1, p2):
return p1[0]*p2[0]+p1[1]*p2[1]
def heuristic_estimate(board, R, C, expected):
result = 0
for i in xrange(R):
for j in xrange(C):
val = board[C*i + j]
if val == 0: continue
r, c = expected[val]
result += abs(r-i) + abs(c-j)
return result
R, C = len(board), len(board[0])
begin = tuple(itertools.chain(*board))
end = tuple(range(1, R*C) + [0])
expected = {(C*i+j+1) % (R*C) : (i, j)
for i in xrange(R) for j in xrange(C)}
min_steps = heuristic_estimate(begin, R, C, expected)
closer, detour = [(begin.index(0), begin)], []
lookup = set()
while True:
if not closer:
if not detour:
return -1
min_steps += 2
closer, detour = detour, closer
zero, board = closer.pop()
if board == end:
return min_steps
if board not in lookup:
lookup.add(board)
r, c = divmod(zero, C)
for direction in ((-1, 0), (1, 0), (0, -1), (0, 1)):
i, j = r+direction[0], c+direction[1]
if 0 <= i < R and 0 <= j < C:
new_zero = i*C+j
tmp = list(board)
tmp[zero], tmp[new_zero] = tmp[new_zero], tmp[zero]
new_board = tuple(tmp)
r2, c2 = expected[board[new_zero]]
r1, c1 = divmod(zero, C)
r0, c0 = divmod(new_zero, C)
is_closer = dot((r1-r0, c1-c0), (r2-r0, c2-c0)) > 0
(closer if is_closer else detour).append((new_zero, new_board))
return min_steps
# Time: O((m * n) * (m * n)! * log((m * n)!))
# Space: O((m * n) * (m * n)!)
# A* Search Algorithm
class Solution2(object):
def slidingPuzzle(self, board):
"""
:type board: List[List[int]]
:rtype: int
"""
def heuristic_estimate(board, R, C, expected):
result = 0
for i in xrange(R):
for j in xrange(C):
val = board[C*i + j]
if val == 0: continue
r, c = expected[val]
result += abs(r-i) + abs(c-j)
return result
R, C = len(board), len(board[0])
begin = tuple(itertools.chain(*board))
end = tuple(range(1, R*C) + [0])
end_wrong = tuple(range(1, R*C-2) + [R*C-1, R*C-2, 0])
expected = {(C*i+j+1) % (R*C) : (i, j)
for i in xrange(R) for j in xrange(C)}
min_heap = [(0, 0, begin.index(0), begin)]
lookup = {begin: 0}
while min_heap:
f, g, zero, board = heapq.heappop(min_heap)
if board == end: return g
if board == end_wrong: return -1
if f > lookup[board]: continue
r, c = divmod(zero, C)
for direction in ((-1, 0), (1, 0), (0, -1), (0, 1)):
i, j = r+direction[0], c+direction[1]
if 0 <= i < R and 0 <= j < C:
new_zero = C*i+j
tmp = list(board)
tmp[zero], tmp[new_zero] = tmp[new_zero], tmp[zero]
new_board = tuple(tmp)
f = g+1+heuristic_estimate(new_board, R, C, expected)
if f < lookup.get(new_board, float("inf")):
lookup[new_board] = f
heapq.heappush(min_heap, (f, g+1, new_zero, new_board))
return -1
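# Quick sanity check (LeetCode 773 sample case; illustrative, Python 2 print):
#   print Solution().slidingPuzzle([[1, 2, 3], [4, 0, 5]])  # -> 1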
| [
"[email protected]"
]
| |
7029d9404d228661a4e2e7d27618a58caefe3e98 | 673e829dda9583c8dd2ac8d958ba1dc304bffeaf | /data/multilingual/Latn.SHP/Sans_8/pdf_to_json_test_Latn.SHP_Sans_8.py | a9ff90b902a1274df32bc2575bf41aceb7fb70ec | [
"BSD-3-Clause"
]
| permissive | antoinecarme/pdf_to_json_tests | 58bab9f6ba263531e69f793233ddc4d33b783b7e | d57a024fde862e698d916a1178f285883d7a3b2f | refs/heads/master | 2021-01-26T08:41:47.327804 | 2020-02-27T15:54:48 | 2020-02-27T15:54:48 | 243,359,934 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.SHP/Sans_8/udhr_Latn.SHP_Sans_8.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| [
"[email protected]"
]
| |
e167205a15456bb695455dcffd27e9746cb15092 | c03717080fc76c8f442e4fc45681f2faa6d87819 | /a__preprocess_step1__comma_remove.py | 57cc31fb6685cc2a974ca85bf5625636ccaffd2f | []
| no_license | PharrellWANG/proj_vcmd | a010cc8eedd396cc7f22486daa32fb322e30ec4d | 952733f71ee3c8f18016144ddf95e6a74bc866c7 | refs/heads/master | 2021-01-20T03:09:04.226185 | 2017-05-12T02:52:02 | 2017-05-12T02:52:02 | 89,501,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 593 | py | # INPUT_FILE = '/Users/Pharrell_WANG/PycharmProjects/tf_dp/data/z_raw_partial_16.csv'
# OUTPUT_FILE = '/Users/Pharrell_WANG/PycharmProjects/tf_dp/data/z_partial_16.csv'
def comma_remover(INPUT_FILE, OUTPUT_FILE):
with open(INPUT_FILE, 'r') as r, \
open(OUTPUT_FILE, 'w') as w:
cnt = 0
        for line in r:
            cnt += 1
            # strip the trailing comma; the final line may have no newline
            newline = line[:-2] + "\n" if "\n" in line else line[:-1]
w.write(newline)
# print("total lines : " + str(cnt)) | [
"[email protected]"
]
| |
d2a3619d1a99b718458ffed7e6bdd3f373536969 | 04eaab6d9a6707b950d7ec4688707a883a009889 | /where/cleaners/__init__.py | d225d68de08cd820429206b62eea119429a5ee10 | [
"MIT"
]
| permissive | skjaeve/where | 3eae1036419e5f9c6b824b5f9b1dcedbe9d4da93 | 690558f64d54ce46c55a0bc3ef26f6fd992a3737 | refs/heads/master | 2020-04-05T03:35:01.737430 | 2018-11-28T11:04:59 | 2018-11-28T11:04:59 | 156,520,078 | 0 | 0 | null | 2018-11-07T09:13:35 | 2018-11-07T09:13:35 | null | UTF-8 | Python | false | false | 454 | py | """Framework for cleaning data
Description:
------------
Each data cleaner should be defined in a one of two directories:
+ `editors` - Editors can add new fields to the dataset.
+ `removers` - These cleaners only remove observations.
"""
# Make the apply-functions in subpackages available
from where.cleaners.editors import apply_editors # noqa
from where.cleaners.removers import apply_removers # noqa
# Do not support * imports
__all__ = []
| [
"[email protected]"
]
| |
ed5f6cde139950405c6ec1728493c26afb9a6799 | 9531e597cd3f865cc6b6f780498a18281c2413f8 | /comments/models.py | 956bf210ee9ab176d9e93f98dac9fd3202ac60d4 | []
| no_license | dpitkevics/DevNet | 7133b80ce5d56b9c11aa4c500d530faed7cb13f4 | 98ebc3916346e6c2bda79711a3896f7c2a8e2ac8 | refs/heads/master | 2020-04-15T12:04:00.245848 | 2015-09-14T17:45:39 | 2015-09-14T17:45:39 | 41,320,800 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 570 | py | from django.db import models
from django.contrib.auth.models import User
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from model_utils.models import TimeStampedModel
class Comment(TimeStampedModel):
user = models.ForeignKey(User)
parent_comment = models.ForeignKey('Comment')
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
comment_text = models.TextField()
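# Usage sketch with a hypothetical Post model (not part of this app):
#   Comment.objects.create(user=user, parent_comment=parent,
#                          content_object=post, comment_text='Nice!')
# Note that parent_comment is a non-nullable self-FK, so top-level comments
# would need it to allow null=True in practice.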
| [
"[email protected]"
]
| |
1ec7d95d1793fcef3900410021a4866f130286d4 | 9e715dea01dc637ed91cde345df8ae81267f60a9 | /webapp/apps/taxbrain/migrations/0069_auto_20150314_2139.py | ffaec203e84da216bcbc53279e1dc8272924d4d0 | [
"MIT"
]
| permissive | kdd0211/webapp-public | f08b76201a6a59116bcfdc382ba995a46dd629cd | bcf94d5d6458ac5c6e89d0cf33d7fed06c85030d | refs/heads/master | 2021-01-16T21:07:44.059049 | 2016-01-14T05:09:50 | 2016-01-14T05:09:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,078 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('taxbrain', '0068_auto_20150314_2137'),
]
operations = [
migrations.RenameField(
model_name='taxsaveinputs',
old_name='long_rate_one',
new_name='_CG_rt1',
),
migrations.RenameField(
model_name='taxsaveinputs',
old_name='long_threshold_one_single',
new_name='_CG_thd1_0',
),
migrations.RenameField(
model_name='taxsaveinputs',
old_name='long_threshold_one_jointly',
new_name='_CG_thd1_1',
),
migrations.RenameField(
model_name='taxsaveinputs',
old_name='long_threshold_one_head',
new_name='_CG_thd1_2',
),
migrations.RenameField(
model_name='taxsaveinputs',
old_name='long_threshold_one_separately',
new_name='_CG_thd1_3',
),
]
| [
"[email protected]"
]
| |
91bf99a2e6bbb2f2dbb60eb172f61a1ec01f2632 | a5a7a70348420b5815d4a49d74aa42e4ca41b4ba | /SAN/lib/utils/box_utils.py | ab81dc6b92ea5b149614c3bfed69a410239b17bb | [
"MIT"
]
| permissive | 738654805/landmark-detection | 18f8692b0f81bb4198cb6a5baca42a3f9ec89e59 | 70f647752147592fd5f62f99e64c685a6cf45b4a | refs/heads/master | 2020-06-06T10:53:50.751520 | 2019-06-13T08:41:15 | 2019-06-13T08:41:15 | 192,720,661 | 1 | 0 | MIT | 2019-06-19T11:39:25 | 2019-06-19T11:39:25 | null | UTF-8 | Python | false | false | 1,836 | py | ##############################################################
### Copyright (c) 2018-present, Xuanyi Dong ###
### Style Aggregated Network for Facial Landmark Detection ###
### Computer Vision and Pattern Recognition, 2018 ###
##############################################################
import numpy as np
def bboxcheck_TLBR(bbox):
'''
check the input bounding box to be TLBR format
parameter:
bbox: N x 4 numpy array, TLBR format
return:
True or False
'''
OK1 = isinstance(bbox, np.ndarray) and bbox.shape[1] == 4 and bbox.shape[0] > 0
OK2 = (bbox[:, 3] >= bbox[:, 1]).all() and (bbox[:, 2] >= bbox[:, 0]).all()
return OK1 and OK2
def bbox2center(bbox):
'''
convert a bounding box to a point, which is the center of this bounding box
parameter:
bbox: N x 4 numpy array, TLBR format
return:
center: 2 x N numpy array, x and y correspond to first and second row respectively
'''
assert bboxcheck_TLBR(bbox), 'the input bounding box should be TLBR format'
num_bbox = bbox.shape[0]
center = np.zeros((num_bbox, 2), dtype='float32')
center[:, 0] = (bbox[:, 0] + bbox[:, 2]) / 2.
center[:, 1] = (bbox[:, 1] + bbox[:, 3]) / 2.
return np.transpose(center)
def bbox_TLBR2TLWH(bbox):
'''
transform the input bounding box with TLBR format to TLWH format
parameter:
bbox: N X 4 numpy array, TLBR format
return
bbox: N X 4 numpy array, TLWH format
'''
assert bboxcheck_TLBR(bbox), 'the input bounding box should be TLBR format'
bbox_TLWH = np.zeros_like(bbox)
bbox_TLWH[:, 0] = bbox[:, 0]
bbox_TLWH[:, 1] = bbox[:, 1]
bbox_TLWH[:, 2] = bbox[:, 2] - bbox[:, 0]
bbox_TLWH[:, 3] = bbox[:, 3] - bbox[:, 1]
return bbox_TLWH
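# Illustrative example following the shapes documented above:
#   box = np.array([[10., 20., 30., 60.]])      # one TLBR box
#   bbox2center(box)     # -> [[20.], [40.]]    (row 0 = x, row 1 = y)
#   bbox_TLBR2TLWH(box)  # -> [[10., 20., 20., 40.]]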
| [
"[email protected]"
]
| |
dcf6bb640f9751ecee9553d97d552f8c75126a42 | 5d6ef1469740109d732441e88aed91890f2b8361 | /accounts/views.py | 9758580ce50b736a4f2a87156cc2586f2a14e758 | []
| no_license | boiyelove/workflow | 1ce88ee830fe4536db4423962296557629d81a7e | 273c168f0a0979f29f5154d3c67337091e0fe4b3 | refs/heads/master | 2023-07-17T16:40:05.178225 | 2020-08-08T16:59:06 | 2020-08-08T16:59:06 | 172,429,593 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,354 | py | import datetime
from django.shortcuts import render, get_object_or_404
from django.contrib.auth import logout
from django.views.generic import TemplateView, ListView
from django.views.generic.base import View
from django.views.generic.edit import FormView, UpdateView, CreateView, DeleteView
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.exceptions import PermissionDenied
from django.utils.decorators import method_decorator
from django.contrib import messages
from django.http import HttpResponseRedirect, Http404
from django.urls import reverse_lazy
from .forms import LoginForm, RegisterForm, UserProfileForm, DonateMethodForm, PasswordRequestForm, PasswordChangeForm
from .models import UserProfile, UserToken, DonateMethod, EmailVerification
from django.contrib.auth.mixins import LoginRequiredMixin
class FormLink:
def __init__(self, text, url):
self.name = text
self.url = url
def __str__(self):
return self.name
# Create your views here.
class LoginRqMixin(LoginRequiredMixin):
login_url = reverse_lazy('accounts:login')
redirect_field_name = 'rdr_to'
class AlreadyLoginedIn:
def dispatch(self, *args, **kwargs):
if self.request.user.is_authenticated:
return HttpResponseRedirect(reverse_lazy('accounts:dashboard'))
else:
return super(AlreadyLoginedIn, self).dispatch(*args, **kwargs)
class MustBeProfiled:
def dispatch(self, *args, **kwargs):
dm = DonateMethod.objects.filter(user = self.request.user)
up = UserProfile.objects.get(user=self.request.user)
if not dm:
			messages.add_message(self.request, messages.INFO, 'Please add a way to receive payment before you continue')
return HttpResponseRedirect(reverse_lazy('accounts:donatemethod-create'))
elif not up.phone_number or not up.full_name:
messages.add_message(self.request, messages.INFO, 'Please complete your profile information before you continue')
return HttpResponseRedirect(reverse_lazy('accounts:userprofile'))
else:
return super(MustBeProfiled, self).dispatch(*args, **kwargs)
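# Usage sketch (assumed, not from the original file): combine after an auth
# mixin so request.user exists, e.g.
#   class PayoutView(LoginRqMixin, MustBeProfiled, TemplateView): ...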
class LoginView(AlreadyLoginedIn, FormView):
form_class = LoginForm
template_name = 'accounts/form.html'
success_url = reverse_lazy('accounts:dashboard')
def get_context_data(self, *args, **kwargs):
formlinks = [FormLink("I don't have a revenupa account", reverse_lazy('accounts:register')),
]
context = super(LoginView, self).get_context_data(*args, **kwargs)
context.update({'page_title' : 'Login',
'form_title': 'Login',
"form_action": reverse_lazy('accounts:login'),
"form_method": "post",
"form_value": "Take me to my account",
'form_cancel': FormLink('I forgot my password', reverse_lazy('accounts:password-request')),
'form_links': formlinks,
})
return context
def form_valid(self, form):
form.login_user(self.request)
messages.success(self.request, 'You are now logged in')
return super(LoginView, self).form_valid(form)
class LogoutView(LoginRqMixin, View):
def get(self, request, *args, **kwargs):
logout(request)
messages.success(request, 'You are now logged out')
return HttpResponseRedirect('/')
class RegisterView(AlreadyLoginedIn, FormView):
	# AlreadyLoginedIn supplies the redirect-to-dashboard guard that was
	# previously sketched here in a commented-out dispatch() stub
	form_class = RegisterForm
	template_name = 'accounts/form.html'
	success_url = reverse_lazy('accounts:login')
def get_context_data(self, *args, **kwargs):
formlinks = [FormLink('Take me to revenupa.org', reverse_lazy('webcore:home-page')),]
context = super(RegisterView, self).get_context_data(*args, **kwargs)
context.update({'page_title' : 'Register New Account',
'form_title' : 'Create New Account',
'form_method': 'POST',
'form_value': 'Create my revenupa account',
'form_action': reverse_lazy('accounts:register'),
'form_cancel': FormLink("I already have a revenupa account", reverse_lazy('accounts:login')),
'form_links': formlinks,
})
return context
def form_valid(self, form):
referral = self.request.COOKIES.get('referral_id')
form.register_user(referral)
messages.add_message(self.request, messages.SUCCESS, 'Registration successful')
messages.add_message(self.request, messages.INFO, 'An email has been sent to your email address, you can check your spam folder or wait a few minute to receive it')
return super(RegisterView, self).form_valid(form)
class DashboardView(LoginRqMixin, TemplateView):
template_name = "accounts/dashboard.html"
def get_context_data(self, **kwargs):
context = super(DashboardView, self).get_context_data(**kwargs)
context['page_title'] = "Dashboard"
context['balance'] = UserToken.objects.get(user = self.request.user)
profile = UserProfile.objects.get(user = self.request.user)
context['programs'] = profile.programs.all()
return context
class UserProfileView(LoginRqMixin, UpdateView):
form_class = UserProfileForm
success_url = reverse_lazy('accounts:userprofile')
template_name = 'accounts/form.html'
def get_context_data(self, *args, **kwargs):
new_context = {'page_title' : 'My Profile',
'form_title': 'Profile',
"form_action": reverse_lazy('accounts:userprofile'),
"form_method": "post",
"form_value": "Update Profile",
'error_message': "Please check the details you provided",
}
context = super(UserProfileView, self).get_context_data(*args, **kwargs)
context.update(new_context)
return context
def get_object(self, *args, **kwargs):
instance, created = UserProfile.objects.get_or_create(user = self.request.user)
return instance
class DonateMethodListView(LoginRqMixin, ListView):
template_name = 'donatemethod_list.html'
context_object_name = 'donatemethodlist'
    def get_queryset(self, *args, **kwargs):
return DonateMethod.objects.filter(user = self.request.user)
def get_context_data(self, *args, **kwargs):
new_context = {'page_title' : 'My Donation Profiles',
}
context = super(DonateMethodListView, self).get_context_data(*args, **kwargs)
context.update(new_context)
return context
class DonateMethodCreateView(LoginRqMixin, FormView):
form_class = DonateMethodForm
template_name = "accounts/form.html"
success_url = reverse_lazy('accounts:donatemethod-list')
def form_valid(self, form):
form.fineshed(self.request.user)
return super(DonateMethodCreateView, self).form_valid(form)
def get_context_data(self, *args, **kwargs):
new_context = {'page_title' : 'New Donation Information',
'form_title': 'New Donation Information',
"form_action": reverse_lazy('accounts:donatemethod-create'),
"form_method": "post",
"form_value": "Add This To My Payment Details",
'error_message': "Please check the details you provided",
}
context = super(DonateMethodCreateView, self).get_context_data(*args, **kwargs)
context.update(new_context)
return context
class DonateMethodUpdateView(LoginRqMixin, UpdateView):
form_class = DonateMethodForm
template_name = "accounts/form.html"
success_url = reverse_lazy('accounts:donatemethod-list')
def get_object(self, queryset=None):
pk = self.kwargs.pop('pk')
dm = get_object_or_404(DonateMethod, id=pk)
if dm.user == self.request.user:
return dm
else:
raise PermissionDenied
def get_context_data(self, *args, **kwargs):
new_context = {'page_title' : 'Update Donation Information',
'form_title': 'New Donation Information',
"form_action": '.',
"form_method": "post",
"form_value": "Update This Donation Information",
'error_message': "Please check the details you provided",
}
context = super(DonateMethodUpdateView, self).get_context_data(*args, **kwargs)
context.update(new_context)
return context
class DonateMethodDeleteView(LoginRqMixin, DeleteView):
model = DonateMethod
    success_url = reverse_lazy('accounts:donatemethod-list')
template_name = "accounts/form.html"
def get_object(self, queryset=None):
obj = super(DonateMethodDeleteView, self).get_object()
if obj.user == self.request.user:
return obj
else:
raise PermissionDenied
def get_context_data(self, *args, **kwargs):
new_context = {'page_title' : 'Delete Donation Information',
'form_title': 'Are you sure you want to delete this?',
"form_action": '.',
"form_method": "post",
"form_value": "Yes, I'm sure. Delete this donation information",
}
context = super(DonateMethodDeleteView, self).get_context_data(*args, **kwargs)
context.update(new_context)
return context
class GetRef(AlreadyLoginedIn, View):
def get(self, request, *args, **kwargs):
        response = HttpResponseRedirect(reverse_lazy('accounts:register'))
        try:
            uname = kwargs.pop('username')
            user = User.objects.get(username=uname)
            response.set_cookie('referral_id', user.username, expires=datetime.date.today() + datetime.timedelta(days=360))
            messages.add_message(request, messages.SUCCESS, 'We commend {} for telling you about us. You are welcome'.format(uname))
        except User.DoesNotExist:
            messages.add_message(request, messages.ERROR, 'Sorry, no user with that username')
return response
class EmailVerificationView(View):
    def get(self, request, *args, **kwargs):
        kw = kwargs.pop('verification_key')
        try:
            emver = EmailVerification.objects.get(slug=kw)
        except EmailVerification.DoesNotExist:
            raise Http404
        emver.confirmed = True
        emver.save()  # persist the confirmation flag
        messages.add_message(request, messages.SUCCESS, 'Your email has been confirmed successfully')
        if emver.actiontype == 'USER':
            try:
                user = User.objects.get(email=emver.email)
                if not user.is_active:
                    user.is_active = True
                    user.save()
                messages.add_message(request, messages.SUCCESS, 'Your account has been activated successfully')
            except User.DoesNotExist:
                pass
        return HttpResponseRedirect(emver.action)
class PasswordChangeRequestView(FormView):
form_class = PasswordRequestForm
template_name = "accounts/form.html"
success_url = reverse_lazy('accounts:dashboard')
def dispatch(self, request, *args, **kwargs):
if request.user.is_authenticated:
return HttpResponseRedirect(reverse_lazy('accounts:password-change'))
else:
return super(PasswordChangeRequestView, self).dispatch(request, *args, **kwargs)
def form_valid(self, form):
form.done()
messages.add_message(self.request, messages.SUCCESS, 'An email containing your password was sent to your email address')
return super(PasswordChangeRequestView, self).form_valid(form)
def get_context_data(self, *args, **kwargs):
formlinks = [FormLink('Take me to revenupa.org', reverse_lazy('webcore:home-page')),]
new_context = {'page_title' : 'Password Recovery Request',
'form_title': 'Password Recovery Request',
"form_action": reverse_lazy('accounts:password-request'),
"form_method": "post",
"form_value": "Reset my password",
'error_message': "Please check the details you provided",
'form_cancel': FormLink("I remember my password", reverse_lazy('accounts:login')),
'form_links': formlinks,
}
context = super(PasswordChangeRequestView, self).get_context_data(*args, **kwargs)
context.update(new_context)
return context
class PasswordChangeView(LoginRqMixin, FormView):
form_class = PasswordChangeForm
template_name = "accounts/form.html"
success_url = reverse_lazy('accounts:dashboard')
    def form_valid(self, form):
        form.done()
        messages.add_message(self.request, messages.SUCCESS, 'Your password has been changed and emailed to you')
        return super(PasswordChangeView, self).form_valid(form)
    def get_form_kwargs(self):
        kwargs = super(PasswordChangeView, self).get_form_kwargs()
        kwargs['request'] = self.request
        return kwargs
def get_context_data(self, *args, **kwargs):
formlinks = [FormLink('Take me to my profile', reverse_lazy('accounts:userprofile')),]
new_context = {'page_title' : 'Password Change Form',
'form_title': 'Password Change Form',
"form_action": reverse_lazy('accounts:password-change'),
"form_method": "post",
"form_value": "Change my password",
'error_message': "Please check the details you provided",
'form_cancel': FormLink("Nah, take me to dashboard", reverse_lazy('accounts:dashboard')),
'form_links': formlinks,
}
context = super(PasswordChangeView, self).get_context_data(*args, **kwargs)
context.update(new_context)
return context
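# Minimal URLconf sketch showing how these views are typically wired up.
# The route names come from the reverse_lazy() calls above; the module path
# "accounts.views" and the URL fragments themselves are assumptions.
#
# from django.conf.urls import url
# from accounts import views
#
# urlpatterns = [
#     url(r'^login/$', views.LoginView.as_view(), name='login'),
#     url(r'^logout/$', views.LogoutView.as_view(), name='logout'),
#     url(r'^register/$', views.RegisterView.as_view(), name='register'),
#     url(r'^dashboard/$', views.DashboardView.as_view(), name='dashboard'),
#     url(r'^verify/(?P<verification_key>[-\w]+)/$',
#         views.EmailVerificationView.as_view(), name='email-verify'),
# ]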
| [
"[email protected]"
]
| |
8989148a1e906ae9fa35e8e5f99f07891fdd0d91 | 17e9441138f8ad09eab3d017c0fa13fa27951589 | /blog17-networkx/test02.py | 837cd862a077033de44e123fefe0dbd0a98117bc | []
| no_license | My-lsh/Python-for-Data-Mining | 159a09e76b35efd46ca3e32ad6dd2174847d5ec4 | f2dd0b8f3c4f5f51a10613dff99041bca4fd64c5 | refs/heads/master | 2023-03-26T08:48:32.088713 | 2021-03-25T14:57:07 | 2021-03-25T14:57:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,187 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 02 10:33:58 2017
@author: eastmount CSDN 杨秀璋
"""
import pandas as pd
import numpy as np
import codecs
import networkx as nx
import matplotlib.pyplot as plt
""" 第一步:读取数据并获取姓名 """
data = pd.read_csv("data.csv",encoding ="gb2312") #中文乱码
print data[:4]
print data[u'姓名'] #获取某一列数据
print type(data[u'姓名'])
name = []
for n in data[u'姓名']:
name.append(n)
print name[0]
""" 第二步:计算共现矩阵 定义函数实现 """
a = np.zeros([2,3])
print a
print len(name)
word_vector = np.zeros([len(name),len(name)]) #共现矩阵
#1.计算学院共线矩阵
i = 0
while i<len(name): #len(name)
academy1 = data[u'学院'][i]
j = i + 1
while j<len(name):
academy2 = data[u'学院'][j]
if academy1==academy2: #学院相同
word_vector[i][j] += 1
word_vector[j][i] += 1
j = j + 1
i = i + 1
print word_vector
np_data = np.array(word_vector) #矩阵写入文件
pd_data = pd.DataFrame(np_data)
pd_data.to_csv('result.csv')
#2.计算大数据金融班级共线矩阵
#3.计算性别共线矩阵
#4.计算宿舍楼层共线矩阵
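# A vectorized alternative sketch (assumption: plain numpy broadcasting over
# one of the columns above; shown for the academy case). Comparing the column
# against itself builds the same matrix without the double while loop:
#
# col = data[u'学院'].values
# same = (col[:, None] == col[None, :]).astype(float)
# word_vector = same - np.eye(len(name))   # zero out the diagonal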
"""
i = 0
while i<len(name): #len(name)
academy1 = data[u'宿舍楼层'][i]
j = i + 1
while j<len(name):
academy2 = data[u'宿舍楼层'][j]
    if academy1==academy2: # same floor
word_vector[i][j] += 1
word_vector[j][i] += 1
j = j + 1
i = i + 1
print word_vector
"""
""" 第三步:共现矩阵计算(学生1 学生2 共现词频)文件 """
words = codecs.open("word_node.txt", "a+", "utf-8")
i = 0
while i<len(name): #len(name)
student1 = name[i]
j = i + 1
while j<len(name):
student2 = name[j]
        # check whether the two students co-occur; pairs with non-zero counts are added
if word_vector[i][j]>0:
words.write(student1 + " " + student2 + " "
+ str(word_vector[i][j]) + "\r\n")
j = j + 1
i = i + 1
words.close()
""" 第四步:图形生成 """
a = []
f = codecs.open('word_node.txt','r','utf-8')
line = f.readline()
print line
i = 0
A = []
B = []
while line!="":
    a.append(line.split()) # the file was saved with space-separated fields
print a[i][0],a[i][1]
A.append(a[i][0])
B.append(a[i][1])
i = i + 1
line = f.readline()
elem_dic = tuple(zip(A,B))
print type(elem_dic)
print list(elem_dic)
f.close()
import matplotlib
matplotlib.rcParams['font.sans-serif'] = ['SimHei']
matplotlib.rcParams['font.family']='sans-serif'
colors = ["red","green","blue","yellow"]
G = nx.Graph()
G.add_edges_from(list(elem_dic))
#nx.draw(G,with_labels=True,pos=nx.random_layout(G),font_size=12,node_size=2000,node_color=colors) #alpha=0.3
#pos=nx.spring_layout(G,iterations=50)
pos=nx.random_layout(G)
nx.draw_networkx_nodes(G, pos, alpha=0.2,node_size=1200,node_color=colors)
nx.draw_networkx_edges(G, pos, node_color='r', alpha=0.3) #style='dashed'
nx.draw_networkx_labels(G, pos, font_family='sans-serif', alpha=0.5) #font_size=5
plt.show()
| [
"[email protected]"
]
| |
458e17eed0bc39f02d890a755f9aa6207076f831 | 2a9a136296e3d2abebf3a3dbfbbb091076e9f15f | /env/Lib/site-packages/pip/_vendor/html5lib/treeadapters/sax.py | 59b0a8ff79ffd1467ad8d32e2074685db1ed7e20 | []
| no_license | Lisukod/planet-tracker | a865e3920b858000f5d3de3b11f49c3d158e0e97 | 6714e6332b1dbccf7a3d44430620f308c9560eaa | refs/heads/master | 2023-02-18T19:26:16.705182 | 2021-01-23T01:51:58 | 2021-01-23T01:51:58 | 328,032,670 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,697 | py | from __future__ import absolute_import, division, unicode_literals
from xml.sax.xmlreader import AttributesNSImpl
from ..constants import adjustForeignAttributes, unadjustForeignAttributes
prefix_mapping = {}
for prefix, localName, namespace in adjustForeignAttributes.values():
if prefix is not None:
prefix_mapping[prefix] = namespace
def to_sax(walker, handler):
"""Call SAX-like content handler based on treewalker walker
:arg walker: the treewalker to use to walk the tree to convert it
:arg handler: SAX handler to use
"""
handler.startDocument()
for prefix, namespace in prefix_mapping.items():
handler.startPrefixMapping(prefix, namespace)
for token in walker:
type = token["type"]
if type == "Doctype":
continue
elif type in ("StartTag", "EmptyTag"):
attrs = AttributesNSImpl(token["data"], unadjustForeignAttributes)
handler.startElementNS(
(token["namespace"], token["name"]), token["name"], attrs
)
if type == "EmptyTag":
handler.endElementNS(
(token["namespace"], token["name"]), token["name"]
)
elif type == "EndTag":
handler.endElementNS(
(token["namespace"], token["name"]), token["name"]
)
elif type in ("Characters", "SpaceCharacters"):
handler.characters(token["data"])
elif type == "Comment":
pass
else:
assert False, "Unknown token type"
for prefix, namespace in prefix_mapping.items():
handler.endPrefixMapping(prefix)
handler.endDocument()
| [
"[email protected]"
]
| |
45a165fafb8b93e28d4d46d0bc49be317be87a2e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02783/s004767524.py | 890487d6a124996654e2b4b0893b434c99a5cee2 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | H, A = (int(x) for x in input().split())
if A >= H:
print(1)
elif H%A == 0:
print(H//A)
else:
print(H//A + 1) | [
"[email protected]"
]
| |
4ed8aacb5d5e8e915a445cc8c33ffb7f42a8ec4c | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /Gauss_v45r8/Gen/DecFiles/options/11134020.py | d61de03b13fd229fd8d73ea102ddc4195d7175b6 | []
| no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 947 | py | # file /home/hep/ss4314/cmtuser/Gauss_v45r8/Gen/DecFiles/options/11134020.py generated: Fri, 27 Mar 2015 15:47:57
#
# Event Type: 11134020
#
# ASCII decay Descriptor: {[[B0]nos -> (J/psi(1S) -> p+ p~-) (rho(770)0 -> pi+ pi-)]cc, [[B0]os -> (J/psi(1S) -> p+ p~-) (rho(770)0 -> pi- pi+)]cc}
#
from Configurables import Generation
Generation().EventType = 11134020
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bd_Jpsirho0,pp=DecProdCut.dec"
Generation().SignalRepeatedHadronization.CutTool = "DaughtersInLHCb"
Generation().SignalRepeatedHadronization.SignalPIDList = [ 511,-511 ]
| [
"[email protected]"
]
| |
1d14a6463b2ceaf9f8bc13e5d1c1c6450675751c | 49b048b05330fcc7ebd1ea6d3b619085af46b433 | /exe01.py | 751128bf3dd018f9c1442b0a37477fe9a947ef8a | []
 | no_license | andreplacet/reinforcement-tasks-python-strings | a26e2c8544a2dbb161ffd27c4f806398c2096b8f | 1ee8f16bbc97bca138feb41992205674a4e07a57 | refs/heads/master | 2023-01-08T23:09:40.872807 | 2020-11-06T17:54:51 | 2020-11-06T17:54:51 | 310,668,589 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 525 | py | # Exercise 01
print('Comparador de strings')
string_1 = str(input('Digite uma frase: ')).strip().split()
print(f'String 1: {string_1}')
string_1 = ''.join(string_1)
string_2 = str(input('Digite uma frase: ')).strip().split()
print(f'String 2: {string_2}')
string_2 = ''.join(string_2)
print(f'Tamanho da String 1 :{len(string_1)}\n'
f'Tamanho da String 2: {len(string_2)}')
if string_1 == string_2:
print('As strings possuem o mesmo conteudo!')
else:
print('As strings não possuem o mesmo conteudo!')
| [
"[email protected]"
]
| |
71ef0ac38df7ff3711365479429d3a21f262af87 | 1b48b3980abbe11691310a7f35efef62bc0ae831 | /_msic/py/_fp/rxpy/test_rx.py | 7ae445bd4cd75655f4c4f14080afa7efe81709e5 | []
| no_license | FXTD-ODYSSEY/MayaScript | 7619b1ebbd664988a553167262c082cd01ab80d5 | 095d6587d6620469e0f1803d59a506682714da17 | refs/heads/master | 2022-11-05T08:37:16.417181 | 2022-10-31T11:50:26 | 2022-10-31T11:50:26 | 224,664,871 | 45 | 11 | null | null | null | null | UTF-8 | Python | false | false | 299 | py | from rx import Observable
from random import randint
three_emissions = Observable.range(1, 3)
# subscribe() returns a Disposable, not an Observable, so the two
# subscriptions cannot be chained; subscribe twice on the mapped stream.
three_randoms = three_emissions.map(lambda i: randint(1, 100000))
three_randoms.subscribe(lambda i: print("Subscriber 1 Received: {0}".format(i)))
three_randoms.subscribe(lambda i: print("Subscriber 2 Received: {0}".format(i)))
| [
"[email protected]"
]
| |
dc081a3bdcb41c1fec957a206f7cd2c2a8b97677 | 3f6c16ea158a8fb4318b8f069156f1c8d5cff576 | /.PyCharm2019.1/system/python_stubs/-1850396913/lxml/etree/_Comment.py | d1bc71dd97ac493d449fb08c86cc8fe73d2b8f6e | []
| no_license | sarthak-patidar/dotfiles | 08494170d2c0fedc0bbe719cc7c60263ce6fd095 | b62cd46f3491fd3f50c704f0255730af682d1f80 | refs/heads/master | 2020-06-28T23:42:17.236273 | 2019-10-01T13:56:27 | 2019-10-01T13:56:27 | 200,369,900 | 0 | 0 | null | 2019-08-03T12:56:33 | 2019-08-03T11:53:29 | Shell | UTF-8 | Python | false | false | 1,038 | py | # encoding: utf-8
# module lxml.etree
# from /var/www/newsbytes/CricketPlayerDataScrapper/venv/lib/python3.6/site-packages/lxml/etree.cpython-36m-x86_64-linux-gnu.so
# by generator 1.147
""" The ``lxml.etree`` module implements the extended ElementTree API for XML. """
# imports
import builtins as __builtins__ # <module 'builtins' (built-in)>
from .__ContentOnlyElement import __ContentOnlyElement
class _Comment(__ContentOnlyElement):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
tag = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__pyx_vtable__ = None # (!) real value is '<capsule object NULL at 0x7f578a4838d0>'
| [
"[email protected]"
]
| |
db860acf670514cdb3a4a8ac172160bfbafee046 | a8e3ddb269a8b959b3bce38e7b21aaa1a7e69dd4 | /tensorpack/trainv1/config.py | abc02ccaa6e417f64904e35095a3b993806a2fc4 | [
"Apache-2.0"
]
| permissive | myelintek/tensorpack | 55945c7ea9d661b31f28c83e5477870d2f3dac86 | fcbf5869d78cf7f3b59c46318b6c883a7ea12056 | refs/heads/master | 2018-10-25T05:50:15.302077 | 2018-04-09T03:24:27 | 2018-04-09T03:24:27 | 114,971,878 | 0 | 2 | Apache-2.0 | 2022-09-29T03:16:20 | 2017-12-21T06:39:29 | Python | UTF-8 | Python | false | false | 150 | py | # -*- coding: utf-8 -*-
# File: config.py
# Author: Yuxin Wu <[email protected]>
__all__ = ['TrainConfig']
from ..train.config import TrainConfig
| [
"[email protected]"
]
| |
a753e07e6d28f973304135b49936433f388cb925 | 5f300f54929f2acdb2ab3959006d152c775f1b58 | /src/Products/TemporaryFolder/mount.py | 7132798470fe9f57bc4701d01dd7f7e0595191c3 | [
"ZPL-2.1"
]
| permissive | plone-ve/Products.TemporaryFolder | a26a40d88dc65ee4bb04d740162dd68f0a1db2c0 | 26bd1c00503594e17722c7337c69d543f28fd14b | refs/heads/master | 2020-05-25T18:35:24.224661 | 2019-05-08T14:57:12 | 2019-05-08T14:57:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,908 | py | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Mounted database support
"""
import time
import threading
import logging
import persistent
from Acquisition import Implicit
from Acquisition import ImplicitAcquisitionWrapper
from Acquisition import aq_base
from ZODB.POSException import StorageError
logger = logging.getLogger('ZODB.Mount')
# dbs is a holder for all DB objects, needed to overcome
# threading issues. It maps connection params to a DB object
# and a mapping of mount points.
dbs = {}
# dblock is locked every time dbs is accessed.
dblock = threading._allocate_lock()
class MountedStorageError(StorageError):
"""Unable to access mounted storage."""
def parentClassFactory(jar, module, name):
# Use the class factory from the parent database.
parent_conn = getattr(jar, '_mount_parent_jar', None)
parent_db = getattr(parent_conn, '_db', None)
if parent_db is None:
_globals = {}
_silly = ('__doc__',)
return getattr(__import__(
module, _globals, _globals, _silly), name)
else:
return parent_db.classFactory(parent_conn, module, name)
class MountPoint(persistent.Persistent, Implicit):
'''The base class for a Zope object which, when traversed,
accesses a different database.
'''
# Default values for non-persistent variables.
_v_db = None
_v_data = None
_v_connect_error = None
def __init__(self, path, params=None, classDefsFromRoot=None):
'''
@arg path The path within the mounted database from which
to derive the root.
@arg params The parameters used to connect to the database.
No particular format required.
If there is more than one mount point referring to a
database, MountPoint will detect the matching params
and use the existing database. Include the class name of
the storage. For example,
ZEO params might be "ZODB.ZEOClient localhost 1081".
'''
# The only reason we need a __mountpoint_id is to
# be sure we don't close a database prematurely when
# it is mounted more than once and one of the points
# is unmounted.
self.__mountpoint_id = '%s_%f' % (id(self), time.time())
if params is None:
# We still need something to use as a hash in
# the "dbs" dictionary.
params = self.__mountpoint_id
self._params = repr(params)
self._path = path
def _createDB(self):
'''Gets the database object, usually by creating a Storage object
and returning ZODB.DB(storage).
'''
raise NotImplementedError
def _getDB(self):
'''Creates or opens a DB object.
'''
newMount = 0
with dblock:
params = self._params
dbInfo = dbs.get(params, None)
if dbInfo is None:
logger.info('Opening database for mounting: %s', params)
db = self._createDB()
newMount = 1
dbs[params] = (db, {self.__mountpoint_id: 1})
else:
db, mounts = dbInfo
# Be sure this object is in the list of mount points.
if self.__mountpoint_id not in mounts:
newMount = 1
mounts[self.__mountpoint_id] = 1
self._v_db = db
return db, newMount
def _getMountpointId(self):
return self.__mountpoint_id
def _getMountParams(self):
return self._params
def __repr__(self):
return "%s(%s, %s)" % (self.__class__.__name__, repr(self._path),
self._params)
def _openMountableConnection(self, parent):
# Opens a new connection to the database.
db = self._v_db
if db is None:
self._v_close_db = 0
db, newMount = self._getDB()
else:
newMount = 0
jar = getattr(self, '_p_jar', None)
if jar is None:
# Get _p_jar from parent.
self._p_jar = jar = parent._p_jar
conn = db.open()
# Add an attribute to the connection which
# makes it possible for us to find the primary
# database connection. See ClassFactoryForMount().
conn._mount_parent_jar = jar
mcc = MountedConnectionCloser(self, conn)
jar.onCloseCallback(mcc)
return conn, newMount, mcc
def _getObjectFromConnection(self, conn):
obj = self._getMountRoot(conn.root())
data = aq_base(obj)
# Store the data object in a tuple to hide from acquisition.
self._v_data = (data,)
return data
def _getOrOpenObject(self, parent):
t = self._v_data
if t is None:
self._v_connect_error = None
conn = None
newMount = 0
mcc = None
try:
conn, newMount, mcc = self._openMountableConnection(parent)
data = self._getObjectFromConnection(conn)
except Exception:
# Possibly broken database.
if mcc is not None:
# Note that the next line may be a little rash--
# if, for example, a working database throws an
# exception rather than wait for a new connection,
# this will likely cause the database to be closed
# prematurely. Perhaps DB.py needs a
# countActiveConnections() method.
mcc.setCloseDb()
                logger.warning('Failed to mount database.', exc_info=True)
raise
if newMount:
try:
id = data.getId()
except Exception:
id = '???' # data has no getId() method. Bad.
p = '/'.join(parent.getPhysicalPath() + (id,))
logger.info('Mounted database %s at %s',
self._getMountParams(), p)
else:
data = t[0]
return data.__of__(parent)
def __of__(self, parent):
# Accesses the database, returning an acquisition
# wrapper around the connected object rather than around self.
try:
return self._getOrOpenObject(parent)
except Exception:
return ImplicitAcquisitionWrapper(self, parent)
def _test(self, parent):
'''Tests the database connection.
'''
self._getOrOpenObject(parent)
return 1
def _getMountRoot(self, root):
'''Gets the object to be mounted.
Can be overridden to provide different behavior.
'''
try:
app = root['Application']
except Exception:
raise MountedStorageError(
"No 'Application' object exists in the mountable database.")
try:
return app.unrestrictedTraverse(self._path)
except Exception:
raise MountedStorageError(
"The path '%s' was not found in the mountable database."
% self._path)
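# Minimal subclass sketch (illustrative only; the storage path and DB wiring
# below are assumptions, not part of this module). MountPoint is abstract:
# concrete subclasses supply _createDB().
#
# from ZODB import DB
# from ZODB.FileStorage import FileStorage
#
# class DemoMountPoint(MountPoint):
#     def _createDB(self):
#         return DB(FileStorage('/tmp/mounted.fs'))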
class MountedConnectionCloser(object):
'''Closes the connection used by the mounted database
while performing other cleanup.
'''
close_db = 0
def __init__(self, mountpoint, conn):
# conn is the child connection.
self.mp = mountpoint
self.conn = conn
def setCloseDb(self):
self.close_db = 1
def __call__(self):
# The onCloseCallback handler.
# Closes a single connection to the database
# and possibly the database itself.
conn = self.conn
close_db = 0
if conn is not None:
mp = self.mp
# Remove potential circular references.
self.conn = None
self.mp = None
# Detect whether we should close the database.
close_db = self.close_db
t = mp.__dict__.get('_v_data', None)
if t is not None:
del mp.__dict__['_v_data']
data = t[0]
if not close_db and data.__dict__.get(
'_v__object_deleted__', 0):
# This mount point has been deleted.
del data.__dict__['_v__object_deleted__']
close_db = 1
# Close the child connection.
try:
del conn._mount_parent_jar
except Exception:
pass
conn.close()
if close_db:
# Stop using this database. Close it if no other
# MountPoint is using it.
with dblock:
params = mp._getMountParams()
mp._v_db = None
if params in dbs:
dbInfo = dbs[params]
db, mounts = dbInfo
try:
del mounts[mp._getMountpointId()]
except Exception:
pass
if len(mounts) < 1:
# No more mount points are using this database.
del dbs[params]
db.close()
logger.info('Closed database: %s', params)
| [
"[email protected]"
]
| |
9488d6f82af89e6350f8e311867f201ac9056640 | 06d882216885b4cc82ef131afc27baa8a797537a | /food_api/zomato_api/restaurant_url.py | f3399ee659e6f39d9d23973d5b8cccebc3ea0faa | []
| no_license | bopopescu/restaurant_data_crawler | 7de91844ae51b71b1c64af57cf82067f28996940 | dd14839cabd114ab22c86eff15428143a310da5f | refs/heads/master | 2022-11-06T21:52:22.941089 | 2017-10-09T12:10:41 | 2017-10-09T12:10:41 | 282,031,811 | 0 | 0 | null | 2020-07-23T18:54:44 | 2020-07-23T18:54:43 | null | UTF-8 | Python | false | false | 3,633 | py | from bs4 import BeautifulSoup
from urllib2 import Request, urlopen, URLError
import re
from errorhandler import typec
from re import search
from re import sub
import json
def crawlRestaurants(restaurant_url):
try:
menu_url = []
restaurant_menu_url_with_unicode = restaurant_url + "/menu#food"
restaurant_menu_url_with_unicode = restaurant_menu_url_with_unicode.replace(unichr(233),'e')
restaurant_menu_url = sub(r"[^\x00-\x7F]+","",restaurant_menu_url_with_unicode)
try:
response = urlopen(restaurant_menu_url)
html = response.read()
# print html
rest_soup = BeautifulSoup(html)
for javascript_code in rest_soup.find_all("script",{"type":"text/javascript"}):
text = javascript_code.text
pat = "zomato.menuPages"
index = text.find(pat)
if index >= 0:
menu_items = search("zomato.menuPages = (.+?);",text).group(1)
menu_dict = json.loads(menu_items)
for urls in menu_dict:
menu_url.append(str(urls['url']))
return menu_url
        except URLError as error:
            print restaurant_menu_url
            return menu_url  # return whatever was collected; restaurantsDB was never defined here
except URLError as error:
print error
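# Example invocation sketch (placeholder URL, not a real page):
# print crawlRestaurants("https://www.zomato.com/<city>/<restaurant-slug>")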
# def crawlRestaurants(city_name,locality_name):
# try:
# restaurantsDB = []
# searchUrl = "https://www.zomato.com/" + city_name + "/" + locality_name.replace(" ","-").lower() + "-restaurants"
# response = urlopen(searchUrl)
# html = response.read()
# soup = BeautifulSoup(html)
# # Extracting no. of pages
# for pages in soup.find("div",{"class":"col-l-3 mtop0 alpha tmargin pagination-number"}):
# text = pages.text
# tokens = text.split(" ")
# flag = 0
# page_no = 1
# for token in tokens:
# if token.isdigit():
# if flag == 1:
# page_no = int(token) + 1
# flag = 1
# # Crawling on each page of restaurant locality
# for page in range(1,page_no):
# searchUrl = "https://www.zomato.com/" + city_name + "/" + locality_name.replace(" ","-").lower() + "-restaurants?page="+str(page)
# response = urlopen(searchUrl)
# html = response.read()
# soup = BeautifulSoup(html)
# for rest_div in soup.find_all("li",{"class":"resZS mbot0 pbot0 bb even status1"}) + soup.find_all("li",{"class":"resZS mbot0 pbot0 bb even near status1"}):
# restDB = {}
# restDB['id'] = rest_div['data-res_id']
# rest_url_a = rest_div.find("a",{"class":"result-title"})
# rest_url = rest_url_a["href"]
# rest_url = rest_url.replace(unichr(233),'e')
# rest_url = sub(r"[^\x00-\x7F]+","",rest_url)
# restDB['url'] = str(rest_url)
# restaurant_menu_url_with_unicode = restDB['url'] + "/menu#food"
# restaurant_menu_url_with_unicode = restaurant_menu_url_with_unicode.replace(unichr(233),'e')
# restaurant_menu_url = sub(r"[^\x00-\x7F]+","",restaurant_menu_url_with_unicode)
# try:
# response = urlopen(restaurant_menu_url)
# html = response.read()
# # print html
# rest_soup = BeautifulSoup(html)
# for javascript_code in rest_soup.find_all("script",{"type":"text/javascript"}):
# text = javascript_code.text
# pat = "zomato.menuPages"
# index = text.find(pat)
# if index >= 0:
# menu_items = search("zomato.menuPages = (.+?);",text).group(1)
# menu_dict = json.loads(menu_items)
# menu_url = []
# for urls in menu_dict:
# menu_url.append(str(urls['url']))
# restDB['menu'] = menu_url
# restaurantsDB.append(restDB)
# except URLError as error:
# print restaurant_menu_url
# return restaurantsDB
# except URLError as error:
# print error
# print crawlRestaurants(city_name,locality_name)
| [
"[email protected]"
]
| |
77007c1c919ffc67963fee14634b26ee9856e131 | a1bffb2795728a6369c4447ca58e9a60620a1e7d | /intro/matplotlib/examples/plot_aliased.py | 91281736e7c3d601518f28e84fe5b8b6f7ae0e36 | [
"CC-BY-4.0",
"CC-BY-3.0"
]
| permissive | imieza/scipy-lecture-notes | 03a4e0615f4fc4fdea3583d9557742fc1798ba65 | 74c8b7b491ceae0ce5be1745497b7adc0bad1406 | refs/heads/master | 2021-01-16T20:30:57.735341 | 2015-09-21T17:28:35 | 2015-09-21T17:28:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 493 | py | """
Aliased versus anti-aliased
=============================
This example demonstrates aliased versus anti-aliased text.
"""
import pylab as pl
size = 128, 16
dpi = 72.0
figsize= size[0] / float(dpi), size[1] / float(dpi)
fig = pl.figure(figsize=figsize, dpi=dpi)
fig.patch.set_alpha(0)
pl.axes([0, 0, 1, 1], frameon=False)
pl.rcParams['text.antialiased'] = False
pl.text(0.5, 0.5, "Aliased", ha='center', va='center')
pl.xlim(0, 1)
pl.ylim(0, 1)
pl.xticks(())
pl.yticks(())
pl.show()
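# To compare, flip the flag above to pl.rcParams['text.antialiased'] = True
# and re-run; the label is then drawn anti-aliased.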
| [
"[email protected]"
]
| |
bde24f32962bc7daa1d653fc2bfc6b034f25a563 | 4f972877da14226125440b3da9bdb058764d8a54 | /pythonDemo/argparseOpt/add_arg.py | e43e60028c712b282fd0fa4373dee4ad04ff9d48 | []
| no_license | ZhiYinZhang/study | 16c29990cb371e7e278c437aa0abc7c348614063 | 8c085310b4f65e36f2d84d0acda4ca257b7389af | refs/heads/master | 2021-07-09T16:05:02.925343 | 2020-06-30T07:53:05 | 2020-06-30T07:53:05 | 153,767,096 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,403 | py | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# datetime:2020/5/18 14:51
import argparse
if __name__=="__main__":
    parser=argparse.ArgumentParser(description="text shown before the argument help")
    # the optional argument is passed as --by or -b
    parser.add_argument("--by","-b", # flag names; the "-" form is the abbreviation
                        action="store", # what to do with the value; the default "store" keeps the argument's value
                        type=str, # expected data type
                        help="b help", # help text
                        const="1111", # value used when -b/--by is given without a value
                        default="2222", # value used when -b/--by is absent (works together with nargs)
                        nargs="?", # "?" consumes at most one value, falling back to const/default
                        required=False, # whether the option is mandatory (True means required)
                        dest="bb", # attribute name on the parse_args() namespace (default would be "by")
                        metavar="3333", # placeholder shown in usage/help messages
                        choices=["1111","2222","3333"] # allowed values
                        )
    # positional argument, e.g. add_arg.py -b 1 2 gives a=2
    parser.add_argument("a",type=int,help="a help",default=2)
# parser.print_help()
args=parser.parse_args()
print(args) | [
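    # Invocation sketch (hypothetical shell session):
    #   $ python add_arg.py 2              -> Namespace(a=2, bb='2222')
    #   $ python add_arg.py 2 --by         -> Namespace(a=2, bb='1111')
    #   $ python add_arg.py 2 --by 3333    -> Namespace(a=2, bb='3333')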
"[email protected]"
]
| |
f04c44f3b9373ead505307d3d465a8862a926482 | d541422113225e372b2d645fb1e8731b24d12d75 | /hello_name.py | 86c938b89f7a20f62469916e2fcd9bdfa3724004 | []
| no_license | vdpham326/Python_Coding_Exercises | 8c1d5d4cd87f57b94600138649dc865dc9282be3 | 9efd64b24671df1c56ccfac50582d6fd71bc14fc | refs/heads/master | 2022-11-10T09:56:36.252673 | 2020-06-26T09:36:47 | 2020-06-26T09:36:47 | 274,311,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 212 | py | # Given a string name, e.g. "Bob", return a greeting of the form "Hello Bob!".
def hello_name(name):
return "Hello " + name + '!'
print(hello_name('Bob'))
print(hello_name('Alice'))
print(hello_name('X'))
| [
"[email protected]"
]
| |
d3f4f28a07d725a745058165f9fa71a5072d5e6b | c8b095adbbea29211d699f4113a91bc89fa54493 | /jury/models.py | d1b52c51d89d1786c9bd0a9c7582d0bfc7f37143 | [
"MIT"
]
| permissive | maribedran/speakerfight | 9e554e7ea557c5bc44aafb616e46f0878fe8e2d5 | 26e3e70e1d06ec0be004a9b1598c2b55f9823a7d | refs/heads/master | 2021-07-18T04:13:18.974661 | 2017-10-19T17:46:36 | 2017-10-19T17:46:36 | 106,606,011 | 2 | 0 | null | 2017-10-11T20:29:57 | 2017-10-11T20:29:55 | Python | UTF-8 | Python | false | false | 371 | py | from django.utils.translation import ugettext as _
from django.db import models
from django.conf import settings
class Jury(models.Model):
# relations
users = models.ManyToManyField(to=settings.AUTH_USER_MODEL,
related_name='juries')
class Meta:
verbose_name = _('Jury')
verbose_name_plural = _('Juries')
| [
"[email protected]"
]
| |
738f5c35424d9fc3c0c2579254d86e5fe343b5e4 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/6c38443a9c1f79ebf131d120be0f36ccfbe963c6-<main>-bug.py | 191038ac35ab613322148994a91dc9df1e97b7c1 | []
| no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 885 | py | def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(names={
'default': [],
'type': 'list',
}))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
if (not HAS_BOTO):
module.fail_json(msg='boto required for this module')
try:
(region, ec2_url, aws_connect_params) = get_aws_connection_info(module)
if (not region):
module.fail_json(msg='region must be specified')
names = module.params['names']
elb_information = ElbInformation(module, names, region, **aws_connect_params)
ec2_facts_result = dict(changed=False, elbs=elb_information.list_elbs())
except BotoServerError as err:
module.fail_json(msg='{0}: {1}'.format(err.error_code, err.error_message), exception=traceback.format_exc())
module.exit_json(**ec2_facts_result) | [
"[email protected]"
]
| |
327cab1f61b7fc63a691fa1106537977cd19c625 | e273ac58c34f6a0fba8360aef75f52a7ef03d5bb | /ansiblemetrics/playbook/num_unique_names.py | 86e83089e546f11c6247dd51d9b902c4e8b68bfe | [
"Apache-2.0"
]
| permissive | valeriapontillo/radon-ansible-metrics | e25b6c848fd40eb4b5802f540a6fd1ad20a77ce4 | 8a8e27d9b54fc1578d00526c8663184a2e686cb2 | refs/heads/master | 2023-09-06T06:21:43.417616 | 2021-11-04T14:28:04 | 2021-11-04T14:28:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,760 | py | import re
from collections import Counter
from ansiblemetrics.ansible_metric import AnsibleMetric
from ansiblemetrics.utils import key_value_list
class NumUniqueNames(AnsibleMetric):
""" This class measures the number of plays and tasks with unique a name.
"""
def count(self):
"""Return the number of plays and tasks with a unique name.
Example
-------
.. highlight:: python
.. code-block:: python
from ansiblemetrics.general.num_unique_names import NumUniqueNames
playbook = '''
---
- name: demo the logic # unique name
hosts: localhost
gather_facts: false
vars:
num1: 10
num3: 10
tasks:
- name: logic and comparison # duplicate
debug:
msg: "Can you read me?"
when: num1 >= num3 and num1 is even and num2 is not defined
- name: logic and comparison # duplicate
debug:
msg: "Can you read me again?"
when: num3 >= num1
'''
NumUniqueNames(playbook).count()
>> 1
Returns
-------
int
number of plays and tasks with a unique name
"""
names = []
for item in key_value_list(self.playbook): # [(key, value)]
if item[0] == 'name':
item = re.sub(r'\s+', '', str(item[1]))
names.append(item.strip())
frequencies = Counter(names).values() # counts the elements' frequency
unique = sum(1 for v in frequencies if v == 1)
return unique
| [
"[email protected]"
]
| |
4b07fb305ff28845f8393a96c6bf69fafb122469 | d5b526977adfce0ac21588598a61d0fabbd6ed48 | /tkinterBasic.py | 555ee324d4a6d9c89c44116dd5132240a671f8bd | [
"MIT"
]
| permissive | anishmo99/Python-Functionality | b001cdd40828fc0b6879d34ad057dab3524de933 | 91e963609b0ce600d0c46073748611ecbab61dae | refs/heads/master | 2022-12-15T18:46:47.534669 | 2020-09-10T07:14:13 | 2020-09-10T07:14:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 63 | py | import tkinter as tk
m=tk.Tk()
m.title('hi anish')
m.mainloop() | [
"[email protected]"
]
| |
a45a07dd66cbbfa57b6a3b8f8445747b4300de28 | 1d9e681b204e6ec2d7a710ef45b7dec082239491 | /venv/Lib/site-packages/od_python/models/inline_response_200_33.py | 2f87d5fa2b2a17141b43a2b9c133a4e168221558 | []
| no_license | 1chimaruGin/DotaAnalysis | 0e0b85805cc83e4cc491d46f7eadc014e8d6b1f1 | 6a74cde2ee400fc0dc96305203d60c5e56d7ecff | refs/heads/master | 2020-07-21T20:48:07.589295 | 2019-09-07T12:20:15 | 2019-09-07T12:20:15 | 206,972,180 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,902 | py | # coding: utf-8
"""
OpenDota API
    # Introduction The OpenDota API provides Dota 2 related data including advanced match data extracted from match replays. Please keep request rate to approximately 1/s. **Beginning 4/22/2018, the OpenDota API will be limited to 50,000 free calls per month.** We'll be offering a Premium Tier with unlimited API calls and higher rate limits. Check out the [API page](https://www.opendota.com/api-keys) to learn more.
OpenAPI spec version: 17.6.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class InlineResponse20033(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'table_name': 'str',
'column_name': 'str',
'data_type': 'str'
}
attribute_map = {
'table_name': 'table_name',
'column_name': 'column_name',
'data_type': 'data_type'
}
def __init__(self, table_name=None, column_name=None, data_type=None):
"""
InlineResponse20033 - a model defined in Swagger
"""
self._table_name = None
self._column_name = None
self._data_type = None
if table_name is not None:
self.table_name = table_name
if column_name is not None:
self.column_name = column_name
if data_type is not None:
self.data_type = data_type
@property
def table_name(self):
"""
Gets the table_name of this InlineResponse20033.
table_name
:return: The table_name of this InlineResponse20033.
:rtype: str
"""
return self._table_name
@table_name.setter
def table_name(self, table_name):
"""
Sets the table_name of this InlineResponse20033.
table_name
:param table_name: The table_name of this InlineResponse20033.
:type: str
"""
self._table_name = table_name
@property
def column_name(self):
"""
Gets the column_name of this InlineResponse20033.
column_name
:return: The column_name of this InlineResponse20033.
:rtype: str
"""
return self._column_name
@column_name.setter
def column_name(self, column_name):
"""
Sets the column_name of this InlineResponse20033.
column_name
:param column_name: The column_name of this InlineResponse20033.
:type: str
"""
self._column_name = column_name
@property
def data_type(self):
"""
Gets the data_type of this InlineResponse20033.
data_type
:return: The data_type of this InlineResponse20033.
:rtype: str
"""
return self._data_type
@data_type.setter
def data_type(self, data_type):
"""
Sets the data_type of this InlineResponse20033.
data_type
:param data_type: The data_type of this InlineResponse20033.
:type: str
"""
self._data_type = data_type
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, InlineResponse20033):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"[email protected]"
]
| |
fca0f3066fbd42e823817547f2c01715501558b4 | 046d9405f06774ab8d52cd356b137007e6c9599a | /python/python/老师笔记/python/day01_pm/day01/exercise/salary.py | 9b74a9a7b0e9d1efad83c45d995a6132447b113d | []
 | no_license | jluocc/jluo2018 | 3a49d9884bd2bc2e02e0d6ef9f066269c2e430a8 | 8c085310b4f65e36f2d84d0acda4ca257b7389af | refs/heads/master | 2021-07-09T16:05:02.925343 | 2020-06-30T07:53:05 | 2020-06-30T07:53:05 | 153,767,096 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | # 2. A student's salary on graduation is 10,000 yuan,
# rising 20% per year; what will the salary be after ten years?
# (print the result)
print((1 + 0.2) ** 10 * 10000)
| [
"[email protected]"
]
| |
7a203d32c16d289fef8f26566ec33d36956c6123 | b11b16bf88d4d9be80986631ba161883cd9a28a4 | /examples/rc/packages/gnu.py | 1d096f2cec2adb7a006c51d7ab8534210bfb4da8 | [
"Apache-2.0"
]
| permissive | simone-campagna/zapper | 8ec11f68fdf6904cab3031789cd7553aa71f7869 | fee2aaddcb13f789768a30761670c8c142d2b54d | refs/heads/master | 2020-04-26T01:42:32.180173 | 2013-12-07T14:45:57 | 2013-12-07T14:45:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,317 | py | from zapper.package_file import *
gnu = Suite('gnu', NULL_VERSION)
gnu.add_conflicting_tag('compiler-suite')
for version in '4.1.2', '4.5.2', '4.7.0':
version_name = version.replace('.', '_')
gnu_version = Suite(version_name, NULL_VERSION, suite=gnu)
gnu_version.add_conflicting_tag('gnu-suite')
libfoo = PackageFamily('libfoo', 'library')
libfoo_0_5 = Package(libfoo, '0.5', suite=gnu_version)
libfoo_0_5.var_set("FOO_HOME", "/gnu-{0}/foo-0.5".format(version))
libfoo_0_5_3 = Package(libfoo, '0.5.3', suite=gnu_version)
libfoo_0_5_3.var_set("FOO_HOME", "/gnu-{0}/foo-0.5.3".format(version))
libbar = PackageFamily('libbar', 'library')
libbar_1_0_2 = Package(libbar, '1.0.2', suite=gnu_version)
libbar_1_0_2.var_set("BAR_HOME", "/gnu-{0}/bar-1.0.2".format(version))
baz = PackageFamily('baz', 'tool')
baz_1_1 = Package(baz, '1.1', suite=gnu_version)
baz_1_1.var_set("BAZ_HOME", "/gnu-{0}/baz-1.1".format(version))
baz_1_1.requires('libfoo', VERSION > '0.5')
baz_1_1.requires(libbar_1_0_2)
hello_world = PackageFamily("hello_world", 'application')
hello_world_0_0_1_beta = Package(hello_world, '0.0.1-beta', suite=gnu_version)
hello_world_0_0_1_beta.var_set("HELLO_WORLD_HOME", "/gnu-{0}/hello_world-0.0.1-beta".format(version))
| [
"[email protected]"
]
| |
80df44273e2f313dce7038b7329a31df34e2b601 | 7358fef64817a640f224f6a1b0ef22f7e4812d4b | /Materi/Materi 8 Fungsi/isGenap.py | 926f99fc36b94ff6225596af70dc71181e8fc136 | []
| no_license | bimarakajati/Dasar-Pemrograman | 8d4124701c61900c2cc41ec89be2b08c492c8541 | af5e7abf122b8b151625504ac6739ab98996fb7f | refs/heads/master | 2023-08-24T19:32:00.591820 | 2021-10-13T20:10:12 | 2021-10-13T20:10:12 | 302,336,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | def is_Genap(i):
"""
    given a number i of integer type,
    check whether that number is even
    or not
"""
print('keterangan didalam fungsi is_Genap')
return i%2 == 0
print(is_Genap(4)) | [
"[email protected]"
]
| |
d27c1611bd3737bd6b4d8b6aecbf4c536cec23b3 | 015098bcb0e7b5595337c1b3e702942ed5b01272 | /setup.py | 6036b2725a5bdc962a01ccd158e2d7961c4446af | []
| no_license | ASSNAKE/assnake-core-binning | f0e0676aa6bcdc4fc60fa98fcdb49d0f5fa960a7 | 7b7e539722e18226b1dd9cd23231a4fda4ba78c9 | refs/heads/master | 2022-12-25T18:39:22.303737 | 2020-10-09T09:53:07 | 2020-10-09T09:53:07 | 236,690,287 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | from setuptools import setup, find_packages
from setuptools.command.develop import develop
from setuptools.command.install import install
import os, shutil
setup(
name='assnake-core-binning',
version='0.0.1',
packages=find_packages(),
entry_points = {
'assnake.plugins': ['assnake-core-binning = assnake_core_binning.snake_module_setup:snake_module']
}
) | [
"[email protected]"
]
| |
b86275ae56f9d0014b5c3a45b2b8249d042a0397 | c74b29b68211a51d7283d57b24d7cf83422a8ceb | /historischekranten2folia.py | 49a1dadee9ba395be694155de271a6c80da1c684 | []
| no_license | proycon/nlpsandbox | 63359e7cdd709dd81d66aed9bf1437f8ecf706a0 | 22e5f85852b7b2a658c6b94c3dedd425a5d6396f | refs/heads/master | 2020-12-09T19:37:10.040962 | 2019-04-23T17:17:15 | 2019-04-23T17:17:15 | 2,347,265 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,678 | py | #!/usr/bin/env python3
import csv
import sys
from bs4 import BeautifulSoup
from pynlpl.formats import folia
for filename in sys.argv[1:]:
with open(filename, 'r',encoding='utf-8') as f:
reader = csv.DictReader(f, delimiter='\t', quotechar='"')
for row in reader:
docid = "historischekranten_" + row['id'] + '_' + row['article_id'] + '_' + row['paper_id']
print("Processing " + docid,file=sys.stderr)
doc = folia.Document(id=docid)
for key in ('id', 'article_id', 'article_title', 'paper_id', 'paper_title', 'date','article', 'err_text_type', 'colophon', 'colophon_text'):
doc.metadata[key] = row[key]
doc.declare(folia.Paragraph, "https://raw.githubusercontent.com/proycon/folia/master/setdefinitions/nederlab-historischekranten-par.ttl")
body = doc.append(folia.Text(doc, id=docid+".text"))
div = body.append(folia.Division, id=docid+".div")
if row['header'].strip():
head = div.append(folia.Head, BeautifulSoup(row['header'].strip(),'lxml').text, id=docid+".text.head")
if row['subheader'].strip():
div.append(folia.Paragraph, BeautifulSoup(row['subheader'].strip(), 'lxml').text, id=docid+".text.subheader", cls="subheader")
for i, partext in enumerate(row['article_text'].split('\n\n')):
partext = BeautifulSoup(partext.replace("=\n","").replace("\n"," "), "lxml").text.strip()
if partext:
paragraph = div.append(folia.Paragraph, partext, id=docid+".text.p." + str(i+1), cls="normal")
doc.save(docid + ".folia.xml")
| [
"[email protected]"
]
| |
b776e05c4aebbeae77ba412fb2ebf0fec81ef261 | d3aef2ce0ee88c92516e64018f6d9f880911438c | /demo/urls.py | 0137d9575b7afac8bf893f382ea0ac49ae67e9f8 | [
"BSD-3-Clause",
"BSD-2-Clause"
]
| permissive | Apkawa/django-material | 648451d28a21270ddff937abf92931592ab9a56e | 426e845ac27db0e1351bbb7f68377949581dfbd7 | refs/heads/master | 2021-01-15T17:51:49.304338 | 2016-02-26T10:34:15 | 2016-02-26T10:34:15 | 52,146,120 | 0 | 0 | null | 2016-02-20T09:29:25 | 2016-02-20T09:29:24 | null | UTF-8 | Python | false | false | 6,434 | py | from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from django.views import generic
from django.shortcuts import render
from formtools.wizard.views import SessionWizardView
from material.frontend import urls as frontend_urls
from . import forms, widget_forms
def index_view(request):
context = {
'login': forms.LoginForm(),
'registration': forms.RegistrationForm(),
'checkout': forms.CheckoutForm(),
'order': forms.OrderForm(),
'comment': forms.CommentForm(),
'bank': forms.BankForm(),
'hospital': forms.HospitalRegistrationForm(),
}
return render(request, 'index.html', context)
class Wizard(SessionWizardView):
form_list = [forms.WizardForm1, forms.WizardForm2]
def done(self, form_list, **kwargs):
return render(self.request, 'formtools/wizard/wizard_done.html', {
'form_data': [form.cleaned_data for form in form_list],
})
class WidgetFormView(generic.FormView):
template_name = 'widgets_demo.html'
def form_valid(self, form):
return self.render_to_response(
self.get_context_data(form=form))
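    # Note: form_valid re-renders the same template rather than redirecting,
    # so each demo page can display the bound, validated form.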
urlpatterns = [
url(r'^$', index_view),
# demo
url(r'^demo/login/$', generic.FormView.as_view(
form_class=forms.LoginForm, success_url='/demo/login/', template_name="demo.html")),
url(r'^demo/registration/$', generic.FormView.as_view(
form_class=forms.RegistrationForm, success_url='/demo/registration/', template_name="demo.html")),
url(r'^demo/contact/$', generic.FormView.as_view(
form_class=forms.ContactForm, success_url='/demo/contact/', template_name="demo.html")),
url(r'^demo/order/$', generic.FormView.as_view(
form_class=forms.OrderForm, success_url='/demo/order/', template_name="demo.html")),
url(r'^demo/checkout/$', generic.FormView.as_view(
form_class=forms.CheckoutForm, success_url='/demo/checkout/', template_name="demo.html")),
url(r'^demo/comment/$', generic.FormView.as_view(
form_class=forms.CommentForm, success_url='/demo/comment/', template_name="demo.html")),
url(r'^demo/bank/$', generic.FormView.as_view(
form_class=forms.BankForm, success_url='/demo/bank/', template_name="demo.html")),
url(r'^demo/wizard/$', Wizard.as_view()),
url(r'^demo/hospital/$', generic.FormView.as_view(
form_class=forms.HospitalRegistrationForm, success_url='/demo/hospital/', template_name="demo.html")),
url(r'^foundation/basic/', generic.RedirectView.as_view(url='/?cache=no', permanent=False)),
# widget test
url(r'^demo/widget/boolean/$', WidgetFormView.as_view(form_class=widget_forms.BooleanFieldForm)),
url(r'^demo/widget/char/$', WidgetFormView.as_view(form_class=widget_forms.CharFieldForm)),
url(r'^demo/widget/choice/$', WidgetFormView.as_view(form_class=widget_forms.ChoiceFieldForm)),
url(r'^demo/widget/date/$', WidgetFormView.as_view(form_class=widget_forms.DateFieldForm)),
url(r'^demo/widget/datetime/$', WidgetFormView.as_view(form_class=widget_forms.DateTimeFieldForm)),
url(r'^demo/widget/decimal/$', WidgetFormView.as_view(form_class=widget_forms.DecimalFieldForm)),
url(r'^demo/widget/duration/$', WidgetFormView.as_view(form_class=widget_forms.DurationFieldForm)),
url(r'^demo/widget/email/$', WidgetFormView.as_view(form_class=widget_forms.EmailFieldForm)),
url(r'^demo/widget/file/$', WidgetFormView.as_view(form_class=widget_forms.FileFieldForm)),
url(r'^demo/widget/filepath/$', WidgetFormView.as_view(form_class=widget_forms.FilePathFieldForm)),
url(r'^demo/widget/float/$', WidgetFormView.as_view(form_class=widget_forms.FloatFieldForm)),
url(r'^demo/widget/image/$', WidgetFormView.as_view(form_class=widget_forms.ImageFieldForm)),
url(r'^demo/widget/integer/$', WidgetFormView.as_view(form_class=widget_forms.IntegerFieldForm)),
url(r'^demo/widget/ipaddress/$', WidgetFormView.as_view(form_class=widget_forms.GenericIPAddressFieldForm)),
url(r'^demo/widget/multiplechoice/$', WidgetFormView.as_view(form_class=widget_forms.MultipleChoiceFieldForm)),
url(r'^demo/widget/nullbolean/$', WidgetFormView.as_view(form_class=widget_forms.NullBooleanFieldForm)),
url(r'^demo/widget/regex/$', WidgetFormView.as_view(form_class=widget_forms.RegexFieldForm)),
url(r'^demo/widget/slug/$', WidgetFormView.as_view(form_class=widget_forms.SlugFieldForm)),
url(r'^demo/widget/time/$', WidgetFormView.as_view(form_class=widget_forms.TimeFieldForm)),
url(r'^demo/widget/url/$', WidgetFormView.as_view(form_class=widget_forms.URLFieldForm)),
url(r'^demo/widget/uuid/$', WidgetFormView.as_view(form_class=widget_forms.UUIDField)),
url(r'^demo/widget/combo/$', WidgetFormView.as_view(form_class=widget_forms.ComboFieldForm)),
url(r'^demo/widget/splitdatetime/$', WidgetFormView.as_view(form_class=widget_forms.SplitDateTimeFieldForm)),
url(r'^demo/widget/modelchoice/$', WidgetFormView.as_view(form_class=widget_forms.ModelChoiceFieldForm)),
url(r'^demo/widget/modelmultichoice/$', WidgetFormView.as_view(form_class=widget_forms.ModelMultipleChoiceFieldForm)),
url(r'^demo/widget/password/$', WidgetFormView.as_view(form_class=widget_forms.PasswordInputForm)),
url(r'^demo/widget/hidden/$', WidgetFormView.as_view(form_class=widget_forms.HiddenInputForm)),
url(r'^demo/widget/textarea/$', WidgetFormView.as_view(form_class=widget_forms.TextareaForm)),
url(r'^demo/widget/radioselect/$', WidgetFormView.as_view(form_class=widget_forms.RadioSelectForm)),
url(r'^demo/widget/checkboxmultiple/$', WidgetFormView.as_view(
form_class=widget_forms.CheckboxSelectMultipleForm)),
url(r'^demo/widget/fileinput/$', WidgetFormView.as_view(form_class=widget_forms.FileInputForm)),
url(r'^demo/widget/splithiddendatetime/$', WidgetFormView.as_view(
form_class=widget_forms.SplitHiddenDateTimeWidgetForm)),
url(r'^demo/widget/selectdate/$', WidgetFormView.as_view(form_class=widget_forms.SelectDateWidgetForm)),
# admin
url(r'^admin/', include(admin.site.urls)),
# frontend
url(r'^frontend/$', generic.RedirectView.as_view(url='/frontend/accounting/', permanent=False), name="index"),
url(r'', include(frontend_urls)),
]
if 'zinnia' in settings.INSTALLED_APPS:
urlpatterns += [url(r'^weblog/', include('zinnia.urls', namespace='zinnia'))]
| [
"[email protected]"
]
| |
8f6074736677b40ad3abc447c437659f71c7eb0f | fbf4f26a2b97d4fe35aa7b66e9cfed4cd0224e89 | /chlamdb/eutils/sequence_exact_match.py | 83e231040a3d492e29ca337400d07a339d4fe140 | []
| no_license | metagenlab/chlamdb | a100ab93407e15c33684b8d7175873adc6720d0b | f1829cf19ac1ded032d65689fbbff2d37489f739 | refs/heads/master | 2023-03-07T05:30:02.793914 | 2023-03-02T10:30:57 | 2023-03-02T10:30:57 | 179,291,344 | 6 | 1 | null | 2022-11-01T07:01:54 | 2019-04-03T13:02:40 | HTML | UTF-8 | Python | false | false | 4,816 | py | #!/usr/bin/env python
def process_tag(tag):
return tag.split('}')[-1]
def get_UPI(seq):
for element in seq:
if element.tag == '{http://model.picr.ebi.ac.uk}UPI':
return element.text
def get_hit_attributes(hit):
accession = ''
version = ''
taxon_id = ''
db_name = ''
for element in hit:
if element.tag == '{http://model.picr.ebi.ac.uk}accession':
accession = element.text
if element.tag == '{http://model.picr.ebi.ac.uk}accessionVersion':
version = element.text
if element.tag == '{http://model.picr.ebi.ac.uk}databaseName':
db_name = element.text
if element.tag == '{http://model.picr.ebi.ac.uk}taxonId':
taxon_id = element.text
return {"%s.%s" % (accession, version) : [db_name, taxon_id]}
def accession2exact_matches(sequence, target_databases):
'''
    Given an input AA sequence and target database name(s), return:
    - the UniParc accession of the sequence (if it exists)
    - a dictionary with accession(s) of identical sequence(s) and their taxon ID and source database.
    (Accession.version keys)
    Return None if no identical sequence was found.
:param sequence: input AA sequence
:param target_databases: Input database name (see http://www.ebi.ac.uk/Tools/picr/)
'''
import urllib2
import xml.etree.cElementTree as ElementTree
database_string = '&database=' .join(target_databases)
link = "http://www.ebi.ac.uk/Tools/picr/rest/getUPIForSequence?sequence=%s&database=%s&includeattributes=true" % (sequence,
database_string)
print link
req = urllib2.Request(link)
try:
page = urllib2.urlopen(req)
tree = ElementTree.parse(page)
    except:
        import time
        print 'connection problem, trying again...'
        time.sleep(60)
        page = urllib2.urlopen(req)
        tree = ElementTree.parse(page)
db2seq = {}
root = tree.getroot()
seq = root.find('{http://www.ebi.ac.uk/picr/AccessionMappingService}getUPIForSequenceReturn')
if seq is None:
return None
UPI = get_UPI(seq)
identical_seqs = seq.findall('{http://model.picr.ebi.ac.uk}identicalCrossReferences')
for seq in identical_seqs:
db2seq.update(get_hit_attributes(seq))
return UPI, db2seq
def fasta_corresp(fasta_file, target_database, n_keep=1):
from Bio import SeqIO
import sys
print 'keep', n_keep
with open(fasta_file, 'r') as f:
records = SeqIO.parse(f, 'fasta')
for record in records:
picr = accession2exact_matches(record.seq,
target_database)
if picr is None:
sys.stdout.write('%s\t%s\t%s\t%s\n' % (record.name, 'None', 'None', 'None'))
else:
uniparc_accession, matches = picr
database2count = {}
# write up to n_keep identical matches per target database
for accession in matches:
    db_name = matches[accession][0]
    if database2count.get(db_name, 0) >= n_keep:
        continue
    database2count[db_name] = database2count.get(db_name, 0) + 1
    sys.stdout.write('%s\t%s\t%s\t%s\n' % (record.name,
                                           uniparc_accession,
                                           accession,
                                           matches[accession][1]))
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-p", '--protein_seq', type=str, help="Protein sequence")
parser.add_argument("-d", '--database', type=str, help="Target database(s): 'REFSEQ', 'TREMBL', ...", nargs='+', default= ['TREMBL', 'SWISSPROT'])
parser.add_argument("-f", '--fasta_file', type=str, help="Fasta file")
parser.add_argument("-k", '--keep', type=int, help="Number of hit(s) to keep (default: 1)", default=1)
args = parser.parse_args()
if args.protein_seq and args.fasta_file:
raise(IOError('Input either a fasta file or a protein sequence, not both!'))
elif args.protein_seq:
picr = accession2exact_matches(args.protein_seq,
args.database)
if picr is not None:
uniparc_accession, matches = picr
print uniparc_accession, matches
else:
if len(args.database) > 1:
raise(IOError('Fasta file match is only possible for a single database!'))
else:
fasta_corresp(args.fasta_file, args.database, n_keep=args.keep)
| [
"[email protected]"
]
| |
077d1f303b1d8e4453ccf710e00fdc43e75bd68c | 1f38f3cd0ba6d42dd73f273e3dc9df4ebdc0dc9d | /BuzzScoreSite/manage.py | 5463761455fffd46cd530a6382b889c5bc5c5ee1 | []
| no_license | MenshovSergey/BuzzScore | 2a5f8cfd9b46a85665455c2a5cfa298c9a3a698b | 348d1b2feb76a892e489016682f16e7a70a504a9 | refs/heads/master | 2021-01-16T19:49:40.996213 | 2013-10-12T09:43:46 | 2013-10-12T09:43:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "BuzzScoreSite.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"[email protected]"
]
| |
bca0bdb8b08da22691ffa39b5e1104087b4180c0 | 9b79dc0b4b2f13dea85a1d29177e5eb266b6e7f7 | /var/lib/python-support/python2.6/rdflib/store/REGEXMatching.py | b7500a712c707ce0fccfdef7843481cac2b31775 | []
| no_license | haniokasai/netwalker-rootfs | 0bc87efc0ae478338b6326fd9118befcbcc5cd06 | d08f7bf370a82b6970387bb9f165d374a9d9092b | refs/heads/master | 2021-01-10T11:04:34.436513 | 2016-01-12T06:09:50 | 2016-01-12T06:09:50 | 36,504,146 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 69 | py | /usr/share/python-support/python-rdflib/rdflib/store/REGEXMatching.py | [
"[email protected]"
]
| |
9fdf1fcd02e82e69e482cbdf80c02a24fcb02aef | 01200401ef046a917df1205268fa92f23cfd28d8 | /tests/test_histogram.py | f59279d6ed0771159428df32ce8d3a52d8e06d3d | [
"BSD-3-Clause"
]
| permissive | murodin/pyclesperanto_prototype | 5fa8922dcbbc98aa69e1aab779c62a326a6937d7 | 4687e3085a5f8bc12e798bf25acd295ee249fb5e | refs/heads/master | 2023-01-20T14:34:47.858014 | 2020-11-30T11:56:47 | 2020-11-30T11:56:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,534 | py | import pyclesperanto_prototype as cle
import numpy as np
def test_histogram():
test = cle.push_zyx(np.asarray([
[1, 2, 4, 4, 2, 3],
[3, 3, 4, 4, 5, 5]
]))
ref_histogram = [1, 2, 3, 4, 2]
my_histogram = cle.histogram(test, num_bins = 5)
print(my_histogram)
a = cle.pull(my_histogram)
assert (np.allclose(a, ref_histogram))
def test_histogram_3d():
test = cle.push_zyx(np.asarray([
[
[1, 2, 4, 4, 2, 3]
], [
[3, 3, 4, 4, 5, 5]
]
]))
ref_histogram = [1, 2, 3, 4, 2]
my_histogram = cle.histogram(test, num_bins = 5)
print(my_histogram)
a = cle.pull(my_histogram)
assert (np.allclose(a, ref_histogram))
def test_histogram_3d_2():
test = cle.push_zyx(np.asarray([
[
[1, 2, 4],
[4, 2, 3]
], [
[3, 3, 4],
[4, 5, 5]
]
]))
ref_histogram = [1, 2, 3, 4, 2]
my_histogram = cle.histogram(test, num_bins = 5)
print(my_histogram)
a = cle.pull(my_histogram)
assert (np.allclose(a, ref_histogram))
def test_histogram_against_scikit_image():
from skimage.data import camera
image = camera()
from skimage import exposure
hist, bc = exposure.histogram(image.ravel(), 256, source_range='image')
print(str(hist))
gpu_image = cle.push(image)
gpu_hist = cle.histogram(gpu_image, num_bins=256)
print(str(cle.pull_zyx(gpu_hist)))
assert (np.allclose(hist, cle.pull_zyx(gpu_hist)))
| [
"[email protected]"
]
| |
8e6a40aabb5d98acecdf713ba9a997923ae08b27 | 7bf617f77a55d8ec23fa8156c1380b563a5ac7f6 | /CG/SciPy/mm_color_cluster.py | c069d4d292db408ca47cdbeff36617ac590abb43 | []
| no_license | anyatran/school | c06da0e08b148e3d93aec0e76329579bddaa85d5 | 24bcfd75f4a6fe9595d790808f8fca4f9bf6c7ec | refs/heads/master | 2021-06-17T10:45:47.648361 | 2017-05-26T12:57:23 | 2017-05-26T12:57:23 | 92,509,148 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,066 | py | # Auto-clustering, suggested by Matt Terry
from skimage import io, color, exposure
from sklearn import cluster, preprocessing
import numpy as np
import matplotlib.pyplot as plt
url = 'http://blogs.mathworks.com/images/steve/2010/mms.jpg'
import os
if not os.path.exists('mm.png'):
print "Downloading M&M's..."
import urllib2
u = urllib2.urlopen(url)
f = open('mm.png', 'wb')
f.write(u.read())
f.close()
print "Image I/O..."
mm = io.imread('mm.png')
mm_lab = color.rgb2lab(mm)
ab = mm_lab[..., 1:]
print "Mini-batch K-means..."
X = ab.reshape(-1, 2)
kmeans = cluster.MiniBatchKMeans(n_clusters=6)
y = kmeans.fit(X).labels_
labels = y.reshape(mm.shape[:2])
N = labels.max()
def no_ticks(ax):
ax.set_xticks([])
ax.set_yticks([])
# Display all clusters
for i in range(N):
mask = (labels == i)
mm_cluster = mm_lab.copy()
mm_cluster[..., 1:][~mask] = 0
ax = plt.subplot2grid((2, N), (1, i))
ax.imshow(color.lab2rgb(mm_cluster))
no_ticks(ax)
ax = plt.subplot2grid((2, N), (0, 0), colspan=2)
ax.imshow(mm)
no_ticks(ax)
# Display histogram
L, a, b = mm_lab.T
left, right = -100, 100
bins = np.arange(left, right)
H, x_edges, y_edges = np.histogram2d(a.flatten(), b.flatten(), bins,
normed=True)
ax = plt.subplot2grid((2, N), (0, 2))
H_bright = exposure.rescale_intensity(H, in_range=(0, 5e-4))
ax.imshow(H_bright,
extent=[left, right, right, left], cmap=plt.cm.gray)
ax.set_title('Histogram')
ax.set_xlabel('b')
ax.set_ylabel('a')
# Voronoi diagram
mid_bins = bins[:-1] + 0.5
L = len(mid_bins)
yy, xx = np.meshgrid(mid_bins, mid_bins)
Z = kmeans.predict(np.column_stack([xx.ravel(), yy.ravel()]))
Z = Z.reshape((L, L))
ax = plt.subplot2grid((2, N), (0, 3))
ax.imshow(Z, interpolation='nearest',
extent=[left, right, right, left],
cmap=plt.cm.Spectral, alpha=0.8)
ax.imshow(H_bright, alpha=0.2,
extent=[left, right, right, left],
cmap=plt.cm.gray)
ax.set_title('Clustered histogram')
no_ticks(ax)
plt.show()
| [
"[email protected]"
]
| |
e2fa124d83cd3c760b0eff2d53eef09fec49c3aa | 7e266469a84e06e3551a7ba0dca25e894f2f3111 | /Bloomy_Core_CreateQualityInspection_TestCase/test_createqualityinspection_testcase.py | 7f7b7aeeb1520353cfd2c98e5bd56a96ac51aa33 | []
| no_license | Bloomstack-Test-Automation/Bloomstack-Test-Automation | 43862b6761951effee5f17d7428f5be0c34b4499 | 2450df2018715cf6f0ec080ca1dc0751a230d969 | refs/heads/main | 2023-06-06T10:52:57.695175 | 2021-06-30T11:33:30 | 2021-06-30T11:33:30 | 368,438,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,647 | py | import time
from selenium.webdriver.common.by import By
from selenium.webdriver.support.select import Select
from src.testproject.classes import DriverStepSettings, StepSettings
from src.testproject.decorator import report_assertion_errors
from src.testproject.enums import SleepTimingType
from src.testproject.sdk.drivers import webdriver
import pytest
"""
This pytest test was automatically generated by TestProject
Project: Bloomy_Core
Package: TestProject.Generated.Tests.BloomyCore
Test: CreateQualityInspection_TestCase
Generated by: Rahul Prakash ([email protected])
Generated on 05/26/2021, 10:11:04
"""
@pytest.fixture()
def driver():
driver = webdriver.Chrome(token="5o-UXmLZug6gaKmDcoeI6tT7NM19XyG1qnolFybLul4",
project_name="Bloomy_Core",
job_name="CreateQualityInspection_TestCase")
step_settings = StepSettings(timeout=15000,
sleep_time=500,
sleep_timing_type=SleepTimingType.Before)
with DriverStepSettings(driver, step_settings):
yield driver
driver.quit()
@report_assertion_errors
def test_main(driver):
"""Generated By: Rahul."""
# Test Parameters
# Auto generated application URL parameter
ApplicationURL = "https://epitest-demo.bloomstack.io/"
# 1. Navigate to '{ApplicationURL}'
# Navigates the specified URL (Auto-generated)
driver.get(f'{ApplicationURL}')
# 2. Is 'Login' visible?
login = driver.find_element(By.XPATH,
"//a[. = 'Login']")
assert login.is_displayed()
# 3. Click 'Login'
login = driver.find_element(By.XPATH,
"//a[. = 'Login']")
login.click()
# 4. Click 'Email Address'
email_address = driver.find_element(By.CSS_SELECTOR,
"#login_email")
email_address.click()
# 5. Type '[email protected]' in 'Email Address'
email_address = driver.find_element(By.CSS_SELECTOR,
"#login_email")
email_address.send_keys("[email protected]")
# 6. Click 'Password'
password = driver.find_element(By.CSS_SELECTOR,
"#login_password")
password.click()
# 7. Type 'epi@123' in 'Password'
password = driver.find_element(By.CSS_SELECTOR,
"#login_password")
password.send_keys("epi@123")
# 8. Click 'Login1'
login1 = driver.find_element(By.XPATH,
"//button[. = '\n\t\t\t\tLogin']")
login1.click()
# 9. Click 'Search or type a command (Ctrl + G)'
search_or_type_a_command_ctrl_g_ = driver.find_element(By.CSS_SELECTOR,
"#navbar-search")
search_or_type_a_command_ctrl_g_.click()
# 10. Type 'quality ins' in 'Search or type a command (Ctrl + G)'
search_or_type_a_command_ctrl_g_ = driver.find_element(By.CSS_SELECTOR,
"#navbar-search")
search_or_type_a_command_ctrl_g_.send_keys("quality ins")
# 11. Click 'Quality Inspection List'
quality_inspection_list = driver.find_element(By.XPATH,
"//span[. = 'Quality Inspection List']")
quality_inspection_list.click()
# 12. Does 'Quality Inspection1' contain 'Quality Inspection'?
quality_inspection1 = driver.find_element(By.XPATH,
"//div[. = 'Quality Inspection']")
step_output = quality_inspection1.text
assert step_output and ("Quality Inspection" in step_output)
time.sleep(2)
# 13. Click 'New6'
new6 = driver.find_element(By.XPATH,
"//button[. = 'New']")
new6.click()
# 14. Is 'New Quality Inspection4' visible?
new_quality_inspection4 = driver.find_element(By.XPATH,
"//h4[. = 'New Quality Inspection']")
assert new_quality_inspection4.is_displayed()
# 15. Click 'SELECT19'
select19 = driver.find_element(By.XPATH,
"//div[3]/div/div[2]//select")
select19.click()
# 16. Select the 'Incoming' option in 'SELECT19'
select19 = driver.find_element(By.XPATH,
"//div[3]/div/div[2]//select")
Select(select19).select_by_value("Incoming")
# 17. Click 'SELECT19'
select19 = driver.find_element(By.XPATH,
"//div[3]/div/div[2]//select")
select19.click()
# 18. Click 'INPUT84'
input84 = driver.find_element(By.XPATH,
"//div[4]/div/div[2]//input")
input84.click()
# 19. Click 'P15'
p15 = driver.find_element(By.XPATH,
"//div/div/div/ul/li[1]/a/p")
p15.click()
# 20. Click 'INPUT12'
input12 = driver.find_element(By.XPATH,
"//div[5]/div/div[2]//input")
input12.click()
# 21. Type '3.00' in 'INPUT12'
input12 = driver.find_element(By.XPATH,
"//div[5]/div/div[2]//input")
input12.send_keys("3.00")
# 22. Click 'SELECT2'
select2 = driver.find_element(By.XPATH,
"//div[7]//select")
select2.click()
# 23. Select the 'Internal' option in 'SELECT2'
select2 = driver.find_element(By.XPATH,
"//div[7]//select")
Select(select2).select_by_value("Internal")
# 24. Click 'SELECT2'
select2 = driver.find_element(By.XPATH,
"//div[7]//select")
select2.click()
# 25. Click 'Save12'
save12 = driver.find_element(By.XPATH,
"//button[. = 'Save']")
save12.click()
# 26. Click 'Submit7'
submit7 = driver.find_element(By.XPATH,
"//button[. = 'Submit']")
submit7.click()
# 27. Click 'Settings1'
settings1 = driver.find_element(By.XPATH,
"//span[. = ' Settings']")
settings1.click()
# 28. Click 'Logout'
logout = driver.find_element(By.XPATH,
"//a[. = ' Logout']")
logout.click()
# 29. Does 'Login' contain 'Login'?
login = driver.find_element(By.XPATH,
"//a[. = 'Login']")
step_output = login.text
assert step_output and ("Login" in step_output)
| [
"[email protected]"
]
| |
b9ad5d3f538a6de721c9603acde868d0da3788d0 | bc167f434158921bcf2c678155c5cdfec1c9b0c9 | /PI_code/simulator/behaviourGeneration/firstGenScripts_preyHunter/behav388.py | 2b4f02b7722c2854864b1dddca35aacd975e7d93 | []
| no_license | s0217391/DifferentProjects | 6450efc89c64ecd21b86c705737e89e5c69433a6 | 7f4da153660817b6cbf72d2e823aa29c0c2f95a9 | refs/heads/master | 2021-01-17T02:58:46.219240 | 2015-05-26T22:45:46 | 2015-05-26T22:45:46 | 34,995,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | #!/usr/bin/python
import sys
def compute(prey):
temp0 = prey[0] + prey[1]
if temp0 > prey[0]:
temp1 = min(temp0, temp0)
else:
if prey[1] != 0:
temp1 = prey[1] % prey[1]
else:
temp1 = prey[1]
temp0 = max(prey[0], temp0)
temp0 = temp0 - temp1
if prey[1] != 0:
temp2 = prey[0] / prey[1]
else:
temp2 = prey[1]
temp0 = prey[0] - prey[0]
temp1 = min(temp1, temp0)
temp0 = min(prey[1], prey[0])
temp3 = min(prey[0], temp0)
temp0 = min(prey[0], temp2)
temp0 = prey[1] + temp1
if prey[0] > temp1:
temp2 = prey[0] + temp0
else:
temp2 = -1 * prey[1]
if temp2 != 0:
temp2 = prey[0] / temp2
else:
temp2 = temp2
return [temp1, temp1]
| [
"[email protected]"
]
| |
721c16210b081c6ce406706a8bf7b814db33d02e | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/AlipayDataAiserviceHellobikeSiteQueryModel.py | dc06ebbcb58713b563f9fd0994a388c9ab3da002 | [
"Apache-2.0"
]
| permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 896 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayDataAiserviceHellobikeSiteQueryModel(object):
def __init__(self):
self._plan_id = None
@property
def plan_id(self):
return self._plan_id
@plan_id.setter
def plan_id(self, value):
self._plan_id = value
def to_alipay_dict(self):
params = dict()
if self.plan_id:
if hasattr(self.plan_id, 'to_alipay_dict'):
params['plan_id'] = self.plan_id.to_alipay_dict()
else:
params['plan_id'] = self.plan_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayDataAiserviceHellobikeSiteQueryModel()
if 'plan_id' in d:
o.plan_id = d['plan_id']
return o
| [
"[email protected]"
]
| |
92aaa9f2c0851bde5ed7572fb8b8c62845c4c814 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /R4D59C9CQbJvqWaKd_6.py | ed52bb6e52badf15ab27956a07eb2844ef6a368d | []
| no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,240 | py | """
A baseball player's batting average is calculated by the following formula:
BA = (number of hits) / (number of official at-bats)
Batting averages are always expressed rounded to the nearest thousandth with
no leading zero. The top 3 MLB batting averages of all-time are:
1. Ty Cobb .366
2. Rogers Hornsby .358
3. Shoeless Joe Jackson .356
The given list represents a season of games. Each list item indicates a
player's `[hits, official at bats]` per game. Return a string with the
player's seasonal batting average rounded to the nearest thousandth.
### Examples
batting_avg([[0, 0], [1, 3], [2, 2], [0, 4], [1, 5]]) ➞ ".286"
batting_avg([[2, 5], [2, 3], [0, 3], [1, 5], [2, 4]]) ➞ ".350"
batting_avg([[2, 3], [1, 5], [2, 4], [1, 5], [0, 5]]) ➞ ".273"
### Notes
* The number of hits will not exceed the number of official at-bats.
* The list includes official at-bats only. No other plate-appearances (walks, hit-by-pitches, sacrifices, etc.) are included in the list.
* HINT: Think in terms of total hits and total at-bats.
"""
def batting_avg(lst):
x = str(round(sum(i[0] for i in lst)/sum(i[1] for i in lst),3))[1:]
if len(x) != 4:
x += '0'*(4 - len(x))
return x
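# Worked example: [[0, 0], [1, 3], [2, 2], [0, 4], [1, 5]] totals 4 hits over
# 14 official at-bats; 4 / 14 = 0.2857... rounds to 0.286, and dropping the
# leading zero gives ".286". The zero-padding above turns e.g. 0.35 -> ".350".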
| [
"[email protected]"
]
| |
861619f37d3f45ca55feb13d85f1c0ec4990fcef | 52a3beeb07ad326115084a47a9e698efbaec054b | /horizon/.venv/bin/pyscss | 80baac9d659e74f232c739e2139c1b9408819faa | [
"Apache-2.0"
]
| permissive | bopopescu/sample_scripts | 3dade0710ecdc8f9251dc60164747830f8de6877 | f9edce63c0a4d636f672702153662bd77bfd400d | refs/heads/master | 2022-11-17T19:19:34.210886 | 2018-06-11T04:14:27 | 2018-06-11T04:14:27 | 282,088,840 | 0 | 0 | null | 2020-07-24T00:57:31 | 2020-07-24T00:57:31 | null | UTF-8 | Python | false | false | 319 | #!/home/horizon/horizon/.venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pyScss==1.3.5','console_scripts','pyscss'
__requires__ = 'pyScss==1.3.5'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('pyScss==1.3.5', 'console_scripts', 'pyscss')()
)
| [
"[email protected]"
]
| ||
df91c9a9b9937a18b50fc7a7be16c73b905500d8 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_true.py | ada6ea76ff35da675f31a995503e77327d2954a1 | [
"MIT"
]
| permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 359 | py |
#calss header
class _TRUE():
def __init__(self,):
self.name = "TRUE"
self.definitions = [u'to not be in the correct position or to be slightly bent out of the correct shape: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"[email protected]"
]
| |
7d9b8ff5c86e7a469c4e54991a98f844dbd57066 | e4cab6feadcee618f092f23020a157c8ded42ffc | /Basics/Matrix/homework.py | 524046cb9ab52c37bb822c2aedc925bed9786d01 | []
| no_license | Larionov0/Group3_Lessons | 7c314898a70c61aa445db37383076e211692b56b | 628bc7efe6817d107cb39d3017cb7cee44b86ba4 | refs/heads/master | 2023-08-22T07:14:44.595963 | 2021-10-17T11:48:06 | 2021-10-17T11:48:06 | 339,141,183 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 241 | py | matrix = [
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]
]
while True:
i = int(input('enter number from 0 to 3: i = '))
j = int(input('enter number from 0 to 3: j = '))
print(matrix[i][j])
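# A bounds check could guard the lookup above (sketch, not in the original):
#   if not (0 <= i <= 3 and 0 <= j <= 3):
#       print('index out of range, try again')
#       continue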
| [
"[email protected]"
]
| |
50db7d3cbbf9fa9c19ce0fb0431ea172406b3f3e | a9063fd669162d4ce0e1d6cd2e35974274851547 | /swagger_client/api/im_chat_api.py | 2f290030aa133f9b5950f05a99c87affd9397deb | []
| no_license | rootalley/py-zoom-api | 9d29a8c750e110f7bd9b65ff7301af27e8518a3d | bfebf3aa7b714dcac78be7c0affb9050bbce8641 | refs/heads/master | 2022-11-07T14:09:59.134600 | 2020-06-20T18:13:50 | 2020-06-20T18:13:50 | 273,760,906 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 28,116 | py | # coding: utf-8
"""
Zoom API
The Zoom API allows developers to safely and securely access information from Zoom. You can use this API to build private services or public applications on the [Zoom App Marketplace](http://marketplace.zoom.us). To learn how to get your credentials and create private/public applications, read our [Authorization Guide](https://marketplace.zoom.us/docs/guides/authorization/credentials). All endpoints are available via `https` and are located at `api.zoom.us/v2/`. For instance you can list all users on an account via `https://api.zoom.us/v2/users/`. # noqa: E501
OpenAPI spec version: 2.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class IMChatApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def im_chat_messages(self, session_id, _from, to, **kwargs): # noqa: E501
"""Retrieve IM Chat Messages # noqa: E501
Retrieve IM chat messages for a specified period of time. <aside>Note: This API only supports oauth2.</aside><br><br> **Scopes:** `imchat:read`<br> **[Rate Limit Label](https://marketplace.zoom.us/docs/api-reference/rate-limits#rate-limits):** `Medium`<br> # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.im_chat_messages(session_id, _from, to, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str session_id: IM chat session ID. (required)
:param date _from: Start date in 'yyyy-mm-dd' format. The date range defined by the \"from\" and \"to\" parameters should only be one month as the report includes only one month worth of data at once. (required)
:param date to: End date. (required)
:param int page_size: The number of records returned within a single API call.
:param str next_page_token: The next page token is used to paginate through large result sets. A next page token will be returned whenever the set of available results exceeds the current page size. The expiration period for this token is 15 minutes.
:return: InlineResponse20021
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.im_chat_messages_with_http_info(session_id, _from, to, **kwargs) # noqa: E501
else:
(data) = self.im_chat_messages_with_http_info(session_id, _from, to, **kwargs) # noqa: E501
return data
def im_chat_messages_with_http_info(self, session_id, _from, to, **kwargs): # noqa: E501
"""Retrieve IM Chat Messages # noqa: E501
Retrieve IM chat messages for a specified period of time. <aside>Note: This API only supports oauth2.</aside><br><br> **Scopes:** `imchat:read`<br> **[Rate Limit Label](https://marketplace.zoom.us/docs/api-reference/rate-limits#rate-limits):** `Medium`<br> # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.im_chat_messages_with_http_info(session_id, _from, to, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str session_id: IM chat session ID. (required)
:param date _from: Start date in 'yyyy-mm-dd' format. The date range defined by the \"from\" and \"to\" parameters should only be one month as the report includes only one month worth of data at once. (required)
:param date to: End date. (required)
:param int page_size: The number of records returned within a single API call.
:param str next_page_token: The next page token is used to paginate through large result sets. A next page token will be returned whenever the set of available results exceeds the current page size. The expiration period for this token is 15 minutes.
:return: InlineResponse20021
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['session_id', '_from', 'to', 'page_size', 'next_page_token'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method im_chat_messages" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'session_id' is set
if ('session_id' not in params or
params['session_id'] is None):
raise ValueError("Missing the required parameter `session_id` when calling `im_chat_messages`") # noqa: E501
# verify the required parameter '_from' is set
if ('_from' not in params or
params['_from'] is None):
raise ValueError("Missing the required parameter `_from` when calling `im_chat_messages`") # noqa: E501
# verify the required parameter 'to' is set
if ('to' not in params or
params['to'] is None):
raise ValueError("Missing the required parameter `to` when calling `im_chat_messages`") # noqa: E501
collection_formats = {}
path_params = {}
if 'session_id' in params:
path_params['sessionId'] = params['session_id'] # noqa: E501
query_params = []
if '_from' in params:
query_params.append(('from', params['_from'])) # noqa: E501
if 'to' in params:
query_params.append(('to', params['to'])) # noqa: E501
if 'page_size' in params:
query_params.append(('page_size', params['page_size'])) # noqa: E501
if 'next_page_token' in params:
query_params.append(('next_page_token', params['next_page_token'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['OAuth'] # noqa: E501
return self.api_client.call_api(
'/im/chat/sessions/{sessionId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse20021', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def im_chat_sessions(self, _from, to, **kwargs): # noqa: E501
"""Get IM Chat Sessions # noqa: E501
Retrieve IM Chat sessions for a specified period of time. <aside>Note: This API only supports Oauth2.</aside><br> **Scopes:** `imchat:read, imchat:read:admin`<br> **[Rate Limit Label](https://marketplace.zoom.us/docs/api-reference/rate-limits#rate-limits):** `Heavy` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.im_chat_sessions(_from, to, async_req=True)
>>> result = thread.get()
:param async_req bool
:param date _from: Start date in 'yyyy-mm-dd' format. The date range defined by the \"from\" and \"to\" parameters should only be one month as the report includes only one month worth of data at once. (required)
:param date to: End date. (required)
:param int page_size: The number of records returned within a single API call.
:param str next_page_token: The next page token is used to paginate through large result sets. A next page token will be returned whenever the set of available results exceeds the current page size. The expiration period for this token is 15 minutes.
:return: InlineResponse20020
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.im_chat_sessions_with_http_info(_from, to, **kwargs) # noqa: E501
else:
(data) = self.im_chat_sessions_with_http_info(_from, to, **kwargs) # noqa: E501
return data
def im_chat_sessions_with_http_info(self, _from, to, **kwargs): # noqa: E501
"""Get IM Chat Sessions # noqa: E501
Retrieve IM Chat sessions for a specified period of time. <aside>Note: This API only supports Oauth2.</aside><br> **Scopes:** `imchat:read, imchat:read:admin`<br> **[Rate Limit Label](https://marketplace.zoom.us/docs/api-reference/rate-limits#rate-limits):** `Heavy` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.im_chat_sessions_with_http_info(_from, to, async_req=True)
>>> result = thread.get()
:param async_req bool
:param date _from: Start date in 'yyyy-mm-dd' format. The date range defined by the \"from\" and \"to\" parameters should only be one month as the report includes only one month worth of data at once. (required)
:param date to: End date. (required)
:param int page_size: The number of records returned within a single API call.
:param str next_page_token: The next page token is used to paginate through large result sets. A next page token will be returned whenever the set of available results exceeds the current page size. The expiration period for this token is 15 minutes.
:return: InlineResponse20020
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['_from', 'to', 'page_size', 'next_page_token'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method im_chat_sessions" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter '_from' is set
if ('_from' not in params or
params['_from'] is None):
raise ValueError("Missing the required parameter `_from` when calling `im_chat_sessions`") # noqa: E501
# verify the required parameter 'to' is set
if ('to' not in params or
params['to'] is None):
raise ValueError("Missing the required parameter `to` when calling `im_chat_sessions`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if '_from' in params:
query_params.append(('from', params['_from'])) # noqa: E501
if 'to' in params:
query_params.append(('to', params['to'])) # noqa: E501
if 'page_size' in params:
query_params.append(('page_size', params['page_size'])) # noqa: E501
if 'next_page_token' in params:
query_params.append(('next_page_token', params['next_page_token'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['OAuth'] # noqa: E501
return self.api_client.call_api(
'/im/chat/sessions', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse20020', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def listimmessages(self, user_id, **kwargs): # noqa: E501
"""Get User’s IM Messages # noqa: E501
Get IM Chat messages for a specified period of time. <aside>Note: This API only supports Oauth2.</aside><br><br> **Scopes:** `imchat:read`<br> **[Rate Limit Label](https://marketplace.zoom.us/docs/api-reference/rate-limits#rate-limits):** `Medium`<br> # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.listimmessages(user_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str user_id: The user ID or email address. (required)
:param str chat_user: Chat user's ID or email address.
:param str channel: IM Channel's ID.
:param str _date: IM message's query date time, format as yyyy-MM-dd.
:param int page_size: The number of records returned within a single API call.
:param str next_page_token: The next page token is used to paginate through large result sets. A next page token will be returned whenever the set of available results exceeds the current page size. The expiration period for this token is 15 minutes.
:return: InlineResponse20060
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.listimmessages_with_http_info(user_id, **kwargs) # noqa: E501
else:
(data) = self.listimmessages_with_http_info(user_id, **kwargs) # noqa: E501
return data
def listimmessages_with_http_info(self, user_id, **kwargs): # noqa: E501
"""Get User’s IM Messages # noqa: E501
Get IM Chat messages for a specified period of time. <aside>Note: This API only supports Oauth2.</aside><br><br> **Scopes:** `imchat:read`<br> **[Rate Limit Label](https://marketplace.zoom.us/docs/api-reference/rate-limits#rate-limits):** `Medium`<br> # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.listimmessages_with_http_info(user_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str user_id: The user ID or email address. (required)
:param str chat_user: Chat user's ID or email address.
:param str channel: IM Channel's ID.
:param str _date: IM message's query date time, format as yyyy-MM-dd.
:param int page_size: The number of records returned within a single API call.
:param str next_page_token: The next page token is used to paginate through large result sets. A next page token will be returned whenever the set of available results exceeds the current page size. The expiration period for this token is 15 minutes.
:return: InlineResponse20060
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['user_id', 'chat_user', 'channel', '_date', 'page_size', 'next_page_token'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method listimmessages" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'user_id' is set
if ('user_id' not in params or
params['user_id'] is None):
raise ValueError("Missing the required parameter `user_id` when calling `listimmessages`") # noqa: E501
collection_formats = {}
path_params = {}
if 'user_id' in params:
path_params['userId'] = params['user_id'] # noqa: E501
query_params = []
if 'chat_user' in params:
query_params.append(('chat_user', params['chat_user'])) # noqa: E501
if 'channel' in params:
query_params.append(('channel', params['channel'])) # noqa: E501
if '_date' in params:
query_params.append(('date', params['_date'])) # noqa: E501
if 'page_size' in params:
query_params.append(('page_size', params['page_size'])) # noqa: E501
if 'next_page_token' in params:
query_params.append(('next_page_token', params['next_page_token'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['OAuth'] # noqa: E501
return self.api_client.call_api(
'/im/users/{userId}/chat/messages', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse20060', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def sendimmessages(self, **kwargs): # noqa: E501
"""Send IM messages # noqa: E501
Send chat message to a user. <aside>Note: This API only supports OAuth 2.0.</aside><br><br>**Scope:** `imchat:write` **[Rate Limit Label](https://marketplace.zoom.us/docs/api-reference/rate-limits#rate-limits):** `Medium` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.sendimmessages(async_req=True)
>>> result = thread.get()
:param async_req bool
:param Body117 body:
:param str chat_user: The email address (registered with Zoom) or the userId of the chat user.
:return: InlineResponse20122
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.sendimmessages_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.sendimmessages_with_http_info(**kwargs) # noqa: E501
return data
def sendimmessages_with_http_info(self, **kwargs): # noqa: E501
"""Send IM messages # noqa: E501
Send chat message to a user. <aside>Note: This API only supports OAuth 2.0.</aside><br><br>**Scope:** `imchat:write` **[Rate Limit Label](https://marketplace.zoom.us/docs/api-reference/rate-limits#rate-limits):** `Medium` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.sendimmessages_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param Body117 body:
:param str chat_user: The email address (registered with Zoom) or the userId of the chat user.
:return: InlineResponse20122
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'chat_user'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method sendimmessages" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'chat_user' in params:
query_params.append(('chat_user', params['chat_user'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
if 'message' in params:
form_params.append(('message', params['message'])) # noqa: E501
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'multipart/form-data']) # noqa: E501
# Authentication setting
auth_settings = ['OAuth'] # noqa: E501
return self.api_client.call_api(
'/im/users/me/chat/messages', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse20122', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
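# NOTE: the generator emitted sendimmessages twice. The second definition
# below (which documents a plain `message` form field instead of a Body117
# payload) rebinds the name and shadows the version above.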
def sendimmessages(self, **kwargs): # noqa: E501
"""Send IM messages # noqa: E501
Send chat message to a user. <aside>Note: This API only supports OAuth 2.0.</aside><br><br>**Scope:** `imchat:write` **[Rate Limit Label](https://marketplace.zoom.us/docs/api-reference/rate-limits#rate-limits):** `Medium` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.sendimmessages(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str message:
:param str chat_user: The email address (registered with Zoom) or the userId of the chat user.
:return: InlineResponse20122
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.sendimmessages_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.sendimmessages_with_http_info(**kwargs) # noqa: E501
return data
def sendimmessages_with_http_info(self, **kwargs): # noqa: E501
"""Send IM messages # noqa: E501
Send chat message to a user. <aside>Note: This API only supports OAuth 2.0.</aside><br><br>**Scope:** `imchat:write` **[Rate Limit Label](https://marketplace.zoom.us/docs/api-reference/rate-limits#rate-limits):** `Medium` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.sendimmessages_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str message:
:param str chat_user: The email address (registered with Zoom) or the userId of the chat user.
:return: InlineResponse20122
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['message', 'chat_user'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method sendimmessages" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'chat_user' in params:
query_params.append(('chat_user', params['chat_user'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
if 'message' in params:
form_params.append(('message', params['message'])) # noqa: E501
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'multipart/form-data']) # noqa: E501
# Authentication setting
auth_settings = ['OAuth'] # noqa: E501
return self.api_client.call_api(
'/im/users/me/chat/messages', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse20122', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| [
"[email protected]"
]
| |
4aa619c5f0da271cf82f1c1c1edb77fb610b3181 | 4b17d98ad2a3ef018cfb33f7f1d645ede72eb808 | /models.py | 317ff17f52dc2e3d03d3556e07facbc26924d19b | [
"MIT"
]
| permissive | poshan0126/Facial-Keypoint-Detection | 932ce0b85d7b1b0b893376537a5cf7c148704ee7 | fc52574b4c006e3afd86f209369e1a3e704a65fa | refs/heads/master | 2020-09-02T19:43:20.650541 | 2019-11-03T11:53:30 | 2019-11-03T11:53:30 | 219,292,492 | 0 | 0 | MIT | 2020-01-19T09:34:06 | 2019-11-03T11:47:10 | Jupyter Notebook | UTF-8 | Python | false | false | 3,030 | py | ## TODO: define the convolutional neural network architecture
num_output = 136 # As it's suggest final linear layer have to output 136 values, 2 for each of the 68 keypoint (x,y) pairs.
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
# can use the below import should you choose to initialize the weights of your Net
import torch.nn.init as I
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
## TODO: Define all the layers of this CNN, the only requirements are:
## 1. This network takes in a square (same width and height), grayscale image as input
## 2. It ends with a linear layer that represents the keypoints
## it's suggested that you make this last layer output 136 values, 2 for each of the 68 keypoint (x, y) pairs
# As an example, you've been given a convolutional layer, which you may (but don't have to) change:
## Note that among the layers to add, consider including:
# maxpooling layers, multiple conv layers, fully-connected layers, and other layers (such as dropout or batch normalization) to avoid overfitting
## Define layers of a CNN
## 1 input image channel (grayscale), 32 output channels/feature maps, 3x3 square convolution kernel
self.conv1 = nn.Conv2d(1, 32, kernel_size=(3,3), stride=1, padding=1)
self.conv2 = nn.Conv2d(32, 32, kernel_size=(3,3), stride=1, padding=1)
self.conv3 = nn.Conv2d(32, 64, kernel_size=(3,3), stride=1, padding=1)
self.conv4 = nn.Conv2d(64, 64, kernel_size=(3,3), stride=1, padding=1)
self.conv5 = nn.Conv2d(64, 128, kernel_size=(3,3), stride=1, padding=1)
self.pool = nn.MaxPool2d(kernel_size=(2,2), stride=2)
# Output of the convolution stack has height and width of 28 and depth of 128 (for a 224x224 input)
self.fc1 = nn.Linear(28*28*128, num_output)
#self.fc2 = nn.Linear(10000, num_output)
self.dropout = nn.Dropout(0.5)
def forward(self, x):
#print("Enters Forward")
## TODO: Define the feedforward behavior of this model
## x is the input image and, as an example, here you may choose to include a pool/conv step:
## x = self.pool(F.relu(self.conv1(x)))
# a modified x, having gone through all the layers of your model, should be returned
x = F.relu(self.conv1(x))
x = self.pool(x)
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = self.pool(x)
x = F.relu(self.conv4(x))
x = F.relu(self.conv5(x))
x = self.pool(x)
# flatten the input image
#x = x.view(x.size(0), -1) same as x.view(-1, 28x28x128)
x = x.view(-1, 28*28*128)
# First hidden layer
x = F.relu(self.fc1(x))
x = self.dropout(x)
#x = self.fc2(x)
#print(x.shape)
#print("Forwarded")
return x
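# Shape trace for the network above, assuming the usual 224x224 grayscale
# input for this facial-keypoint project (the input size is an assumption,
# but it is what makes fc1's 28*28*128 fan-in work out):
# (1, 224, 224) -> conv1 + pool -> (32, 112, 112)
# -> conv2, conv3 + pool -> (64, 56, 56)
# -> conv4, conv5 + pool -> (128, 28, 28)
# -> flatten -> 100352 -> fc1 -> 136 values (68 (x, y) keypoint pairs)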
| [
"[email protected]"
]
| |
c11ee44633ac855b635d80da88d711d373e23c60 | 59886a1143cc4043b19e398fae1fddb5742b4b55 | /src/main/python/rlbot/agents/base_java_agent.py | 558144395485290f687591f9f3c43416c417fb28 | [
"MIT"
]
| permissive | RLBot/RLBot | a6c4f502403f02822b3e4078b27583226584432e | c2f7c9a07911691b112b5338008e2ec932e7aee0 | refs/heads/master | 2023-08-16T06:04:35.384448 | 2023-07-01T11:21:26 | 2023-07-01T11:21:26 | 80,671,678 | 482 | 138 | MIT | 2023-07-01T11:21:28 | 2017-02-01T22:36:52 | Python | UTF-8 | Python | false | false | 542 | py | from rlbot.agents.base_independent_agent import BaseIndependentAgent
class BaseJavaAgent(BaseIndependentAgent):
def __init__(self, name, team, index):
super().__init__(name, team, index)
raise NotImplementedError(
f"Cannot run {name} because BaseJavaAgent is deprecated! "
f"Please migrate to ExecutableWithSocketAgent! For more details see "
f"https://github.com/RLBot/RLBotJavaExample/wiki/Py4j-Deprecation")
def run_independently(self, terminate_request_event):
pass
| [
"[email protected]"
]
| |
795c71f40d7e4b7b4ba2a1d84f255eb7b5f64b2d | faa965776fb422437332440a169d9980437e4fce | /text/cleaners.py | b2c8c9d1e2e3a65a3eb3e110beec2fb2eb299138 | []
| no_license | IMLHF/lpc-tracotron | 752ac707568098c870bf5db107dc9d184a7f853d | 5994f84bf828afe11da845fb5153080f673a653e | refs/heads/master | 2020-07-02T16:50:18.803338 | 2019-09-03T03:20:41 | 2019-09-03T03:20:41 | 201,594,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,398 | py | '''
Cleaners are transformations that run over the input text at both training and eval time.
Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
hyperparameter. Some cleaners are English-specific. You'll typically want to use:
1. "english_cleaners" for English text
2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
the Unidecode library (https://pypi.python.org/pypi/Unidecode)
3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
the symbols in symbols.py to match your data).
'''
import re
from unidecode import unidecode
from .numbers import normalize_numbers
# Regular expression matching whitespace:
_whitespace_re = re.compile(r'\s+')
# List of (regular expression, replacement) pairs for abbreviations:
_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
('mrs', 'misess'),
('mr', 'mister'),
('dr', 'doctor'),
('st', 'saint'),
('co', 'company'),
('jr', 'junior'),
('maj', 'major'),
('gen', 'general'),
('drs', 'doctors'),
('rev', 'reverend'),
('lt', 'lieutenant'),
('hon', 'honorable'),
('sgt', 'sergeant'),
('capt', 'captain'),
('esq', 'esquire'),
('ltd', 'limited'),
('col', 'colonel'),
('ft', 'fort'),
]]
def expand_abbreviations(text):
for regex, replacement in _abbreviations:
text = re.sub(regex, replacement, text)
return text
def expand_numbers(text):
return normalize_numbers(text)
def lowercase(text):
return text.lower()
def collapse_whitespace(text):
return re.sub(_whitespace_re, ' ', text)
def convert_to_ascii(text):
return unidecode(text)
def basic_cleaners(text):
'''Basic pipeline that lowercases and collapses whitespace without transliteration.'''
# text = lowercase(text)
text = collapse_whitespace(text)
return text
def transliteration_cleaners(text):
'''Pipeline for non-English text that transliterates to ASCII.'''
#text = convert_to_ascii(text)
# text = lowercase(text)
text = collapse_whitespace(text)
return text
def english_cleaners(text):
'''Pipeline for English text, including number and abbreviation expansion.'''
#text = convert_to_ascii(text)
# text = lowercase(text)
#text = expand_numbers(text)
#text = expand_abbreviations(text)
text = collapse_whitespace(text)
return text
| [
"[email protected]"
]
| |
8365695a37b1717abeeb9271dabd58743f2349c9 | 414393a5048e5212223051d6a5541ecb873bcc53 | /cifar100_Resnet/main_half_clean_B3_20180911.py | 147079ab5dc22ce22a180aaa401fa66475b53c22 | []
| no_license | byh1321/CIFAR100_Distorted_Channel_Selective | 5a0fc1107ab9d60ce12504a8e474144762eda8df | 897f2dea4e645329dfc3bf3df6b147c783bfa83f | refs/heads/master | 2020-03-21T02:31:24.024771 | 2019-08-12T05:59:53 | 2019-08-12T05:59:53 | 138,002,631 | 0 | 0 | null | 2019-08-02T02:26:49 | 2018-06-20T08:26:51 | Python | UTF-8 | Python | false | false | 34,201 | py | """
some parts of code are extracted from "https://github.com/kuangliu/pytorch-cifar"
I modified some parts for our experiment
"""
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torch.autograd import Variable
from utils import progress_bar
import os
import argparse
#import VGG16
#import Resnet_vision as RS
import Resnet34 as RS2
import Resnet18 as RS
import cifar_dirty_test
import cifar_dirty_train
import struct
import random
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
parser.add_argument('--se', default=0, type=int, help='start epoch')
parser.add_argument('--ne', default=0, type=int, help='number of epoch')
parser.add_argument('--bs', default=128, type=int, help='batch size')
parser.add_argument('--mode', default=1, type=int, help='train or inference') #mode=1 is train, mode=0 is inference
parser.add_argument('--fixed', type=int, default=0, metavar='N',help='fixed=0 - floating point arithmetic')
parser.add_argument('--network', default='NULL', help='input network ckpt name', metavar="FILE")
parser.add_argument('--outputfile', default='garbage.txt', help='output file name', metavar="FILE")
args = parser.parse_args()
use_cuda = torch.cuda.is_available()
best_acc = 0 # best test accuracy
use_cuda = torch.cuda.is_available()
transform_train = transforms.Compose([transforms.RandomCrop(32,padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])])
transform_test = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])])
cifar_train = dset.CIFAR100("./", train=True, transform=transform_train, target_transform=None, download=True)
cifar_test = dset.CIFAR100("./", train=False, transform=transform_test, target_transform=None, download=True)
cifar_test_gaussian_025 = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.25_blur_0.0_test_targets.csv")
cifar_test_gaussian_016 = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.16_blur_0.0_test_targets.csv")
cifar_test_gaussian_008 = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.08_blur_0.0_test_targets.csv")
cifar_train_gaussian_025 = cifar_dirty_train.CIFAR100DIRTY_TRAIN("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.25_blur_0.0_train_targets.csv")
cifar_train_gaussian_016 = cifar_dirty_train.CIFAR100DIRTY_TRAIN("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.16_blur_0.0_train_targets.csv")
cifar_train_gaussian_008 = cifar_dirty_train.CIFAR100DIRTY_TRAIN("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.08_blur_0.0_train_targets.csv")
cifar_test_blur_10 = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.0_blur_1.0_test_targets.csv")
cifar_test_blur_09 = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.0_blur_0.9_test_targets.csv")
cifar_test_blur_08 = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/A2S/cifar100_VGG16/cifar100_gaussian_0.0_blur_0.8_test_targets.csv")
cifar_test_blur_0675 = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.0_blur_0.675_test_targets.csv")
cifar_test_blur_06 = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.0_blur_0.6_test_targets.csv")
cifar_test_blur_05 = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.0_blur_0.5_test_targets.csv")
cifar_test_blur_045 = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.0_blur_0.45_test_targets.csv")
cifar_test_blur_04 = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.0_blur_0.4_test_targets.csv")
cifar_test_blur_03 = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.0_blur_0.3_test_targets.csv")
cifar_test_blur_066 = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.0_blur_0.66_test_targets.csv")
cifar_test_blur_033 = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.0_blur_0.33_test_targets.csv")
cifar_train_blur_10 = cifar_dirty_train.CIFAR100DIRTY_TRAIN("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.0_blur_1.0_train_targets.csv")
cifar_train_blur_09 = cifar_dirty_train.CIFAR100DIRTY_TRAIN("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.0_blur_0.9_train_targets.csv")
cifar_train_blur_08 = cifar_dirty_train.CIFAR100DIRTY_TRAIN("/home/yhbyun/A2S/cifar100_VGG16/cifar100_gaussian_0.0_blur_0.8_train_targets.csv")
cifar_train_blur_0675 = cifar_dirty_train.CIFAR100DIRTY_TRAIN("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.0_blur_0.675_train_targets.csv")
cifar_train_blur_06 = cifar_dirty_train.CIFAR100DIRTY_TRAIN("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.0_blur_0.6_train_targets.csv")
cifar_train_blur_05 = cifar_dirty_train.CIFAR100DIRTY_TRAIN("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.0_blur_0.5_train_targets.csv")
cifar_train_blur_045 = cifar_dirty_train.CIFAR100DIRTY_TRAIN("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.0_blur_0.45_train_targets.csv")
cifar_train_blur_04 = cifar_dirty_train.CIFAR100DIRTY_TRAIN("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.0_blur_0.4_train_targets.csv")
cifar_train_blur_03 = cifar_dirty_train.CIFAR100DIRTY_TRAIN("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.0_blur_0.3_train_targets.csv")
cifar_train_blur_066 = cifar_dirty_train.CIFAR100DIRTY_TRAIN("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.0_blur_0.66_train_targets.csv")
cifar_train_blur_033 = cifar_dirty_train.CIFAR100DIRTY_TRAIN("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.0_blur_0.33_train_targets.csv")
cifar_train_gaussian_025 = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.25_blur_0.0_train_targets.csv")
cifar_train_blur_10 = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.0_blur_1.0_train_targets.csv")
cifar_train_gaussian_008_blur_03_mixed = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.08_blur_0.3_train_targets.csv")
cifar_train_gaussian_016_blur_06_mixed = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.16_blur_0.6_train_targets.csv")
cifar_train_gaussian_008_blur_033_mixed = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.08_blur_0.33_train_targets.csv")
cifar_train_gaussian_016_blur_066_mixed = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.16_blur_0.66_train_targets.csv")
cifar_train_gaussian_016_blur_08_mixed = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/A2S/cifar100_VGG16/cifar100_gaussian_0.16_blur_0.8_train_targets.csv")
cifar_train_gaussian_025_blur_10_mixed = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.25_blur_1.0_train_targets.csv")
#train_loader = torch.utils.data.DataLoader(cifar_train,batch_size=args.bs, shuffle=True,num_workers=8,drop_last=False)
train_loader = torch.utils.data.DataLoader(torch.utils.data.ConcatDataset([cifar_train, cifar_train_blur_09, cifar_train_gaussian_008_blur_033_mixed]),batch_size=args.bs, shuffle=True,num_workers=8,drop_last=False)
test_loader = torch.utils.data.DataLoader(cifar_test_blur_09,batch_size=10000, shuffle=False,num_workers=8,drop_last=False)
class ResNet18(nn.Module):
def __init__(self):
super(ResNet18,self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.ReLU(inplace=False),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1),
)
self.layer1_basic1 = nn.Sequential(
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.ReLU(inplace=False),
)
self.layer1_basic2 = nn.Sequential(
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
)
self.layer1_relu1 = nn.Sequential(
nn.ReLU(inplace=False),
)
self.layer1_basic3 = nn.Sequential(
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.ReLU(inplace=False),
)
self.layer1_basic4 = nn.Sequential(
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
)
self.layer1_relu2 = nn.Sequential(
nn.ReLU(inplace=False),
)
self.layer2_basic1 = nn.Sequential(
nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.ReLU(inplace=False),
)
self.layer2_downsample = nn.Sequential(
nn.Conv2d(64, 128, kernel_size=1, stride=2, bias=False),
nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
)
self.layer2_basic2 = nn.Sequential(
nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
)
self.layer2_relu1 = nn.Sequential(
nn.ReLU(inplace=False),
)
self.layer2_basic3 = nn.Sequential(
nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.ReLU(inplace=False),
)
self.layer2_basic4 = nn.Sequential(
nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
)
self.layer2_relu2 = nn.Sequential(
nn.ReLU(inplace=False),
)
self.layer3_basic1 = nn.Sequential(
nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.ReLU(inplace=False),
)
self.layer3_downsample = nn.Sequential(
nn.Conv2d(128, 256, kernel_size=1, stride=2, bias=False),
nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
)
self.layer3_basic2 = nn.Sequential(
nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
)
self.layer3_relu1 = nn.Sequential(
nn.ReLU(inplace=False),
)
self.layer3_basic3 = nn.Sequential(
nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.ReLU(inplace=False),
)
self.layer3_basic4 = nn.Sequential(
nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.ReLU(inplace=False),
)
self.layer3_relu2 = nn.Sequential(
nn.ReLU(inplace=False),
)
self.layer4_basic1 = nn.Sequential(
nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.ReLU(inplace=False),
)
self.layer4_downsample = nn.Sequential(
nn.Conv2d(256, 512, kernel_size=1, stride=2, bias=False),
nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
)
self.layer4_basic2 = nn.Sequential(
nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.ReLU(inplace=False),
)
self.layer4_relu1 = nn.Sequential(
nn.ReLU(inplace=False),
)
self.layer4_basic3 = nn.Sequential(
nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.ReLU(inplace=False),
)
self.layer4_basic4 = nn.Sequential(
nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.ReLU(inplace=False),
)
self.layer4_relu2 = nn.Sequential(
nn.ReLU(inplace=False),
)
self.linear = nn.Sequential(
nn.Linear(512, 100, bias=False)
)
self._initialize_weights()
def forward(self,x):
if args.fixed:
x = quant(x)
x = roundmax(x)
out = x.clone()
out = self.conv1(out)
residual = out
if args.fixed:
out = quant(out)
out = roundmax(out)
out = self.layer1_basic1(out)
if args.fixed:
out = quant(out)
out = roundmax(out)
out = self.layer1_basic2(out)
if args.fixed:
residual = quant(residual)
residual = roundmax(residual)
if args.fixed:
out = quant(out)
out = roundmax(out)
out += residual
out = self.layer1_relu1(out)
residual = out
out = self.layer1_basic3(out)
if args.fixed:
out = quant(out)
out = roundmax(out)
out = self.layer1_basic4(out)
if args.fixed:
residual = quant(residual)
residual = roundmax(residual)
if args.fixed:
out = quant(out)
out = roundmax(out)
out += residual
out = self.layer1_relu2(out)
residual = self.layer2_downsample(out)
out = self.layer2_basic1(out)
if args.fixed:
out = quant(out)
out = roundmax(out)
out = self.layer2_basic2(out)
if args.fixed:
residual = quant(residual)
residual = roundmax(residual)
if args.fixed:
out = quant(out)
out = roundmax(out)
out += residual
out = self.layer2_relu1(out)
residual = out
out = self.layer2_basic3(out)
if args.fixed:
out = quant(out)
out = roundmax(out)
out = self.layer2_basic4(out)
if args.fixed:
residual = quant(residual)
residual = roundmax(residual)
if args.fixed:
out = quant(out)
out = roundmax(out)
out += residual
out = self.layer2_relu2(out)
residual = self.layer3_downsample(out)
out = self.layer3_basic1(out)
if args.fixed:
out = quant(out)
out = roundmax(out)
out = self.layer3_basic2(out)
if args.fixed:
residual = quant(residual)
residual = roundmax(residual)
if args.fixed:
out = quant(out)
out = roundmax(out)
out += residual
out = self.layer3_relu1(out)
residual = out
out = self.layer3_basic3(out)
if args.fixed:
out = quant(out)
out = roundmax(out)
out = self.layer3_basic4(out)
if args.fixed:
residual = quant(residual)
residual = roundmax(residual)
if args.fixed:
out = quant(out)
out = roundmax(out)
out += residual
out = self.layer3_relu2(out)
residual = self.layer4_downsample(out)
out = self.layer4_basic1(out)
if args.fixed:
out = quant(out)
out = roundmax(out)
out = self.layer4_basic2(out)
if args.fixed:
residual = quant(residual)
residual = roundmax(residual)
if args.fixed:
out = quant(out)
out = roundmax(out)
out += residual
out = self.layer4_relu1(out)
residual = out
out = self.layer4_basic3(out)
if args.fixed:
out = quant(out)
out = roundmax(out)
out = self.layer4_basic4(out)
if args.fixed:
residual = quant(residual)
residual = roundmax(residual)
if args.fixed:
out = quant(out)
out = roundmax(out)
out += residual
out = self.layer4_relu2(out)
out = F.avg_pool2d(out, 2)
out = out.view(out.size(0), -1)
#print(out.size())
out = self.linear(out)
return out
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
#print(m)
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
#if m.bias is not None:
#nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
#nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
#print(m)
nn.init.normal_(m.weight, 0, 0.01)
#nn.init.constant_(m.bias, 0)
def roundmax(input):
maximum = 2**args.iwidth-1
minimum = -maximum-1
input = F.relu(torch.add(input, -minimum))
input = F.relu(torch.add(torch.neg(input), maximum-minimum))
input = torch.add(torch.neg(input), maximum)
return input
def quant(input):
input = torch.round(input / (2 ** (-args.aprec))) * (2 ** (-args.aprec))
return input
def set_mask(mask, block, val):
if block == 0:
mask[0][:,:,:,:] = val
mask[1][:,:,:,:] = val
mask[2][:,:,:,:] = val
mask[3][:,:,:,:] = val
mask[4][:,:,:,:] = val
mask[5][:,:,:,:] = val
mask[6][:,:,:,:] = val
mask[7][:,:,:,:] = val
mask[8][:,:,:,:] = val
mask[9][:,:,:,:] = val
mask[10][:,:,:,:] = val
mask[11][:,:,:,:] = val
mask[12][:,:,:,:] = val
mask[13][:,:,:,:] = val
mask[14][:,:,:,:] = val
mask[15][:,:,:,:] = val
mask[16][:,:,:,:] = val
mask[17][:,:,:,:] = val
mask[18][:,:,:,:] = val
mask[19][:,:,:,:] = val
mask[20][:,:] = val
elif block == 1:
mask[0][0:55,:,:,:] = val
mask[1][0:55,0:55,:,:] = val
mask[2][0:55,0:55,:,:] = val
mask[3][0:55,0:55,:,:] = val
mask[4][0:55,0:55,:,:] = val
mask[5][0:111,0:55,:,:] = val
mask[6][0:111,0:111,:,:] = val
mask[7][0:111,0:111,:,:] = val
mask[8][0:111,0:111,:,:] = val
mask[9][0:223,0:111,:,:] = val
mask[10][0:223,0:223,:,:] = val
mask[11][0:223,0:223,:,:] = val
mask[12][0:223,0:223,:,:] = val
mask[13][0:447,0:223,:,:] = val
mask[14][0:447,0:447,:,:] = val
mask[15][0:447,0:447,:,:] = val
mask[16][0:447,0:447,:,:] = val
mask[17][0:111,0:55,:,:] = val
mask[18][0:223,0:111,:,:] = val
mask[19][0:447,0:223,:,:] = val
mask[20][:,0:447] = val
elif block == 2:
mask[0][0:47,:,:,:] = val
mask[1][0:47,0:47,:,:] = val
mask[2][0:47,0:47,:,:] = val
mask[3][0:47,0:47,:,:] = val
mask[4][0:47,0:47,:,:] = val
mask[5][0:95,0:47,:,:] = val
mask[6][0:95,0:95,:,:] = val
mask[7][0:95,0:95,:,:] = val
mask[8][0:95,0:95,:,:] = val
mask[9][0:191,0:95,:,:] = val
mask[10][0:191,0:191,:,:] = val
mask[11][0:191,0:191,:,:] = val
mask[12][0:191,0:191,:,:] = val
mask[13][0:383,0:191,:,:] = val
mask[14][0:383,0:383,:,:] = val
mask[15][0:383,0:383,:,:] = val
mask[16][0:383,0:383,:,:] = val
mask[17][0:95,0:47,:,:] = val
mask[18][0:191,0:95,:,:] = val
mask[19][0:383,0:191,:,:] = val
mask[20][:,0:383] = val
elif block == 3:
mask[0][0:39,:,:,:] = val
mask[1][0:39,0:39,:,:] = val
mask[2][0:39,0:39,:,:] = val
mask[3][0:39,0:39,:,:] = val
mask[4][0:39,0:39,:,:] = val
mask[5][0:79,0:39,:,:] = val
mask[6][0:79,0:79,:,:] = val
mask[7][0:79,0:79,:,:] = val
mask[8][0:79,0:79,:,:] = val
mask[9][0:159,0:79,:,:] = val
mask[10][0:159,0:159,:,:] = val
mask[11][0:159,0:159,:,:] = val
mask[12][0:159,0:159,:,:] = val
mask[13][0:319,0:159,:,:] = val
mask[14][0:319,0:319,:,:] = val
mask[15][0:319,0:319,:,:] = val
mask[16][0:319,0:319,:,:] = val
mask[17][0:79,0:39,:,:] = val
mask[18][0:159,0:79,:,:] = val
mask[19][0:319,0:159,:,:] = val
mask[20][:,0:319] = val
elif block == 4:
mask[0][0:31,:,:,:] = val
mask[1][0:31,0:31,:,:] = val
mask[2][0:31,0:31,:,:] = val
mask[3][0:31,0:31,:,:] = val
mask[4][0:31,0:31,:,:] = val
mask[5][0:63,0:31,:,:] = val
mask[6][0:63,0:63,:,:] = val
mask[7][0:63,0:63,:,:] = val
mask[8][0:63,0:63,:,:] = val
mask[9][0:127,0:63,:,:] = val
mask[10][0:127,0:127,:,:] = val
mask[11][0:127,0:127,:,:] = val
mask[12][0:127,0:127,:,:] = val
mask[13][0:255,0:127,:,:] = val
mask[14][0:255,0:255,:,:] = val
mask[15][0:255,0:255,:,:] = val
mask[16][0:255,0:255,:,:] = val
mask[17][0:63,0:31,:,:] = val
mask[18][0:127,0:63,:,:] = val
mask[19][0:255,0:127,:,:] = val
mask[20][:,0:255] = val
return mask
def save_network(layer):
for child in net2.children():
for param in child.conv1[0].parameters():
layer[0] = param.data
for child in net2.children():
for param in child.layer1_basic1[0].parameters():
layer[1] = param.data
for child in net2.children():
for param in child.layer1_basic2[0].parameters():
layer[2] = param.data
for child in net2.children():
for param in child.layer1_basic3[0].parameters():
layer[3] = param.data
for child in net2.children():
for param in child.layer1_basic4[0].parameters():
layer[4] = param.data
for child in net2.children():
for param in child.layer2_basic1[0].parameters():
layer[5] = param.data
for child in net2.children():
for param in child.layer2_basic2[0].parameters():
layer[6] = param.data
for child in net2.children():
for param in child.layer2_basic3[0].parameters():
layer[7] = param.data
for child in net2.children():
for param in child.layer2_basic4[0].parameters():
layer[8] = param.data
for child in net2.children():
for param in child.layer3_basic1[0].parameters():
layer[9] = param.data
for child in net2.children():
for param in child.layer3_basic2[0].parameters():
layer[10] = param.data
for child in net2.children():
for param in child.layer3_basic3[0].parameters():
layer[11] = param.data
for child in net2.children():
for param in child.layer3_basic4[0].parameters():
layer[12] = param.data
for child in net2.children():
for param in child.layer4_basic1[0].parameters():
layer[13] = param.data
for child in net2.children():
for param in child.layer4_basic2[0].parameters():
layer[14] = param.data
for child in net2.children():
for param in child.layer4_basic3[0].parameters():
layer[15] = param.data
for child in net2.children():
for param in child.layer4_basic4[0].parameters():
layer[16] = param.data
for child in net2.children():
for param in child.layer2_downsample[0].parameters():
layer[17] = param.data
for child in net2.children():
for param in child.layer3_downsample[0].parameters():
layer[18] = param.data
for child in net2.children():
for param in child.layer4_downsample[0].parameters():
layer[19] = param.data
for child in net2.children():
for param in child.linear[0].parameters():
layer[20] = param.data
return layer
def add_network():
layer = torch.load('mask_null.dat')
layer = save_network(layer)
for child in net.children():
for param in child.conv1[0].parameters():
param.data = torch.add(param.data,layer[0])
for child in net.children():
for param in child.layer1_basic1[0].parameters():
param.data = torch.add(param.data,layer[1])
for child in net.children():
for param in child.layer1_basic2[0].parameters():
param.data = torch.add(param.data,layer[2])
for child in net.children():
for param in child.layer1_basic3[0].parameters():
param.data = torch.add(param.data,layer[3])
for child in net.children():
for param in child.layer1_basic4[0].parameters():
param.data = torch.add(param.data,layer[4])
for child in net.children():
for param in child.layer2_basic1[0].parameters():
param.data = torch.add(param.data,layer[5])
for child in net.children():
for param in child.layer2_basic2[0].parameters():
param.data = torch.add(param.data,layer[6])
for child in net.children():
for param in child.layer2_basic3[0].parameters():
param.data = torch.add(param.data,layer[7])
for child in net.children():
for param in child.layer2_basic4[0].parameters():
param.data = torch.add(param.data,layer[8])
for child in net.children():
for param in child.layer3_basic1[0].parameters():
param.data = torch.add(param.data,layer[9])
for child in net.children():
for param in child.layer3_basic2[0].parameters():
param.data = torch.add(param.data,layer[10])
for child in net.children():
for param in child.layer3_basic3[0].parameters():
param.data = torch.add(param.data,layer[11])
for child in net.children():
for param in child.layer3_basic4[0].parameters():
param.data = torch.add(param.data,layer[12])
for child in net.children():
for param in child.layer4_basic1[0].parameters():
param.data = torch.add(param.data,layer[13])
for child in net.children():
for param in child.layer4_basic2[0].parameters():
param.data = torch.add(param.data,layer[14])
for child in net.children():
for param in child.layer4_basic3[0].parameters():
param.data = torch.add(param.data,layer[15])
for child in net.children():
for param in child.layer4_basic4[0].parameters():
param.data = torch.add(param.data,layer[16])
for child in net.children():
for param in child.layer2_downsample[0].parameters():
param.data = torch.add(param.data,layer[17])
for child in net.children():
for param in child.layer3_downsample[0].parameters():
param.data = torch.add(param.data,layer[18])
for child in net.children():
for param in child.layer4_downsample[0].parameters():
param.data = torch.add(param.data,layer[19])
for child in net.children():
for param in child.linear[0].parameters():
param.data = torch.add(param.data,layer[20])
return layer
def net_mask_mul(mask):
for child in net.children():
for param in child.conv1[0].parameters():
param.data = torch.mul(param.data,mask[0].cuda())
for child in net.children():
for param in child.layer1_basic1[0].parameters():
param.data = torch.mul(param.data,mask[1].cuda())
for child in net.children():
for param in child.layer1_basic2[0].parameters():
param.data = torch.mul(param.data,mask[2].cuda())
for child in net.children():
for param in child.layer1_basic3[0].parameters():
param.data = torch.mul(param.data,mask[3].cuda())
for child in net.children():
for param in child.layer1_basic4[0].parameters():
param.data = torch.mul(param.data,mask[4].cuda())
for child in net.children():
for param in child.layer2_basic1[0].parameters():
param.data = torch.mul(param.data,mask[5].cuda())
for child in net.children():
for param in child.layer2_basic2[0].parameters():
param.data = torch.mul(param.data,mask[6].cuda())
for child in net.children():
for param in child.layer2_basic3[0].parameters():
param.data = torch.mul(param.data,mask[7].cuda())
for child in net.children():
for param in child.layer2_basic4[0].parameters():
param.data = torch.mul(param.data,mask[8].cuda())
for child in net.children():
for param in child.layer3_basic1[0].parameters():
param.data = torch.mul(param.data,mask[9].cuda())
for child in net.children():
for param in child.layer3_basic2[0].parameters():
param.data = torch.mul(param.data,mask[10].cuda())
for child in net.children():
for param in child.layer3_basic3[0].parameters():
param.data = torch.mul(param.data,mask[11].cuda())
for child in net.children():
for param in child.layer3_basic4[0].parameters():
param.data = torch.mul(param.data,mask[12].cuda())
for child in net.children():
for param in child.layer4_basic1[0].parameters():
param.data = torch.mul(param.data,mask[13].cuda())
for child in net.children():
for param in child.layer4_basic2[0].parameters():
param.data = torch.mul(param.data,mask[14].cuda())
for child in net.children():
for param in child.layer4_basic3[0].parameters():
param.data = torch.mul(param.data,mask[15].cuda())
for child in net.children():
for param in child.layer4_basic4[0].parameters():
param.data = torch.mul(param.data,mask[16].cuda())
for child in net.children():
for param in child.layer2_downsample[0].parameters():
param.data = torch.mul(param.data,mask[17].cuda())
for child in net.children():
for param in child.layer3_downsample[0].parameters():
param.data = torch.mul(param.data,mask[18].cuda())
for child in net.children():
for param in child.layer4_downsample[0].parameters():
param.data = torch.mul(param.data,mask[19].cuda())
for child in net.children():
for param in child.linear[0].parameters():
param.data = torch.mul(param.data,mask[20].cuda())
# Model
if args.mode == 0:
print('==> Resuming from checkpoint..')
assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
checkpoint = torch.load('./checkpoint/ckpt_20180911_half_clean_B3.t0')
net = checkpoint['net']
elif args.mode == 1:
checkpoint = torch.load('./checkpoint/ckpt_20180911_half_clean_B3.t0')
ckpt = torch.load('./checkpoint/ckpt_20180911_half_clean_B2.t0')
net = checkpoint['net']
net2 = ckpt['net']
if args.resume:
print('==> Resuming from checkpoint..')
best_acc = checkpoint['acc']
else:
best_acc = 0
if use_cuda:
net.cuda()
net = torch.nn.DataParallel(net, device_ids=range(0,8))
if args.mode > 0:
net2.cuda()
net2 = torch.nn.DataParallel(net2, device_ids=range(torch.cuda.device_count()))
cudnn.benchmark = True
'''
for child in net.children():
for param in child.conv1[0].parameters():
print(param.size())
for child in net.children():
for param in child.layer1_basic1[0].parameters():
print(param.size())
for child in net.children():
for param in child.layer1_basic2[0].parameters():
print(param.size())
for child in net.children():
for param in child.layer1_basic3[0].parameters():
print(param.size())
for child in net.children():
for param in child.layer1_basic4[0].parameters():
print(param.size())
for child in net.children():
for param in child.layer2_basic1[0].parameters():
print(param.size())
for child in net.children():
for param in child.layer2_basic2[0].parameters():
print(param.size())
for child in net.children():
for param in child.layer2_basic3[0].parameters():
print(param.size())
for child in net.children():
for param in child.layer2_basic4[0].parameters():
print(param.size())
for child in net.children():
for param in child.layer3_basic1[0].parameters():
print(param.size())
for child in net.children():
for param in child.layer3_basic2[0].parameters():
print(param.size())
for child in net.children():
for param in child.layer3_basic3[0].parameters():
print(param.size())
for child in net.children():
for param in child.layer3_basic4[0].parameters():
print(param.size())
for child in net.children():
for param in child.layer4_basic1[0].parameters():
print(param.size())
for child in net.children():
for param in child.layer4_basic2[0].parameters():
print(param.size())
for child in net.children():
for param in child.layer4_basic3[0].parameters():
print(param.size())
for child in net.children():
for param in child.layer4_basic4[0].parameters():
print(param.size())
for child in net.children():
for param in child.layer2_downsample[0].parameters():
print(param.size())
for child in net.children():
for param in child.layer3_downsample[0].parameters():
print(param.size())
for child in net.children():
for param in child.layer3_downsample[0].parameters():
print(param.size())
for child in net.children():
for param in child.linear[0].parameters():
print(param.size())
'''
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)
start_epoch = args.se
num_epoch = args.ne
# Training
def train(epoch):
print('\nEpoch: %d' % epoch)
net.train()
train_loss = 0
correct = 0
total = 0
mask_channel = torch.load('mask_null.dat')
mask_channel = set_mask(set_mask(mask_channel, 0, 1), 2, 0)
for batch_idx, (inputs, targets) in enumerate(train_loader):
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
optimizer.zero_grad()
inputs, targets = Variable(inputs), Variable(targets)
outputs = net(inputs)
loss = criterion(outputs, targets)
loss.backward()
net_mask_mul(mask_channel)
'''
for child in net.children():
for param in child.conv1[0].parameters():
for i in range(param.size()[0]):
for j in range(param.size()[1]):
print(param[i,j])
print("======================================================")
add_network()
for child in net.children():
for param in child.conv1[0].parameters():
for i in range(param.size()[0]):
for j in range(param.size()[1]):
print(param[i,j])
exit()
'''
add_network()
optimizer.step()
train_loss += loss.data[0]
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += predicted.eq(targets.data).cpu().sum().item()
progress_bar(batch_idx, len(train_loader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (train_loss/(batch_idx+1), 100.*correct/total, correct, total))
def test():
global best_acc
net.eval()
test_loss = 0
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(test_loader):
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
inputs, targets = Variable(inputs, volatile=True), Variable(targets)
outputs = net(inputs)
loss = criterion(outputs, targets)
test_loss += loss.data[0]
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += predicted.eq(targets.data).cpu().sum().item()
progress_bar(batch_idx, len(test_loader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (test_loss/(batch_idx+1), 100.*correct/total, correct, total))
# Save checkpoint.
acc = 100.*correct/total
if acc > best_acc:
if args.mode == 0:
pass
else:
print('Saving..')
state = {
'net': net.module if use_cuda else net,
'acc': acc,
}
if not os.path.isdir('checkpoint'):
os.mkdir('checkpoint')
torch.save(state, './checkpoint/ckpt_20180911_half_clean_B3.t0')
best_acc = acc
return acc
# Train+inference vs. Inference
mode = args.mode
if mode == 1: # mode=1 is training & inference @ each epoch
for epoch in range(start_epoch, start_epoch+num_epoch):
train(epoch)
test()
elif mode == 0: # only inference
test()
else:
pass
| [
"[email protected]"
]
| |
fec187d97af48673db9a3cd1cb57dbaa81a53c2d | 28a462a28f443c285ca5efec181ebe36b147c167 | /tests/compile/basic/es2020/DeclarativeEnvironmentRecord.CreateImmutableBinding.spec | 32137e54076b90b69a23ec493062ece2b21f0272 | [
"BSD-3-Clause",
"BSD-2-Clause"
]
| permissive | kaist-plrg/jstar | 63e71f9156860dc21cccc33a9f6c638dfee448ea | 1282919127ea18a7e40c7a55e63a1ddaaf7d9db4 | refs/heads/main | 2022-07-22T08:12:34.947712 | 2022-02-27T04:19:33 | 2022-02-27T11:06:14 | 384,045,526 | 6 | 4 | NOASSERTION | 2022-02-27T11:05:26 | 2021-07-08T07:53:21 | Python | UTF-8 | Python | false | false | 398 | spec | 1. Let _envRec_ be the declarative Environment Record for which the method was invoked.
1. Assert: _envRec_ does not already have a binding for _N_.
1. Create an immutable binding in _envRec_ for _N_ and record that it is uninitialized. If _S_ is *true*, record that the newly created binding is a strict binding.
1. Return NormalCompletion(~empty~). | [
"[email protected]"
]
| |
922afb74fdeb65bf3a731c7e2f814a52234e3f75 | 8fd07ea363ba4263bafe25d213c72cc9a93e2b3e | /devops/Day1_fork_thread/Thread/5.凑够一定数量才能继续执行.py | d73b1743ccd66793d4ab5dc684274cdd8d96cd03 | []
| no_license | ml758392/python_tedu | 82e12ae014f0fc81230386fab07f901510fc8837 | 9f20798604db0ac8cd7b69d8c7a52ee361ebc7a7 | refs/heads/master | 2020-04-12T08:30:42.354663 | 2019-03-29T11:55:30 | 2019-03-29T11:55:30 | 162,386,878 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | # -*-coding:utf-8-*-
import threading
import time
bar = threading.Barrier(6)
def run():
print('%s---start' % threading.current_thread().name)
time.sleep(1)
bar.wait()
print('%s---end' % threading.current_thread().name)
if __name__ == '__main__':
for i in range(5):
threading.Thread(target=run).start() | [
"yy.tedu.cn"
]
| yy.tedu.cn |
59f0627ece60217800e3c91abd0f3269841b99de | a3354726b126b85987a1455bd4b1ed0a4d05f5bb | /apps/posts/templatetags/urlify.py | dbcef20ecbbcf46d92a98266c40fc00add8b6040 | []
| no_license | RonaldTheodoro/django-blog | cea90ab619e69560013a995c8d67d65e4593e0a9 | 92b64aa93c495fef835e64a98c9619cba3f518c4 | refs/heads/master | 2020-04-01T06:05:08.492523 | 2018-10-20T15:28:53 | 2018-10-20T15:28:53 | 152,932,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | from urllib.parse import quote_plus
from django import template
register = template.Library()
@register.filter
def urlify(value):
return quote_plus(value)
| [
"[email protected]"
]
| |
31642fa7ef14844e7529c37bd4b42f313d0a69bc | 32f7392217c50e1ee5a41db0414cbd6ca2427753 | /Tencent2020/txbase/emb.py | 2bf25c681898d4882c0639e0dc5cc6a532c10b48 | []
| no_license | Stella2019/KDD2020 | 0f315cd14c26bbcedc69b3982ca58d848d5d4a13 | 2604208d8bcac47ef097e6469633430637149b31 | refs/heads/main | 2023-07-02T02:22:07.707798 | 2021-08-14T06:15:04 | 2021-08-14T06:15:04 | 395,909,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,577 | py | from . import Cache
import numpy as np
class EmbBatchLoader:
def __init__(self,
all_emb_cols,
emb_base_dir=None,
key2index=None,
outer_emb=False):
"""
outer_emb: 设置该参数为True,如果是导入外部的embedding,
会造成key2index和word_emb_dict对不齐,词表不一样。
默认会认为用最大的值来填充低频词。
"""
self.all_emb_cols = all_emb_cols
self.all_emb_cols_backup = all_emb_cols
self.emb_base_dir = emb_base_dir
self.key2index = key2index
self.outer_emb = outer_emb
def _get_max_index(self, word_emb_dict):
'''
原先功能定位oov = max_index 后改为 =-1 此函数弃用
:param word_emb_dict:
:return:
'''
return str(sorted(map(int, list(word_emb_dict.keys())))[-1])
def get_emb_matrix(self, word_emb_dict, key2index_col): # modify by zlh
"""
prepare embedding for NN
initializing the embedding... id => emb vectors
the id is your own label encoding mapping...which stored in the self.key2index[col]
"""
if self.outer_emb:
# self._get_max_index(word_emb_dict) # 阿郑的是“max“为低频词
key_to_represent_rare = '-1'
else:
key_to_represent_rare = '-1' # 我的是”-1“为低频词
for _, k in word_emb_dict.items():
break
emb_size = k.shape[0]
voc_size = len(key2index_col)
# 真实的词表,编码始于1,你准备input sequence的时候编码的词表,GetSeqFeas.ipynb
# 100个词,编码就是1-100,所以初始化要+1
emb_matrix = np.zeros((voc_size + 1, emb_size)) # 0如何优化,mean?
# emb 中必须要有'-1'
if '-1' not in word_emb_dict.keys():
# emb中无-1 为全词表数据!需要自行计算均值emb vec
# 为embi 添加一个embedding
# 求词表与embi key的差
set_drop_words = list(
set(word_emb_dict.keys()).difference(set(
key2index_col.keys())))
if len(set_drop_words) > 0:
# 这些词的vector求均值作为这个oov词的embedding vector
vector_low_frequency_words = np.zeros((emb_size, ))
for w in set_drop_words:
vector_low_frequency_words += word_emb_dict[w]
vector_low_frequency_words = vector_low_frequency_words / len(
set_drop_words)
# emb添加一个key value
word_emb_dict['-1'] = vector_low_frequency_words
print(' file has ' + str(len(set_drop_words)) + \
' low frequency words and fill vector as:', vector_low_frequency_words)
for k, idx in key2index_col.items():
try:
emb_matrix[idx, :] = word_emb_dict[k]
except KeyError: # 如果k不在不在word_emb_dict中,则默认用max_key_to_represent_rare填充
# print('find oov:',(k, idx))
emb_matrix[idx, :] = word_emb_dict[key_to_represent_rare]
emb_matrix = np.float32(emb_matrix)
return emb_matrix
def load_batch_embedding(self, emb_base_name, pure_nm):
"""
批量导入embedding,目前对于一组embedding就self.all_emb_cols个变量
"""
emb_dict = {}
for col in self.all_emb_cols:
file_nm = F'{emb_base_name}_{col}'
try:
emb_dict[col] = Cache.reload_cache(
file_nm=file_nm,
pure_nm=pure_nm,
base_dir=self.emb_base_dir)['word_emb_dict']
except FileNotFoundError as e:
print("[Error]" + " = =" * 30)
print("ErrorMessage: ", e)
print("col: ", col)
print("file_nm:", file_nm)
print("[Error]" + " = =" * 30)
print(f"Raw self.all_emb_cols: {self.all_emb_cols}")
self.all_emb_cols = list(emb_dict.keys())
print(f"Updated self.all_emb_cols: {self.all_emb_cols}")
return emb_dict
def load_emb_dict_with_raw_embs(self,
marker=None,
emb_base_name=None,
sentence_id='user_id',
pure_nm=True):
if emb_base_name is None:
if marker is None:
raise ValueError(
"marker can't be None if emb_base_name is None!!")
else:
if marker.endswith("_advertiser_id") or marker.endswith(
"_user_id"):
# marker中包括了sentence_id,目前sentence_id只有_advertiser_id和_user_id
emb_base_name = F'EMB_DICT_{marker}'
else:
# marker中不包括sentence_id,需要添加
emb_base_name = F'EMB_DICT_{marker}_{sentence_id}'
else:
emb_base_name = emb_base_name.rstrip('_') # 对于一组embedding一致的名称
emb_dict_with_raw_embs = self.load_batch_embedding(
emb_base_name, pure_nm)
return emb_dict_with_raw_embs
def get_batch_emb_matrix(self,
marker=None,
emb_base_name=None,
sentence_id='user_id',
pure_nm=True):
emb_dict_with_raw_embs = self.load_emb_dict_with_raw_embs(
marker=marker,
emb_base_name=emb_base_name,
sentence_id=sentence_id,
pure_nm=pure_nm)
emb_matrix_ready_dict = {}
for col in self.all_emb_cols:
emb_matrix_ready_dict[col] = self.get_emb_matrix(
emb_dict_with_raw_embs[col], key2index_col=self.key2index[col])
print("-" * 6)
print("Done!")
# restore all_emb_cols to all_emb_cols_backup
self.all_emb_cols = self.all_emb_cols_backup
return emb_matrix_ready_dict
def get_batch_emb_matrix_by_absolute_path(self,
absolute_path_with_placeholder):
emb_matrix_ready_dict = {}
for col in self.all_emb_cols:
path = absolute_path_with_placeholder.format(col)
try:
i_raw_embs = Cache.reload_cache(
file_nm=path, base_dir=self.emb_base_dir)['word_emb_dict']
emb_matrix_ready_dict[col] = self.get_emb_matrix(
i_raw_embs, key2index_col=self.key2index[col])
except FileNotFoundError as e:
print("[Error]" + " = =" * 30)
print("ErrorMessage: ", e)
print("col: ", col)
print("file_nm:", path)
print("[Error]" + " = =" * 30)
print(f"Raw self.all_emb_cols: {self.all_emb_cols}")
self.all_emb_cols = list(emb_matrix_ready_dict.keys())
print(f"Updated self.all_emb_cols: {self.all_emb_cols}")
print("-" * 6)
print("Done!")
# restore all_emb_cols to all_emb_cols_backup
self.all_emb_cols = self.all_emb_cols_backup
return emb_matrix_ready_dict
| [
"[email protected]"
]
| |
1a9627a465aa1f53187fe367e69589eff0cf6a31 | a59d1faced9fe7348ca7143d2a8643e0ebad2132 | /pyvisdk/do/application_quiesce_fault.py | cf32f58e3304afc4abc1a5f0d39e25a272834537 | [
"MIT"
]
| permissive | Infinidat/pyvisdk | c55d0e363131a8f35d2b0e6faa3294c191dba964 | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | refs/heads/master | 2023-05-27T08:19:12.439645 | 2014-07-20T11:49:16 | 2014-07-20T11:49:16 | 4,072,898 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,288 | py |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def ApplicationQuiesceFault(vim, *args, **kwargs):
'''This fault is thrown when creating a quiesced snapshot failed because the
(user-supplied) custom pre-freeze script in the virtual machine exited with a
non-zero return code.This indicates that the script failed to perform its
quiescing task, which causes us to fail the quiesced snapshot operation.'''
obj = vim.client.factory.create('{urn:vim25}ApplicationQuiesceFault')
# do some validation checking...
if (len(args) + len(kwargs)) < 4:
raise IndexError('Expected at least 5 arguments got: %d' % len(args))
required = [ 'dynamicProperty', 'dynamicType', 'faultCause', 'faultMessage' ]
optional = [ ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
| [
"[email protected]"
]
| |
63f33a87835b8770a6f52247450c589c170448cc | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/kusto/v20210827/get_data_connection.py | 10c402e25115ff6f6d20ff26cd07edd7efef74c5 | [
"BSD-3-Clause",
"Apache-2.0"
]
| permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,654 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetDataConnectionResult',
'AwaitableGetDataConnectionResult',
'get_data_connection',
'get_data_connection_output',
]
warnings.warn("""Please use one of the variants: EventGridDataConnection, EventHubDataConnection, IotHubDataConnection.""", DeprecationWarning)
@pulumi.output_type
class GetDataConnectionResult:
"""
Class representing an data connection.
"""
def __init__(__self__, id=None, kind=None, location=None, name=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def kind(self) -> str:
"""
Kind of the endpoint for the data connection
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetDataConnectionResult(GetDataConnectionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetDataConnectionResult(
id=self.id,
kind=self.kind,
location=self.location,
name=self.name,
type=self.type)
def get_data_connection(cluster_name: Optional[str] = None,
data_connection_name: Optional[str] = None,
database_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDataConnectionResult:
"""
Class representing an data connection.
:param str cluster_name: The name of the Kusto cluster.
:param str data_connection_name: The name of the data connection.
:param str database_name: The name of the database in the Kusto cluster.
:param str resource_group_name: The name of the resource group containing the Kusto cluster.
"""
pulumi.log.warn("""get_data_connection is deprecated: Please use one of the variants: EventGridDataConnection, EventHubDataConnection, IotHubDataConnection.""")
__args__ = dict()
__args__['clusterName'] = cluster_name
__args__['dataConnectionName'] = data_connection_name
__args__['databaseName'] = database_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:kusto/v20210827:getDataConnection', __args__, opts=opts, typ=GetDataConnectionResult).value
return AwaitableGetDataConnectionResult(
id=__ret__.id,
kind=__ret__.kind,
location=__ret__.location,
name=__ret__.name,
type=__ret__.type)
@_utilities.lift_output_func(get_data_connection)
def get_data_connection_output(cluster_name: Optional[pulumi.Input[str]] = None,
data_connection_name: Optional[pulumi.Input[str]] = None,
database_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetDataConnectionResult]:
"""
Class representing an data connection.
:param str cluster_name: The name of the Kusto cluster.
:param str data_connection_name: The name of the data connection.
:param str database_name: The name of the database in the Kusto cluster.
:param str resource_group_name: The name of the resource group containing the Kusto cluster.
"""
pulumi.log.warn("""get_data_connection is deprecated: Please use one of the variants: EventGridDataConnection, EventHubDataConnection, IotHubDataConnection.""")
...
| [
"[email protected]"
]
| |
4b8eff8148ed0ac19a6ac1759eb66417d0b8a4a0 | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part010263.py | 46c4f1fd19764edd4977285229bf635d77cfbf13 | []
| no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,300 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher51281(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({}), [
(VariableWithCount('i3.2.1.2.1.0', 1, 1, None), Mul),
(VariableWithCount('i3.2.1.2.1.0_1', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher51281._instance is None:
CommutativeMatcher51281._instance = CommutativeMatcher51281()
return CommutativeMatcher51281._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 51280
return
yield
from collections import deque | [
"[email protected]"
]
| |
4c98e08132aeae3e18d23763c7ba5bf9f7915f22 | 3970706a16be81a63b2476222c1b061da9f11b70 | /estimator/download_data.py | 4cf480e781e68353a149d1325da327b6ec2ae348 | []
| no_license | sfujiwara/tensorflow-examples | 3de3fb90c6204bec2c455f8f1b9aa98a14f393b9 | 6b9dd3ba27e1b0d021c322f5504e888b6b7ed4fb | refs/heads/master | 2023-04-18T11:33:43.271751 | 2020-12-17T20:49:57 | 2020-12-17T20:49:57 | 126,787,804 | 1 | 0 | null | 2023-03-25T00:25:33 | 2018-03-26T07:06:44 | Python | UTF-8 | Python | false | false | 426 | py | import argparse
import tensorflow_datasets as tfds
parser = argparse.ArgumentParser()
parser.add_argument('--tfds_dir', type=str)
parser.add_argument('--dataset_name', type=str)
args = parser.parse_args()
TFDS_DIR = args.tfds_dir
DATASET_NAME = args.dataset_name
def main():
builder = tfds.builder(DATASET_NAME, data_dir=TFDS_DIR)
builder.download_and_prepare()
return
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
a015fd2835d1017c32b4f5d5ad8ec3e72eb99d16 | a78f0d96c33d8e3399bffa85ffba5c8e598e8492 | /Array/55_sort_wave.py | 94cb0b44a844fba62865e284c91e58d6ea58cb23 | []
| no_license | ANKITPODDER2000/data-structure | 78203fabf9ea7ef580d41d4d44cbff1e6c9f397d | 3c1542562e74c0888718273e16206a755b193d4e | refs/heads/main | 2023-02-04T15:40:21.017573 | 2020-12-31T10:45:18 | 2020-12-31T10:45:18 | 325,778,614 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | from get_array_helper import take_array_user
def sort_wave(arr , n):
for i in range(0 , n , 2):
if arr[i]<arr[i-1] and i > 0:
arr[i] , arr[i-1] = arr[i-1] , arr[i]
if arr[i]<arr[i+1] and i<n-1:
arr[i] , arr[i+1] = arr[i+1] , arr[i]
arr , n = take_array_user()
print("Sorting in wave form .....")
sort_wave(arr , n)
print("Sorting done .....")
print("Array after sorting in wave form : ",arr)
| [
"[email protected]"
]
| |
479eabc4c27c4631d1beee3ab1cb8a2c9be9a668 | 4b7e282fe480415f5d52c0fc0429f144156190fe | /google/ads/googleads/v8/services/types/geographic_view_service.py | 1c6a192bba2848acf2005e44cd07b66cdd125389 | [
"Apache-2.0"
]
| permissive | Z2Xsoft/google-ads-python | c4750357bb19da91bb3b6bf2fa84bef9d2df36d3 | 1779d52a0446c8afb2437b0a9e103dcb849f5590 | refs/heads/main | 2023-08-18T15:22:17.840364 | 2021-09-26T04:08:53 | 2021-09-26T04:08:53 | 410,444,398 | 0 | 0 | Apache-2.0 | 2021-09-26T04:08:53 | 2021-09-26T03:55:38 | null | UTF-8 | Python | false | false | 1,237 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v8.services",
marshal="google.ads.googleads.v8",
manifest={"GetGeographicViewRequest",},
)
class GetGeographicViewRequest(proto.Message):
r"""Request message for
[GeographicViewService.GetGeographicView][google.ads.googleads.v8.services.GeographicViewService.GetGeographicView].
Attributes:
resource_name (str):
Required. The resource name of the geographic
view to fetch.
"""
resource_name = proto.Field(proto.STRING, number=1,)
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"[email protected]"
]
| |
98cb85d1402933244f795a346bdc4fd0313236fe | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/scrapy_scrapy/scrapy-master/scrapy/commands/startproject.py | 5941066326a89f8907da69a7681f54c726320d4d | []
| no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 3,863 | py | from __future__ import print_function
import re
import os
import string
from importlib import import_module
from os.path import join, exists, abspath
from shutil import ignore_patterns, move, copy2, copystat
import scrapy
from scrapy.commands import ScrapyCommand
from scrapy.utils.template import render_templatefile, string_camelcase
from scrapy.exceptions import UsageError
TEMPLATES_TO_RENDER = (
('scrapy.cfg',),
('${project_name}', 'settings.py.tmpl'),
('${project_name}', 'items.py.tmpl'),
('${project_name}', 'pipelines.py.tmpl'),
('${project_name}', 'middlewares.py.tmpl'),
)
IGNORE = ignore_patterns('*.pyc', '.svn')
class Command(ScrapyCommand):
requires_project = False
default_settings = {'LOG_ENABLED': False}
def syntax(self):
return "<project_name> [project_dir]"
def short_desc(self):
return "Create new project"
def _is_valid_name(self, project_name):
def _module_exists(module_name):
try:
import_module(module_name)
return True
except ImportError:
return False
if not re.search(r'^[_a-zA-Z]\w*$', project_name):
print('Error: Project names must begin with a letter and contain'\
' only\nletters, numbers and underscores')
elif _module_exists(project_name):
print('Error: Module %r already exists' % project_name)
else:
return True
return False
def _copytree(self, src, dst):
"""
Since the original function always creates the directory, to resolve
the issue a new function had to be created. It's a simple copy and
was reduced for this case.
More info at:
https://github.com/scrapy/scrapy/pull/2005
"""
ignore = IGNORE
names = os.listdir(src)
ignored_names = ignore(src, names)
if not os.path.exists(dst):
os.makedirs(dst)
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
if os.path.isdir(srcname):
self._copytree(srcname, dstname)
else:
copy2(srcname, dstname)
copystat(src, dst)
def run(self, args, opts):
if len(args) not in (1, 2):
raise UsageError()
project_name = args[0]
project_dir = args[0]
if len(args) == 2:
project_dir = args[1]
if exists(join(project_dir, 'scrapy.cfg')):
self.exitcode = 1
print('Error: scrapy.cfg already exists in %s' % abspath(project_dir))
return
if not self._is_valid_name(project_name):
self.exitcode = 1
return
self._copytree(self.templates_dir, abspath(project_dir))
move(join(project_dir, 'module'), join(project_dir, project_name))
for paths in TEMPLATES_TO_RENDER:
path = join(*paths)
tplfile = join(project_dir,
string.Template(path).substitute(project_name=project_name))
render_templatefile(tplfile, project_name=project_name,
ProjectName=string_camelcase(project_name))
print("New Scrapy project %r, using template directory %r, created in:" % \
(project_name, self.templates_dir))
print(" %s\n" % abspath(project_dir))
print("You can start your first spider with:")
print(" cd %s" % project_dir)
print(" scrapy genspider example example.com")
@property
def templates_dir(self):
_templates_base_dir = self.settings['TEMPLATES_DIR'] or \
join(scrapy.__path__[0], 'templates')
return join(_templates_base_dir, 'project')
| [
"[email protected]"
]
| |
8e3433cc468d8d0c729fe477b522903a60d3acd2 | e27333261b8e579564016c71d2061cc33972a8b8 | /.history/api/UnigramLanguageModelImplementation_20210809170904.py | 6ea53ba81f55833e5414f6e86eea471894cdaf2c | []
| no_license | Dustyik/NewsTweet_InformationRetrieval | 882e63dd20bc9101cbf48afa6c3302febf1989b1 | d9a6d92b51c288f5bcd21ea1cc54772910fa58f7 | refs/heads/master | 2023-07-01T09:12:53.215563 | 2021-08-12T08:28:33 | 2021-08-12T08:28:33 | 382,780,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,153 | py | import math
from IPython.display import display
import sys
from BM25implementation import QueryParsers
ALPHA = 0.75
NORMALIZE_PROBABILITY = True
class UnigramLanguageModel:
def __init__(self, tweets_data): #tweets is a pandas dataframe
self.tweets_data = tweets_data
self.wordsCollectionFrequencyDictionary = self.create_words_frequency_dict(tweets_data)
def create_words_frequency_dict(self, tweets_data, collection = True):
word_frequency_dictionary = {}
if collection:
tweets = tweets_data.clean_text.tolist()
for sentence in tweets:
sentence_list = list(sentence.split(" "))
for word in sentence_list:
if word in word_frequency_dictionary:
word_frequency_dictionary[word] += 1
else:
word_frequency_dictionary[word] = 1
else:
for word in tweets_data:
if word in word_frequency_dictionary:
word_frequency_dictionary[word] += 1
else:
word_frequency_dictionary[word] = 1
return word_frequency_dictionary
def calculate_total_no_of_words(self, wordsCollectionFrequencyDictionary):
values = wordsCollectionFrequencyDictionary.values()
total = sum(values)
return total
def calculate_unigram_probability(self, word: str, wordCollectionFrequencyDictionary):
totalNumberOfWords = self.calculate_total_no_of_words(wordCollectionFrequencyDictionary)
try:
value = wordCollectionFrequencyDictionary[word]/totalNumberOfWords
except KeyError as ke:
value = 1/totalNumberOfWords #add one smoothing for documents
print (word)
print (wordCollectionFrequencyDictionary)
print (value, totalNumberOfWords)
return value
def calculate_interpolated_sentence_probability(self, querySentence:list, document, alpha=ALPHA, normalize_probability=NORMALIZE_PROBABILITY):
total_score = 1
list_of_strings = list(document.split(" "))
print (list_of_strings)
documentWordFrequencyDictionary = self.create_words_frequency_dict(list_of_strings, collection = False)
for word in querySentence:
score_of_word = alpha*(self.calculate_unigram_probability(word, documentWordFrequencyDictionary)) + (1 - alpha)*(self.calculate_unigram_probability(word, self.wordsCollectionFrequencyDictionary))
total_score *= score_of_word
sys.exit()
if normalize_probability == True:
return total_score
else:
return (math.log(total_score)/math.log(2))
def getQueryLikelihoodModelScore(self, querySentence:list):
querySentenceList = QueryParsers(querySentence).query
self.tweets_data["QueryLikelihoodModelScore"] = self.tweets_data.apply(lambda row: self.calculate_interpolated_sentence_probability(querySentenceList, row.clean_text), axis = 1)
#display(self.tweets_data)
return
| [
"[email protected]"
]
| |
6db4be0988c7548d0f320f8e6b8663566e739aed | 93db886848da0d584e022da861f8e4065978bf69 | /americancultures/lib/python3.7/site-packages/oauthlib/oauth1/rfc5849/signature.py | c96fb88dfadbc8cce9f47c5ab54785f0ecb82515 | []
| no_license | jiyoojeong/code_examples_Jan2020 | 91096d7b5b8ac97b49ddfd348f9b75422bec14c8 | 4f0331f87b595b66a0c17db8e8fb2c0c99eff60e | refs/heads/master | 2020-12-27T09:36:53.836823 | 2020-02-03T00:13:46 | 2020-02-03T00:13:46 | 237,853,835 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,246 | py | # -*- coding: utf-8 -*-
"""
oauthlib.oauth1.rfc5849.signature
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module represents a direct implementation of `section 3.4`_ of the spec.
Terminology:
* Client: software interfacing with an OAuth API
* Server: the API provider
* Resource Owner: the user who is granting authorization to the client
Steps for signing a request:
1. Collect parameters from the uri sort.py, auth header, & body
2. Normalize those parameters
3. Normalize the uri
4. Pass the normalized uri, normalized parameters, and http method to
construct the base string
5. Pass the base string and any keys needed to a signing function
.. _`section 3.4`: https://tools.ietf.org/html/rfc5849#section-3.4
"""
from __future__ import absolute_import, unicode_literals
import binascii
import hashlib
import hmac
import logging
from oauthlib.common import (extract_params, safe_string_equals,
unicode_type, urldecode)
from . import utils
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
log = logging.getLogger(__name__)
def signature_base_string(http_method, base_str_uri,
normalized_encoded_request_parameters):
"""**Construct the signature base string.**
Per `section 3.4.1.1`_ of the spec.
For example, the HTTP request::
POST /request?b5=%3D%253D&a3=a&c%40=&a2=r%20b HTTP/1.1
Host: example.com
Content-Type: application/x-www-form-urlencoded
Authorization: OAuth realm="Example",
oauth_consumer_key="9djdj82h48djs9d2",
oauth_token="kkk9d7dh3k39sjv7",
oauth_signature_method="HMAC-SHA1",
oauth_timestamp="137131201",
oauth_nonce="7d8f3e4a",
oauth_signature="bYT5CMsGcbgUdFHObYMEfcx6bsw%3D"
c2&a3=2+q
is represented by the following signature base string (line breaks
are for display purposes only)::
POST&http%3A%2F%2Fexample.com%2Frequest&a2%3Dr%2520b%26a3%3D2%2520q
%26a3%3Da%26b5%3D%253D%25253D%26c%2540%3D%26c2%3D%26oauth_consumer_
key%3D9djdj82h48djs9d2%26oauth_nonce%3D7d8f3e4a%26oauth_signature_m
ethod%3DHMAC-SHA1%26oauth_timestamp%3D137131201%26oauth_token%3Dkkk
9d7dh3k39sjv7
.. _`section 3.4.1.1`: https://tools.ietf.org/html/rfc5849#section-3.4.1.1
"""
# The signature base string is constructed by concatenating together,
# in order, the following HTTP request elements:
# 1. The HTTP request method in uppercase. For example: "HEAD",
# "GET", "POST", etc. If the request uses a custom HTTP method, it
# MUST be encoded (`Section 3.6`_).
#
# .. _`Section 3.6`: https://tools.ietf.org/html/rfc5849#section-3.6
base_string = utils.escape(http_method.upper())
# 2. An "&" character (ASCII code 38).
base_string += '&'
# 3. The base string URI from `Section 3.4.1.2`_, after being encoded
# (`Section 3.6`_).
#
# .. _`Section 3.4.1.2`: https://tools.ietf.org/html/rfc5849#section-3.4.1.2
# .. _`Section 3.4.6`: https://tools.ietf.org/html/rfc5849#section-3.4.6
base_string += utils.escape(base_str_uri)
# 4. An "&" character (ASCII code 38).
base_string += '&'
# 5. The request parameters as normalized in `Section 3.4.1.3.2`_, after
# being encoded (`Section 3.6`).
#
# .. _`Section 3.4.1.3.2`: https://tools.ietf.org/html/rfc5849#section-3.4.1.3.2
# .. _`Section 3.4.6`: https://tools.ietf.org/html/rfc5849#section-3.4.6
base_string += utils.escape(normalized_encoded_request_parameters)
return base_string
def base_string_uri(uri, host=None):
"""**Base String URI**
Per `section 3.4.1.2`_ of RFC 5849.
For example, the HTTP request::
GET /r%20v/X?id=123 HTTP/1.1
Host: EXAMPLE.COM:80
is represented by the base string URI: "http://example.com/r%20v/X".
In another example, the HTTPS request::
GET /?q=1 HTTP/1.1
Host: www.example.net:8080
is represented by the base string URI: "https://www.example.net:8080/".
.. _`section 3.4.1.2`: https://tools.ietf.org/html/rfc5849#section-3.4.1.2
The host argument overrides the netloc part of the uri argument.
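    A doctest-style illustration of the first request above::
        >>> base_string_uri('HTTP://EXAMPLE.COM:80/r%20v/X?id=123')
        'http://example.com/r%20v/X'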
"""
if not isinstance(uri, unicode_type):
raise ValueError('uri must be a unicode object.')
# FIXME: urlparse does not support unicode
scheme, netloc, path, params, query, fragment = urlparse.urlparse(uri)
# The scheme, authority, and path of the request resource URI `RFC3986`
# are included by constructing an "http" or "https" URI representing
    # the request resource (without the query or fragment) as follows:
#
# .. _`RFC3986`: https://tools.ietf.org/html/rfc3986
if not scheme or not netloc:
raise ValueError('uri must include a scheme and netloc')
# Per `RFC 2616 section 5.1.2`_:
#
# Note that the absolute path cannot be empty; if none is present in
# the original URI, it MUST be given as "/" (the server root).
#
# .. _`RFC 2616 section 5.1.2`: https://tools.ietf.org/html/rfc2616#section-5.1.2
if not path:
path = '/'
# 1. The scheme and host MUST be in lowercase.
scheme = scheme.lower()
netloc = netloc.lower()
# 2. The host and port values MUST match the content of the HTTP
# request "Host" header field.
if host is not None:
netloc = host.lower()
# 3. The port MUST be included if it is not the default port for the
# scheme, and MUST be excluded if it is the default. Specifically,
# the port MUST be excluded when making an HTTP request `RFC2616`_
# to port 80 or when making an HTTPS request `RFC2818`_ to port 443.
# All other non-default port numbers MUST be included.
#
# .. _`RFC2616`: https://tools.ietf.org/html/rfc2616
# .. _`RFC2818`: https://tools.ietf.org/html/rfc2818
default_ports = (
('http', '80'),
('https', '443'),
)
if ':' in netloc:
host, port = netloc.split(':', 1)
if (scheme, port) in default_ports:
netloc = host
v = urlparse.urlunparse((scheme, netloc, path, params, '', ''))
# RFC 5849 does not specify which characters are encoded in the
# "base string URI", nor how they are encoded - which is very bad, since
# the signatures won't match if there are any differences. Fortunately,
    # most URIs only use characters that are clearly not encoded (e.g. digits
    # and A-Z, a-z), so implementations have avoided any differences in practice.
#
# The example from its section 3.4.1.2 illustrates that spaces in
# the path are percent encoded. But it provides no guidance as to what other
# characters (if any) must be encoded (nor how); nor if characters in the
# other components are to be encoded or not.
#
# This implementation **assumes** that **only** the space is percent-encoded
# and it is done to the entire value (not just to spaces in the path).
#
# This code may need to be changed if it is discovered that other characters
# are expected to be encoded.
#
# Note: the "base string URI" returned by this function will be encoded
# again before being concatenated into the "signature base string". So any
# spaces in the URI will actually appear in the "signature base string"
# as "%2520" (the "%20" further encoded according to section 3.6).
return v.replace(' ', '%20')
# ** Request Parameters **
#
# Per `section 3.4.1.3`_ of the spec.
#
# In order to guarantee a consistent and reproducible representation of
# the request parameters, the parameters are collected and decoded to
# their original decoded form. They are then sorted and encoded in a
# particular manner that is often different from their original
# encoding scheme, and concatenated into a single string.
#
# .. _`section 3.4.1.3`: https://tools.ietf.org/html/rfc5849#section-3.4.1.3
def collect_parameters(uri_query='', body=[], headers=None,
exclude_oauth_signature=True, with_realm=False):
"""**Parameter Sources**
Parameters starting with `oauth_` will be unescaped.
Body parameters must be supplied as a dict, a list of 2-tuples, or a
    formencoded query string.
Headers must be supplied as a dict.
Per `section 3.4.1.3.1`_ of the spec.
For example, the HTTP request::
POST /request?b5=%3D%253D&a3=a&c%40=&a2=r%20b HTTP/1.1
Host: example.com
Content-Type: application/x-www-form-urlencoded
Authorization: OAuth realm="Example",
oauth_consumer_key="9djdj82h48djs9d2",
oauth_token="kkk9d7dh3k39sjv7",
oauth_signature_method="HMAC-SHA1",
oauth_timestamp="137131201",
oauth_nonce="7d8f3e4a",
oauth_signature="djosJKDKJSD8743243%2Fjdk33klY%3D"
c2&a3=2+q
contains the following (fully decoded) parameters used in the
    signature base string::
+------------------------+------------------+
| Name | Value |
+------------------------+------------------+
| b5 | =%3D |
| a3 | a |
| c@ | |
| a2 | r b |
| oauth_consumer_key | 9djdj82h48djs9d2 |
| oauth_token | kkk9d7dh3k39sjv7 |
| oauth_signature_method | HMAC-SHA1 |
| oauth_timestamp | 137131201 |
| oauth_nonce | 7d8f3e4a |
| c2 | |
| a3 | 2 q |
+------------------------+------------------+
Note that the value of "b5" is "=%3D" and not "==". Both "c@" and
"c2" have empty values. While the encoding rules specified in this
specification for the purpose of constructing the signature base
string exclude the use of a "+" character (ASCII code 43) to
represent an encoded space character (ASCII code 32), this practice
is widely used in "application/x-www-form-urlencoded" encoded values,
and MUST be properly decoded, as demonstrated by one of the "a3"
parameter instances (the "a3" parameter is used twice in this
request).
.. _`section 3.4.1.3.1`: https://tools.ietf.org/html/rfc5849#section-3.4.1.3.1
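    A minimal, illustrative example (decoding is delegated to
    ``oauthlib.common.urldecode``)::
        >>> collect_parameters(uri_query='b5=%3D%253D&a3=a&c%40=&a2=r%20b',
        ...                    body='c2&a3=2+q')
        [('b5', '=%3D'), ('a3', 'a'), ('c@', ''), ('a2', 'r b'), ('c2', ''), ('a3', '2 q')]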
"""
headers = headers or {}
params = []
# The parameters from the following sources are collected into a single
# list of name/value pairs:
    # * The query component of the HTTP request URI as defined by
    #   `RFC3986, Section 3.4`_. The query component is parsed into a list
# of name/value pairs by treating it as an
# "application/x-www-form-urlencoded" string, separating the names
# and values and decoding them as defined by
# `W3C.REC-html40-19980424`_, Section 17.13.4.
#
# .. _`RFC3986, Section 3.4`: https://tools.ietf.org/html/rfc3986#section-3.4
# .. _`W3C.REC-html40-19980424`: https://tools.ietf.org/html/rfc5849#ref-W3C.REC-html40-19980424
if uri_query:
params.extend(urldecode(uri_query))
# * The OAuth HTTP "Authorization" header field (`Section 3.5.1`_) if
# present. The header's content is parsed into a list of name/value
# pairs excluding the "realm" parameter if present. The parameter
# values are decoded as defined by `Section 3.5.1`_.
#
# .. _`Section 3.5.1`: https://tools.ietf.org/html/rfc5849#section-3.5.1
if headers:
headers_lower = dict((k.lower(), v) for k, v in headers.items())
authorization_header = headers_lower.get('authorization')
if authorization_header is not None:
params.extend([i for i in utils.parse_authorization_header(
authorization_header) if with_realm or i[0] != 'realm'])
# * The HTTP request entity-body, but only if all of the following
# conditions are met:
# * The entity-body is single-part.
#
# * The entity-body follows the encoding requirements of the
# "application/x-www-form-urlencoded" content-type as defined by
# `W3C.REC-html40-19980424`_.
# * The HTTP request entity-header includes the "Content-Type"
# header field set to "application/x-www-form-urlencoded".
#
# .._`W3C.REC-html40-19980424`: https://tools.ietf.org/html/rfc5849#ref-W3C.REC-html40-19980424
# TODO: enforce header param inclusion conditions
bodyparams = extract_params(body) or []
params.extend(bodyparams)
# ensure all oauth params are unescaped
unescaped_params = []
for k, v in params:
if k.startswith('oauth_'):
v = utils.unescape(v)
unescaped_params.append((k, v))
# The "oauth_signature" parameter MUST be excluded from the signature
# base string if present.
if exclude_oauth_signature:
unescaped_params = list(filter(lambda i: i[0] != 'oauth_signature',
unescaped_params))
return unescaped_params
def normalize_parameters(params):
"""**Parameters Normalization**
Per `section 3.4.1.3.2`_ of the spec.
For example, the list of parameters from the previous section would
be normalized as follows:
Encoded::
+------------------------+------------------+
| Name | Value |
+------------------------+------------------+
| b5 | %3D%253D |
| a3 | a |
| c%40 | |
| a2 | r%20b |
| oauth_consumer_key | 9djdj82h48djs9d2 |
| oauth_token | kkk9d7dh3k39sjv7 |
| oauth_signature_method | HMAC-SHA1 |
| oauth_timestamp | 137131201 |
| oauth_nonce | 7d8f3e4a |
| c2 | |
| a3 | 2%20q |
+------------------------+------------------+
Sorted::
+------------------------+------------------+
| Name | Value |
+------------------------+------------------+
| a2 | r%20b |
| a3 | 2%20q |
| a3 | a |
| b5 | %3D%253D |
| c%40 | |
| c2 | |
| oauth_consumer_key | 9djdj82h48djs9d2 |
| oauth_nonce | 7d8f3e4a |
| oauth_signature_method | HMAC-SHA1 |
| oauth_timestamp | 137131201 |
| oauth_token | kkk9d7dh3k39sjv7 |
+------------------------+------------------+
Concatenated Pairs::
+-------------------------------------+
| Name=Value |
+-------------------------------------+
| a2=r%20b |
| a3=2%20q |
| a3=a |
| b5=%3D%253D |
| c%40= |
| c2= |
| oauth_consumer_key=9djdj82h48djs9d2 |
| oauth_nonce=7d8f3e4a |
| oauth_signature_method=HMAC-SHA1 |
| oauth_timestamp=137131201 |
| oauth_token=kkk9d7dh3k39sjv7 |
+-------------------------------------+
and concatenated together into a single string (line breaks are for
display purposes only)::
a2=r%20b&a3=2%20q&a3=a&b5=%3D%253D&c%40=&c2=&oauth_consumer_key=9dj
dj82h48djs9d2&oauth_nonce=7d8f3e4a&oauth_signature_method=HMAC-SHA1
&oauth_timestamp=137131201&oauth_token=kkk9d7dh3k39sjv7
.. _`section 3.4.1.3.2`: https://tools.ietf.org/html/rfc5849#section-3.4.1.3.2
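    A minimal, illustrative example::
        >>> normalize_parameters([('a3', '2 q'), ('a2', 'r b'), ('a3', 'a')])
        'a2=r%20b&a3=2%20q&a3=a'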
"""
# The parameters collected in `Section 3.4.1.3`_ are normalized into a
# single string as follows:
#
# .. _`Section 3.4.1.3`: https://tools.ietf.org/html/rfc5849#section-3.4.1.3
# 1. First, the name and value of each parameter are encoded
# (`Section 3.6`_).
#
# .. _`Section 3.6`: https://tools.ietf.org/html/rfc5849#section-3.6
key_values = [(utils.escape(k), utils.escape(v)) for k, v in params]
# 2. The parameters are sorted by name, using ascending byte value
# ordering. If two or more parameters share the same name, they
# are sorted by their value.
key_values.sort()
# 3. The name of each parameter is concatenated to its corresponding
# value using an "=" character (ASCII code 61) as a separator, even
# if the value is empty.
parameter_parts = ['{0}={1}'.format(k, v) for k, v in key_values]
# 4. The sorted name/value pairs are concatenated together into a
# single string by using an "&" character (ASCII code 38) as
# separator.
return '&'.join(parameter_parts)
def sign_hmac_sha1_with_client(base_string, client):
return sign_hmac_sha1(base_string,
client.client_secret,
client.resource_owner_secret
)
def sign_hmac_sha1(base_string, client_secret, resource_owner_secret):
"""**HMAC-SHA1**
The "HMAC-SHA1" signature method uses the HMAC-SHA1 signature
algorithm as defined in `RFC2104`_::
digest = HMAC-SHA1 (key, text)
Per `section 3.4.2`_ of the spec.
.. _`RFC2104`: https://tools.ietf.org/html/rfc2104
.. _`section 3.4.2`: https://tools.ietf.org/html/rfc5849#section-3.4.2
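    A usage sketch; the secrets are placeholders, not real credentials::
        base_str = signature_base_string('POST', uri, norm_params)
        oauth_signature = sign_hmac_sha1(base_str, 'client-secret',
                                         'token-secret')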
"""
# The HMAC-SHA1 function variables are used in following way:
# text is set to the value of the signature base string from
# `Section 3.4.1.1`_.
#
# .. _`Section 3.4.1.1`: https://tools.ietf.org/html/rfc5849#section-3.4.1.1
text = base_string
# key is set to the concatenated values of:
# 1. The client shared-secret, after being encoded (`Section 3.6`_).
#
# .. _`Section 3.6`: https://tools.ietf.org/html/rfc5849#section-3.6
key = utils.escape(client_secret or '')
# 2. An "&" character (ASCII code 38), which MUST be included
# even when either secret is empty.
key += '&'
# 3. The token shared-secret, after being encoded (`Section 3.6`_).
#
# .. _`Section 3.6`: https://tools.ietf.org/html/rfc5849#section-3.6
key += utils.escape(resource_owner_secret or '')
# FIXME: HMAC does not support unicode!
key_utf8 = key.encode('utf-8')
text_utf8 = text.encode('utf-8')
signature = hmac.new(key_utf8, text_utf8, hashlib.sha1)
# digest is used to set the value of the "oauth_signature" protocol
# parameter, after the result octet string is base64-encoded
# per `RFC2045, Section 6.8`.
#
# .. _`RFC2045, Section 6.8`: https://tools.ietf.org/html/rfc2045#section-6.8
return binascii.b2a_base64(signature.digest())[:-1].decode('utf-8')
def sign_hmac_sha256_with_client(base_string, client):
return sign_hmac_sha256(base_string,
client.client_secret,
client.resource_owner_secret
)
def sign_hmac_sha256(base_string, client_secret, resource_owner_secret):
"""**HMAC-SHA256**
The "HMAC-SHA256" signature method uses the HMAC-SHA256 signature
algorithm as defined in `RFC4634`_::
digest = HMAC-SHA256 (key, text)
Per `section 3.4.2`_ of the spec.
.. _`RFC4634`: https://tools.ietf.org/html/rfc4634
.. _`section 3.4.2`: https://tools.ietf.org/html/rfc5849#section-3.4.2
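    A usage sketch with placeholder secrets; the call shape is identical to
    ``sign_hmac_sha1``, only the hash function differs::
        oauth_signature = sign_hmac_sha256(base_str, 'client-secret',
                                           'token-secret')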
"""
# The HMAC-SHA256 function variables are used in following way:
# text is set to the value of the signature base string from
# `Section 3.4.1.1`_.
#
# .. _`Section 3.4.1.1`: https://tools.ietf.org/html/rfc5849#section-3.4.1.1
text = base_string
# key is set to the concatenated values of:
# 1. The client shared-secret, after being encoded (`Section 3.6`_).
#
# .. _`Section 3.6`: https://tools.ietf.org/html/rfc5849#section-3.6
key = utils.escape(client_secret or '')
# 2. An "&" character (ASCII code 38), which MUST be included
# even when either secret is empty.
key += '&'
# 3. The token shared-secret, after being encoded (`Section 3.6`_).
#
# .. _`Section 3.6`: https://tools.ietf.org/html/rfc5849#section-3.6
key += utils.escape(resource_owner_secret or '')
# FIXME: HMAC does not support unicode!
key_utf8 = key.encode('utf-8')
text_utf8 = text.encode('utf-8')
signature = hmac.new(key_utf8, text_utf8, hashlib.sha256)
# digest is used to set the value of the "oauth_signature" protocol
# parameter, after the result octet string is base64-encoded
# per `RFC2045, Section 6.8`.
#
# .. _`RFC2045, Section 6.8`: https://tools.ietf.org/html/rfc2045#section-6.8
return binascii.b2a_base64(signature.digest())[:-1].decode('utf-8')
_jwtrs1 = None
# jwt has some nice pycrypto/cryptography abstractions
def _jwt_rs1_signing_algorithm():
global _jwtrs1
if _jwtrs1 is None:
import jwt.algorithms as jwtalgo
_jwtrs1 = jwtalgo.RSAAlgorithm(jwtalgo.hashes.SHA1)
return _jwtrs1
def sign_rsa_sha1(base_string, rsa_private_key):
"""**RSA-SHA1**
Per `section 3.4.3`_ of the spec.
The "RSA-SHA1" signature method uses the RSASSA-PKCS1-v1_5 signature
algorithm as defined in `RFC3447, Section 8.2`_ (also known as
PKCS#1), using SHA-1 as the hash function for EMSA-PKCS1-v1_5. To
use this method, the client MUST have established client credentials
with the server that included its RSA public key (in a manner that is
beyond the scope of this specification).
.. _`section 3.4.3`: https://tools.ietf.org/html/rfc5849#section-3.4.3
.. _`RFC3447, Section 8.2`: https://tools.ietf.org/html/rfc3447#section-8.2
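    A usage sketch; ``private_key_pem`` stands in for a PEM-encoded RSA
    private key string, and the jwt and cryptography packages must be
    installed::
        oauth_signature = sign_rsa_sha1(base_str, private_key_pem)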
"""
if isinstance(base_string, unicode_type):
base_string = base_string.encode('utf-8')
# TODO: finish RSA documentation
alg = _jwt_rs1_signing_algorithm()
key = _prepare_key_plus(alg, rsa_private_key)
    s = alg.sign(base_string, key)
return binascii.b2a_base64(s)[:-1].decode('utf-8')
def sign_rsa_sha1_with_client(base_string, client):
if not client.rsa_key:
raise ValueError('rsa_key is required when using RSA signature method.')
return sign_rsa_sha1(base_string, client.rsa_key)
def sign_plaintext(client_secret, resource_owner_secret):
"""Sign a request using plaintext.
Per `section 3.4.4`_ of the spec.
The "PLAINTEXT" method does not employ a signature algorithm. It
MUST be used with a transport-layer mechanism such as TLS or SSL (or
sent over a secure channel with equivalent protections). It does not
utilize the signature base string or the "oauth_timestamp" and
"oauth_nonce" parameters.
.. _`section 3.4.4`: https://tools.ietf.org/html/rfc5849#section-3.4.4
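    A minimal, illustrative example with placeholder secrets::
        >>> sign_plaintext('app secret', 'token secret')
        'app%20secret&token%20secret'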
"""
# The "oauth_signature" protocol parameter is set to the concatenated
# value of:
# 1. The client shared-secret, after being encoded (`Section 3.6`_).
#
# .. _`Section 3.6`: https://tools.ietf.org/html/rfc5849#section-3.6
signature = utils.escape(client_secret or '')
# 2. An "&" character (ASCII code 38), which MUST be included even
# when either secret is empty.
signature += '&'
# 3. The token shared-secret, after being encoded (`Section 3.6`_).
#
# .. _`Section 3.6`: https://tools.ietf.org/html/rfc5849#section-3.6
signature += utils.escape(resource_owner_secret or '')
return signature
def sign_plaintext_with_client(base_string, client):
return sign_plaintext(client.client_secret, client.resource_owner_secret)
def verify_hmac_sha1(request, client_secret=None,
resource_owner_secret=None):
"""Verify a HMAC-SHA1 signature.
Per `section 3.4`_ of the spec.
.. _`section 3.4`: https://tools.ietf.org/html/rfc5849#section-3.4
To satisfy `RFC2616 section 5.2`_ item 1, the request argument's uri
attribute MUST be an absolute URI whose netloc part identifies the
origin server or gateway on which the resource resides. Any Host
item of the request argument's headers dict attribute will be
ignored.
.. _`RFC2616 section 5.2`: https://tools.ietf.org/html/rfc2616#section-5.2
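    A usage sketch; ``request`` stands in for an ``oauthlib.common.Request``
    carrying ``params``, ``uri``, ``http_method`` and ``signature``::
        if not verify_hmac_sha1(request, client_secret, resource_owner_secret):
            raise ValueError('Invalid signature.')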
"""
norm_params = normalize_parameters(request.params)
bs_uri = base_string_uri(request.uri)
sig_base_str = signature_base_string(request.http_method, bs_uri,
norm_params)
signature = sign_hmac_sha1(sig_base_str, client_secret,
resource_owner_secret)
match = safe_string_equals(signature, request.signature)
if not match:
log.debug('Verify HMAC-SHA1 failed: signature base string: %s',
sig_base_str)
return match
def verify_hmac_sha256(request, client_secret=None,
resource_owner_secret=None):
"""Verify a HMAC-SHA256 signature.
Per `section 3.4`_ of the spec.
.. _`section 3.4`: https://tools.ietf.org/html/rfc5849#section-3.4
To satisfy `RFC2616 section 5.2`_ item 1, the request argument's uri
attribute MUST be an absolute URI whose netloc part identifies the
origin server or gateway on which the resource resides. Any Host
item of the request argument's headers dict attribute will be
ignored.
.. _`RFC2616 section 5.2`: https://tools.ietf.org/html/rfc2616#section-5.2
"""
norm_params = normalize_parameters(request.params)
bs_uri = base_string_uri(request.uri)
sig_base_str = signature_base_string(request.http_method, bs_uri,
norm_params)
signature = sign_hmac_sha256(sig_base_str, client_secret,
resource_owner_secret)
match = safe_string_equals(signature, request.signature)
if not match:
log.debug('Verify HMAC-SHA256 failed: signature base string: %s',
sig_base_str)
return match
def _prepare_key_plus(alg, keystr):
if isinstance(keystr, bytes):
keystr = keystr.decode('utf-8')
return alg.prepare_key(keystr)
def verify_rsa_sha1(request, rsa_public_key):
"""Verify a RSASSA-PKCS #1 v1.5 base64 encoded signature.
Per `section 3.4.3`_ of the spec.
Note this method requires the jwt and cryptography libraries.
.. _`section 3.4.3`: https://tools.ietf.org/html/rfc5849#section-3.4.3
To satisfy `RFC2616 section 5.2`_ item 1, the request argument's uri
attribute MUST be an absolute URI whose netloc part identifies the
origin server or gateway on which the resource resides. Any Host
item of the request argument's headers dict attribute will be
ignored.
.. _`RFC2616 section 5.2`: https://tools.ietf.org/html/rfc2616#section-5.2
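    A usage sketch; ``public_key_pem`` stands in for the client's PEM-encoded
    RSA public key string::
        if not verify_rsa_sha1(request, public_key_pem):
            raise ValueError('Invalid signature.')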
"""
norm_params = normalize_parameters(request.params)
bs_uri = base_string_uri(request.uri)
sig_base_str = signature_base_string(request.http_method, bs_uri,
norm_params).encode('utf-8')
sig = binascii.a2b_base64(request.signature.encode('utf-8'))
alg = _jwt_rs1_signing_algorithm()
key = _prepare_key_plus(alg, rsa_public_key)
verify_ok = alg.verify(sig_base_str, key, sig)
if not verify_ok:
log.debug('Verify RSA-SHA1 failed: signature base string: %s',
sig_base_str)
return verify_ok
def verify_plaintext(request, client_secret=None, resource_owner_secret=None):
"""Verify a PLAINTEXT signature.
Per `section 3.4`_ of the spec.
.. _`section 3.4`: https://tools.ietf.org/html/rfc5849#section-3.4
"""
signature = sign_plaintext(client_secret, resource_owner_secret)
match = safe_string_equals(signature, request.signature)
if not match:
log.debug('Verify PLAINTEXT failed')
return match
| [
"[email protected]"
]
| |
c563ebf7c8f48e07c6f75e980fe4f341bf47c19f | 25ebc03b92df764ff0a6c70c14c2848a49fe1b0b | /daily/20180703/example_resumable/05handler_cli.py | 391b49df85d96e1dc81fa2dd64d1562ecb57edaa | []
| no_license | podhmo/individual-sandbox | 18db414fafd061568d0d5e993b8f8069867dfcfb | cafee43b4cf51a321f4e2c3f9949ac53eece4b15 | refs/heads/master | 2023-07-23T07:06:57.944539 | 2023-07-09T11:45:53 | 2023-07-09T11:45:53 | 61,940,197 | 6 | 0 | null | 2022-10-19T05:01:17 | 2016-06-25T11:27:04 | Python | UTF-8 | Python | false | false | 489 | py | import csv
import sys
def main(args):
yield from run(args.input)
def run(itr):
yield ["x", "x*x"]
for x in itr:
x = int(x)
yield {"x": x, "x*x": x * x}
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--input", action="append", default=["1", "2", "3", "4", "5"])
args = parser.parse_args()
itr = main(args)
w = csv.DictWriter(sys.stdout, fieldnames=next(itr))
w.writerows(itr)
| [
"[email protected]"
]
| |
b6482123aff9a7f3534d1b54a7a1b44d4566812b | 60715c9ea4c66d861708531def532814eab781fd | /python-programming-workshop/test/pythondatastructures/pythonbuiltinds/list_comprehensionn/listcompmorecomplicated.py | 73a0917342bf4c8b19047cc746f0628e88fafa51 | []
| no_license | bala4rtraining/python_programming | 6ce64d035ef04486f5dc9572cb0975dd322fcb3e | 99a5e6cf38448f5a01b310d5f7fa95493139b631 | refs/heads/master | 2023-09-03T00:10:26.272124 | 2021-11-01T08:20:52 | 2021-11-01T08:20:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | py |
# nested comprehension: the outer loop keeps even x, the inner keeps odd y
result = [(x, y) for x in range(5) if x % 2 == 0 for y in range(5) if y % 2 == 1]
print(result)  # [(0, 1), (0, 3), (2, 1), (2, 3), (4, 1), (4, 3)]
| [
"[email protected]"
]
| |
e771087dda9f75a0335919a1fb638e8c0f758ab6 | 8fd07ea363ba4263bafe25d213c72cc9a93e2b3e | /nsd2018-master/nsd1804/python/day05/u2d.py | 5bab4f12fb972e1d039dbb0c1b0ab2b1eb7c6dc5 | []
| no_license | ml758392/python_tedu | 82e12ae014f0fc81230386fab07f901510fc8837 | 9f20798604db0ac8cd7b69d8c7a52ee361ebc7a7 | refs/heads/master | 2020-04-12T08:30:42.354663 | 2019-03-29T11:55:30 | 2019-03-29T11:55:30 | 162,386,878 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 288 | py | import sys
def unix2dos(fname):
    # write the converted copy next to the original, with a .txt suffix
    dst_fname = fname + '.txt'
    with open(fname) as src_fobj:
        # newline='' keeps Python from translating '\n' again on Windows
        with open(dst_fname, 'w', newline='') as dst_fobj:
            for line in src_fobj:
                # drop the Unix line ending (and trailing blanks), add the DOS one
                dst_fobj.write(line.rstrip() + '\r\n')
if __name__ == '__main__':
unix2dos(sys.argv[1])
| [
"yy.tedu.cn"
]
| yy.tedu.cn |
e2e24e924dd08430e582554e7321d4125ec6c862 | c40e84f6ca54fd85fc4f91740f6d35b9e693584a | /LeetCode/Python/073 Set Matrix Zeroes.py | 2626b6ad540243c985a9763a59b8dc676a17801a | []
| no_license | arif-hanif/Algorithm | 8b4d7b7e1c32524558f35bcca2f70b6283b16370 | 84b5be24f7b083b6fab6228a49eb279ab764ccda | refs/heads/master | 2021-01-15T16:42:29.079179 | 2016-09-10T11:32:25 | 2016-09-10T11:32:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,025 | py | # -*- coding: utf-8 -*-
'''
Set Matrix Zeroes
=================
Given an m x n matrix, if an element is 0, set its entire row and column to 0.
Do it in place.
Follow up:
Did you use extra space?
A straight forward solution using O(mn) space is probably a bad idea.
A simple improvement uses O(m + n) space, but still not the best solution.
Could you devise a constant space solution?
'''
class Solution(object):
    '''Approach:
    Record the rows and columns that contain a 0, then traverse the matrix
    again and set every cell in those rows and columns to 0.
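    A quick illustration (hypothetical input)::
        m = [[1, 0], [1, 1]]
        Solution().setZeroes(m)
        # m is now [[0, 0], [1, 0]]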
'''
def setZeroes(self, matrix):
if not matrix:
return
rows, cols = set(), set()
for i, row in enumerate(matrix):
for j, v in enumerate(row):
if v == 0:
rows.add(i)
cols.add(j)
for r in rows:
for j in xrange(len(matrix[0])):
matrix[r][j] = 0
for c in cols:
for i in xrange(len(matrix)):
matrix[i][c] = 0
| [
"[email protected]"
]
| |
d992dc6e406ab8fbad3aebc90fc1b8a3592c3027 | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /sdk/servicebus/azure-servicebus/examples/async_examples/example_queue_send_receive_batch_async.py | 2ae76d4e5a94a9d9b0c3c20ade55f474df3daa07 | [
"MIT"
]
| permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 1,914 | py | # ------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# -------------------------------------------------------------------------
import asyncio
import conftest
from azure.servicebus.aio import ServiceBusClient, Message
from azure.servicebus.common.constants import ReceiveSettleMode
async def sample_queue_send_receive_batch_async(sb_config, queue):
client = ServiceBusClient(
service_namespace=sb_config['hostname'],
shared_access_key_name=sb_config['key_name'],
shared_access_key_value=sb_config['access_key'],
debug=True)
queue_client = client.get_queue(queue)
async with queue_client.get_sender() as sender:
for i in range(100):
message = Message("Sample message no. {}".format(i))
await sender.send(message)
await sender.send(Message("shutdown"))
async with queue_client.get_receiver(idle_timeout=1, mode=ReceiveSettleMode.PeekLock, prefetch=10) as receiver:
# Receive list of messages as a batch
batch = await receiver.fetch_next(max_batch_size=10)
await asyncio.gather(*[m.complete() for m in batch])
# Receive messages as a continuous generator
async for message in receiver:
print("Message: {}".format(message))
print("Sequence number: {}".format(message.sequence_number))
await message.complete()
if __name__ == '__main__':
live_config = conftest.get_live_servicebus_config()
queue_name = conftest.create_standard_queue(live_config)
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(sample_queue_send_receive_batch_async(live_config, queue_name))
finally:
conftest.cleanup_queue(live_config, queue_name)
| [
"[email protected]"
]
| |
75aa760a5335cac72dbbcde939f818d0c5ecf3ac | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_366/ch27_2019_03_05_20_56_38_513299.py | 0e3a0ae523a08345d0ba9fc83d035fa90b50cc99 | []
| no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 280 | py | qtde_cigarros = int(input('Quantos cigarros você fuma por dia?'))
qtde_anos = float(input('Há quantos anos você fuma?'))
def tempo_perdido(qtde_cigarros, qtde_anos):
    # each cigarette costs ~10 minutes: n*365*years*10 minutes, divided by
    # 1440 minutes per day, simplifies to n*365*years/144, i.e. days lost
    y = qtde_cigarros*365*qtde_anos/144
    return y
c = tempo_perdido(qtde_cigarros, qtde_anos)
print(c) | [
"[email protected]"
]
| |
35d99c94d8fbf0df2eb3e6cc2c0ef0d44c95e3dd | 6b3e8b4291c67195ad51e356ba46602a15d5fe38 | /test_v2/core/test_config.py | 311cc073a68e5459dfd6c8c248fdf2f4f5fda633 | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
]
| permissive | csaybar/raster-vision | 4f5bb1125d4fb3ae5c455db603d8fb749221dd74 | 617ca15f64e3b8a391432306a743f7d0dfff352f | refs/heads/master | 2021-02-26T19:02:53.752971 | 2020-02-27T17:25:31 | 2020-02-27T17:25:31 | 245,547,406 | 2 | 1 | NOASSERTION | 2020-03-07T01:24:09 | 2020-03-07T01:24:08 | null | UTF-8 | Python | false | false | 3,493 | py | from typing import List
import unittest
import copy
from pydantic.error_wrappers import ValidationError
from rastervision2.pipeline.config import (Config, register_config, build_config,
upgrade_config, Upgrader)
class AConfig(Config):
x: str = 'x'
@register_config('asub1')
class ASub1Config(AConfig):
y: str = 'y'
@register_config('asub2')
class ASub2Config(AConfig):
y: str = 'y'
class BConfig(Config):
x: str = 'x'
class UpgradeC1(Upgrader):
def upgrade(self, cfg_dict):
cfg_dict = copy.deepcopy(cfg_dict)
cfg_dict['x'] = cfg_dict['y']
del cfg_dict['y']
return cfg_dict
@register_config('c', version=1, upgraders=[UpgradeC1()])
class CConfig(Config):
al: List[AConfig]
bl: List[BConfig]
a: AConfig
b: BConfig
x: str = 'x'
class TestConfig(unittest.TestCase):
def test_to_from(self):
cfg = CConfig(
al=[AConfig(), ASub1Config(),
ASub2Config()],
bl=[BConfig()],
a=ASub1Config(),
b=BConfig())
exp_dict = {
'type_hint':
'c',
'version':
1,
'a': {
'type_hint': 'asub1',
'x': 'x',
'y': 'y'
},
'al': [{
'x': 'x'
}, {
'type_hint': 'asub1',
'x': 'x',
'y': 'y'
}, {
'type_hint': 'asub2',
'x': 'x',
'y': 'y'
}],
'b': {
'x': 'x'
},
'bl': [{
'x': 'x'
}],
'x':
'x'
}
self.assertDictEqual(cfg.dict(), exp_dict)
self.assertEqual(build_config(exp_dict), cfg)
def test_no_extras(self):
with self.assertRaises(ValidationError):
BConfig(zz='abc')
def test_upgrade(self):
c_dict_v0 = {
'type_hint':
'c',
'version':
0,
'a': {
'type_hint': 'asub1',
'x': 'x',
'y': 'y'
},
'al': [{
'x': 'x'
}, {
'type_hint': 'asub1',
'x': 'x',
'y': 'y'
}, {
'type_hint': 'asub2',
'x': 'x',
'y': 'y'
}],
'b': {
'x': 'x'
},
'bl': [{
'x': 'x'
}],
'y':
'x'
}
c_dict_v1 = {
'type_hint':
'c',
'version':
1,
'a': {
'type_hint': 'asub1',
'x': 'x',
'y': 'y'
},
'al': [{
'x': 'x'
}, {
'type_hint': 'asub1',
'x': 'x',
'y': 'y'
}, {
'type_hint': 'asub2',
'x': 'x',
'y': 'y'
}],
'b': {
'x': 'x'
},
'bl': [{
'x': 'x'
}],
'x':
'x'
}
upgraded_c_dict = upgrade_config(c_dict_v0)
self.assertDictEqual(upgraded_c_dict, c_dict_v1)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
4825299ada1c314576b5b7d6ef81e6e9a85796e6 | 14c8434f6a4f09b84bc7dae3b6b225e7e13b156d | /app/errors.py | abd12d1d19f7af2be57ad88f68ab3f628692e411 | []
| no_license | mingming2513953126/flack | 07299d5cc62aa4ced0734f2b00db587a24261d69 | dbc793c0908629ae7fee87250f2e0f4456e76f33 | refs/heads/master | 2021-05-10T09:11:24.354831 | 2018-01-25T13:38:02 | 2018-01-25T13:38:02 | 118,917,210 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | # encoding: utf-8
'''
@author: lileilei
@file: errors.py
@time: 2017/5/22 20:50
'''
from flask import render_template, jsonify, request
| [
"[email protected]"
]
| |
af6f8fa01e3dd3c3a068bcce200fc48515571e7f | c237d854f2fc78a7583f2bf0528355c8b14912f8 | /tests/test_example.py | 099b0812c779fffcc65bb463803178d1b6192432 | [
"MIT"
]
| permissive | azridev/flask-dashboard-shards | da072e7406e9be3b85f31a9dff6167a0d87a7496 | c6833e6d55c7dd065b4c6e9b677288e9fe9aa344 | refs/heads/master | 2021-05-19T09:08:00.079436 | 2020-03-26T19:04:00 | 2020-03-26T19:04:00 | 251,620,836 | 0 | 1 | MIT | 2020-03-31T14:04:41 | 2020-03-31T14:04:40 | null | UTF-8 | Python | false | false | 584 | py | # -*- encoding: utf-8 -*-
"""
License: MIT
Copyright (c) 2019 - present AppSeed.us
"""
from tests.test_base import check_pages, check_blueprints
@check_pages('/', '/home/index')
def test_pages(base_client):
# do something
base_client.post('/', data={})
# the pages are tested (GET request: 200) afterwards by the
# @check_pages decorator
@check_blueprints('/forms', '/ui')
def test_blueprints(base_client):
# do something
base_client.post('/', data={})
# the blueprints are tested (GET request: 200) afterwards by the
# @check_blueprints decorator
| [
"[email protected]"
]
| |
6f91b03566136db683a3d86141888b7a9833cd10 | 69c33fcad69a2e61cc60209401215530d033e712 | /Python/Python Basics/61.bug.py | 7042666dc52065c03ab0862676b4ab06c3b63872 | []
| no_license | KULDEEPMALIKM41/Practices | 7659b895ea959c7df2cdbc79c0b982b36f2bde63 | 193abe262ff281a384aac7895bb66dc39ee6e88d | refs/heads/master | 2023-08-17T11:01:11.694282 | 2021-09-30T08:12:41 | 2021-09-30T08:12:41 | 289,527,102 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 59 | py | i=1
while i < 11:
    print(i, end='')  # both body lines must be indented for the loop to advance
    i += 1 | [
"[email protected]"
]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.